Instructions for using Non-SHADovcy/synthetic-cpp-code-detection with libraries, inference providers, notebooks, and local apps. Follow the links below to get started.
- Libraries
- Transformers
How to use Non-SHADovcy/synthetic-cpp-code-detection with Transformers:
# Use a pipeline as a high-level helper
from transformers import pipeline
pipe = pipeline("feature-extraction", model="Non-SHADovcy/synthetic-cpp-code-detection", trust_remote_code=True)

# Load model directly
from transformers import AutoModel
model = AutoModel.from_pretrained("Non-SHADovcy/synthetic-cpp-code-detection", trust_remote_code=True, dtype="auto")
- Notebooks
- Google Colab
- Kaggle
| import torch | |
| import torch.nn as nn | |
| from transformers import PreTrainedModel, AutoModel | |
| from .model_config import CustomConfig | |
class LogRegClassifier(nn.Module):
    """Logistic-regression head: a single linear layer followed by a sigmoid.

    Maps a feature vector of size ``transformer_output_dim`` to one
    probability in (0, 1) per example.
    """

    def __init__(self, transformer_output_dim):
        """
        Args:
            transformer_output_dim (int): Size of the incoming feature
                vector (the transformer's hidden dimension).
        """
        # Zero-argument super() is the Python 3 idiom; the explicit
        # super(LogRegClassifier, self) form is a Python 2 holdover.
        super().__init__()
        self.linear = nn.Linear(transformer_output_dim, 1)

    def forward(self, x):
        """Return sigmoid(Wx + b); output shape is ``x.shape[:-1] + (1,)``."""
        return torch.sigmoid(self.linear(x))
class CombinedModel(PreTrainedModel):
    """Binary classifier: a pretrained transformer backbone topped with a
    logistic-regression head applied to the first-token representation.
    """

    config_class = CustomConfig

    def __init__(self, config):
        """
        Args:
            config (CustomConfig): Must provide ``transformer_type`` (the
                backbone checkpoint name) and ``transformer_output_dim``
                (the backbone's hidden size, fed to the head).
        """
        super().__init__(config)
        # The head's input width must match the backbone's hidden dimension;
        # both come from the config.
        self.transformer = AutoModel.from_pretrained(config.transformer_type)
        self.classifier = LogRegClassifier(config.transformer_output_dim)

    def forward(self, input_ids, attention_mask):
        """Return per-example probabilities of shape ``(batch, 1)``."""
        encoded = self.transformer(input_ids=input_ids, attention_mask=attention_mask)
        # First token of the last hidden state — presumably the [CLS]
        # embedding for a BERT-style backbone; verify against the tokenizer.
        cls_vector = encoded.last_hidden_state[:, 0, :]
        return self.classifier(cls_vector)