Update README.md
README.md
This model uses the hfl/chinese-roberta-wwm-ext-large backbone and was trained on the Chinese versions of the SNLI, MNLI, DNLI, and KvPI datasets.

The model structure is as follows:

```python
import torch
import torch.nn as nn

class RobertaForSequenceClassification(nn.Module):
    def __init__(self, tagset_size):
        super(RobertaForSequenceClassification, self).__init__()
        self.tagset_size = tagset_size  # number of output labels
        ...

class RobertaClassificationHead(nn.Module):
    ...
        x = self.out_proj(x)  # final projection to label logits
        return x

model = RobertaForSequenceClassification(num_labels)
model.load_state_dict(torch.load(args.model_save_path + 'Roberta_large_model.pt', map_location=device))
```
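For reference, a minimal inference sketch is shown below. It assumes the `model` and `device` from the snippet above, uses `BertTokenizer` (the hfl RoBERTa-wwm checkpoints ship a BERT-style vocabulary), and assumes a `forward(input_ids, attention_mask)` signature, which this README does not show; the premise/hypothesis pair is purely illustrative.

```python
# Minimal inference sketch, assuming `model` and `device` from the snippet above
# and a forward(input_ids, attention_mask) signature (not shown in this README).
import torch
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('hfl/chinese-roberta-wwm-ext-large')

premise = '今天天气很好。'    # "The weather is nice today."
hypothesis = '今天在下雨。'  # "It is raining today."

# Encode the sentence pair as NLI models expect: [CLS] premise [SEP] hypothesis [SEP]
inputs = tokenizer(premise, hypothesis, return_tensors='pt', truncation=True)

model.to(device)
model.eval()
with torch.no_grad():
    logits = model(inputs['input_ids'].to(device),
                   inputs['attention_mask'].to(device))  # assumed signature

pred = logits.argmax(dim=-1).item()  # index of the predicted label
print(pred)
```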