Erfan11 committed
Commit 7c2e0a4
1 Parent(s): e702be8

Create config.json

Files changed (1)
  1. config.json +24 -0
config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "model_type": "bert",
+   "num_labels": 2,
+   "hidden_size": 768,
+   "intermediate_size": 3072,
+   "max_position_embeddings": 512,
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "vocab_size": 30522,
+   "hidden_act": "gelu",
+   "initializer_range": 0.02,
+   "layer_norm_eps": 1e-12,
+   "pad_token_id": 0,
+   "type_vocab_size": 2,
+   "attention_probs_dropout_prob": 0.1,
+   "hidden_dropout_prob": 0.1,
+   "use_cache": true,
+   "model_version": "1.0",
+   "tokenizer_class": "BertTokenizer",
+   "classifier_dropout": null,
+   "architectures": [
+     "BertForSequenceClassification"
+   ]
+ }
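
Note that config.json must be strictly valid JSON: standard parsers, including the one in Hugging Face Transformers, reject "#" comments, so any annotations (e.g. "change model_type to gpt2 or roberta for other model families", "adjust num_labels for your task") have to live outside the file. As a quick sanity check, a config like this can be loaded and turned into a model. A minimal sketch in Python, assuming the transformers library is installed and config.json sits in the working directory:

# Minimal sketch: parse the committed config.json and build an
# untrained classification model from it. Assumes the transformers
# library is installed and config.json is in the working directory.
from transformers import BertConfig, BertForSequenceClassification

# from_json_file requires strictly valid JSON (no comments).
# Non-standard keys such as "model_version" are simply stored as
# extra attributes on the config and ignored by the model itself.
config = BertConfig.from_json_file("config.json")

# Building a model from a config alone yields randomly initialized
# weights shaped by the values above (12 layers, 12 heads, hidden
# size 768, vocab size 30522); use from_pretrained to load trained
# weights instead.
model = BertForSequenceClassification(config)
print(config.num_labels)  # -> 2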