Kartikeyssj2 committed
Commit b1e7f8d
1 Parent(s): 5d9ed6e

Update main.py

Files changed (1)
  1. main.py +44 -16
main.py CHANGED
@@ -33,8 +33,11 @@ from sentence_transformers import SentenceTransformer, util
  from textblob import TextBlob
  import nltk
 
- nltk.download('punkt_tab')
- nltk.download('words')
+ data_dir = 'nltk_data'
+
+ # Set the NLTK data path to the local directory
+ nltk.data.path.append(data_dir)
+
 
  ''''''''''''''''''''''''' Skeletal Structure for the Models '''''''''''''''''''''''''''
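The updated code points NLTK at a local 'nltk_data' folder instead of downloading corpora at startup; how that folder gets populated is not part of this diff. A minimal one-time setup sketch, assuming the same corpora the removed lines fetched ('punkt_tab' and 'words'):

import nltk

data_dir = 'nltk_data'

# Fetch the corpora the old startup code downloaded, but into the local
# directory that the updated main.py appends to nltk.data.path.
nltk.download('punkt_tab', download_dir=data_dir)
nltk.download('words', download_dir=data_dir)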
 
@@ -89,20 +92,24 @@ class DistilBertForRegression(DistilBertPreTrainedModel):
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
 
- print("Downloading pronunciation tokenizer...")
- pronunciation_tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
- print("Downloading pronunciation model...")
- pronunciation_model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
+ # Load the Pronunciation model and tokenizer from the local directory
+ pronunciation_model_dir = 'pronunciation_model'
+ fluency_model_dir = 'fluency_model'
+
+ print("Loading pronunciation tokenizer from local directory...")
+ pronunciation_tokenizer = Wav2Vec2Tokenizer.from_pretrained(pronunciation_model_dir)
 
- pronunciation_model.to(device)
+ print("Loading pronunciation model from local directory...")
+ pronunciation_model = Wav2Vec2ForCTC.from_pretrained(pronunciation_model_dir)
 
- ''''''''''''''''''' Loading the Fluency Model and Tokenizer '''''''''''''
+ # Load the Fluency model and tokenizer from the local directory
+ print("Loading fluency tokenizer from local directory...")
+ fluency_tokenizer = DistilBertTokenizer.from_pretrained(fluency_model_dir)
 
- print("Downloading fluency tokenizer...")
- fluency_tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
- print("Downloading fluency model...")
- fluency_model = DistilBertForRegression.from_pretrained("Kartikeyssj2/Fluency_Scoring_V2")
- print("Download completed.")
+ print("Loading fluency model from local directory...")
+ fluency_model = DistilBertForSequenceClassification.from_pretrained(fluency_model_dir)
+
+ print("Models loaded successfully.")
 
  fluency_model.to(device)
  # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
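The loader now reads from local 'pronunciation_model' and 'fluency_model' directories and switches the fluency head to DistilBertForSequenceClassification; populating those directories is outside this diff. A rough sketch, assuming the checkpoints the removed lines pulled from the Hub:

from transformers import (
    DistilBertTokenizer,
    Wav2Vec2ForCTC,
    Wav2Vec2Tokenizer,
)

# Pronunciation: same checkpoint the removed lines downloaded at startup.
Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h").save_pretrained("pronunciation_model")
Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").save_pretrained("pronunciation_model")

# Fluency: the removed lines used the distilbert-base-uncased tokenizer with
# Kartikeyssj2/Fluency_Scoring_V2 weights; whatever checkpoint is placed in
# 'fluency_model' must be loadable by DistilBertForSequenceClassification,
# which this revision uses instead of the custom DistilBertForRegression.
DistilBertTokenizer.from_pretrained("distilbert-base-uncased").save_pretrained("fluency_model")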
@@ -128,7 +135,17 @@ linreg_pronunciation = load_pickle_file("pronunciation_model_biasing.pkl")
 
  '''''''''''''''''''''' Load the Content Relevance and Scoring Model '''''''''''''''
 
- content_relevance_model = SentenceTransformer('sentence-transformers/msmarco-distilbert-cos-v5')
+
+ model_dir = 'content_relevance_model'
+
+ # Load the SentenceTransformer model from the local directory
+ print("Loading SentenceTransformer model from local directory...")
+ model = SentenceTransformer(model_dir)
+
+ print("Model loaded successfully.")
+
+
+
 
  print(linreg_fluency)
  print(linreg_pronunciation)
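The content-relevance loader now reads from a local 'content_relevance_model' folder and binds the result to `model` rather than `content_relevance_model`, so any downstream references to the old name would need the same rename. A sketch of populating that folder, assuming the checkpoint the removed line used:

from sentence_transformers import SentenceTransformer

# Save the Hub checkpoint the removed line loaded into the local folder
# the new code expects.
SentenceTransformer('sentence-transformers/msmarco-distilbert-cos-v5').save('content_relevance_model')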
@@ -143,9 +160,20 @@ import torch
 
  ''''''''''''''''''''''' IMAGE CAPTIONING MODEL '''''''''''''''''
 
- image_captioning_processor = BlipProcessor.from_pretrained("noamrot/FuseCap")
+ # Define the directories where the models and processors are saved
+ processor_dir = 'blip_processor'
+ model_dir = 'blip_model'
+
+ # Load the BlipProcessor from the local directory
+ print("Loading BlipProcessor from local directory...")
+ image_captioning_processor = BlipProcessor.from_pretrained(processor_dir)
+ print("BlipProcessor loaded successfully.")
 
- image_captioning_model = BlipForConditionalGeneration.from_pretrained("noamrot/FuseCap").to(device)
+ # Load the BlipForConditionalGeneration model from the local directory
+ print("Loading BlipForConditionalGeneration model from local directory...")
+ image_captioning_model = BlipForConditionalGeneration.from_pretrained(model_dir)
+ image_captioning_model.to(device)  # Move model to the appropriate device
+ print("BlipForConditionalGeneration model loaded successfully.")
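Likewise, the captioning processor and model are now read from local 'blip_processor' and 'blip_model' folders. A sketch of exporting them once, assuming the FuseCap checkpoint the removed lines referenced:

from transformers import BlipForConditionalGeneration, BlipProcessor

# Export the Hub checkpoint the removed lines loaded into the two local
# folders the new code reads from.
BlipProcessor.from_pretrained("noamrot/FuseCap").save_pretrained("blip_processor")
BlipForConditionalGeneration.from_pretrained("noamrot/FuseCap").save_pretrained("blip_model")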
 
 