Spaces:
Sleeping
Sleeping
Updated app.py: revised the generate_query_response function to use the fine-tuned medQA model and tokenizer
Browse files
app.py
CHANGED
@@ -5,15 +5,15 @@ from transformers import AutoModelWithLMHead, AutoTokenizer
|
|
5 |
# Load model directly
|
6 |
|
7 |
|
8 |
-
|
9 |
-
|
10 |
|
11 |
# Function for response generation
|
12 |
|
13 |
def generate_query_response(prompt, max_length=200):
|
14 |
|
15 |
-
model =
|
16 |
-
tokenizer =
|
17 |
|
18 |
input_ids = tokenizer.encode(prompt, return_tensors="pt")
|
19 |
|
|
|
5 |
# Load model directly
|
6 |
|
7 |
|
8 |
+
loaded_tokenizer = AutoTokenizer.from_pretrained("raghavdw/finedtuned_gpt2_medQA_model")
|
9 |
+
loaded_model = AutoModelWithLMHead.from_pretrained("raghavdw/finedtuned_gpt2_medQA_model")
|
10 |
|
11 |
# Function for response generation
|
12 |
|
13 |
def generate_query_response(prompt, max_length=200):
|
14 |
|
15 |
+
model = loaded_model
|
16 |
+
tokenizer = loaded_tokenizer
|
17 |
|
18 |
input_ids = tokenizer.encode(prompt, return_tensors="pt")
|
19 |
|