raghavdw committed on
Commit
4b9d0f3
1 Parent(s): cc1d4ea

updated app.py generate response function

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -5,15 +5,15 @@ from transformers import AutoModelWithLMHead, AutoTokenizer
5
  # Load model directly
6
 
7
 
8
- tokenizer = AutoTokenizer.from_pretrained("raghavdw/finedtuned_gpt2_medQA_model")
9
- model = AutoModelWithLMHead.from_pretrained("raghavdw/finedtuned_gpt2_medQA_model")
10
 
11
  # Function for response generation
12
 
13
  def generate_query_response(prompt, max_length=200):
14
 
15
- model = model
16
- tokenizer = tokenizer
17
 
18
  input_ids = tokenizer.encode(prompt, return_tensors="pt")
19
 
 
5
  # Load model directly
6
 
7
 
8
+ loaded_tokenizer = AutoTokenizer.from_pretrained("raghavdw/finedtuned_gpt2_medQA_model")
9
+ loaded_model = AutoModelWithLMHead.from_pretrained("raghavdw/finedtuned_gpt2_medQA_model")
10
 
11
  # Function for response generation
12
 
13
  def generate_query_response(prompt, max_length=200):
14
 
15
+ model = loaded_model
16
+ tokenizer = loaded_tokenizer
17
 
18
  input_ids = tokenizer.encode(prompt, return_tensors="pt")
19