lucas-w committed on
Commit
dc11b12
β€’
1 Parent(s): 1d31b76

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -0
app.py CHANGED
@@ -47,10 +47,20 @@ def givetext(input_text,lmodel,ltokenizer):
47
  eval_prompt_pt2="\n\n\n### Response:\n"
48
  eval_prompt=eval_prompt_pt1+input_text+eval_prompt_pt2
49
  print(eval_prompt,"\n\n")
 
 
 
50
  model_input = ltokenizer(eval_prompt, return_tensors="pt").to("cpu")
 
 
51
 
52
  lmodel.eval()
 
 
 
53
  with torch.no_grad():
 
 
54
  return (ltokenizer.decode(lmodel.generate(**model_input, max_new_tokens=1000)[0], skip_special_tokens=True))
55
  #return (ltokenizer.decode(lmodel.generate(**model_input, max_new_tokens=100)[0], skip_special_tokens=True))
56
  except Exception as error:
@@ -58,6 +68,7 @@ def givetext(input_text,lmodel,ltokenizer):
58
  #txt1 = "My name is {fname}, I'm {age}".format(fname = "John", age = 36)
59
 
60
def mental_chat(message, history):
    """Gradio chat callback: produce a reply for *message*.

    *history* is required by the ChatInterface signature but is not used;
    generation is delegated to givetext with the module-level model/tokenizer.
    """
    reply = givetext(message, newmodel, newtokenizer)
    return reply
62
 
63
# Build the Gradio chat UI around the mental_chat callback (launched elsewhere).
demo = gr.ChatInterface(mental_chat)
 
47
  eval_prompt_pt2="\n\n\n### Response:\n"
48
  eval_prompt=eval_prompt_pt1+input_text+eval_prompt_pt2
49
  print(eval_prompt,"\n\n")
50
+
51
+ print("BEFORE PROCESSING MODEL INPUT")
52
+
53
  model_input = ltokenizer(eval_prompt, return_tensors="pt").to("cpu")
54
+
55
+ print(" BEFORE EVAL LMODEL")
56
 
57
  lmodel.eval()
58
+
59
+ print("BEFORE DOING TORCH.NO_GRAD()")
60
+
61
  with torch.no_grad():
62
+     print("BEFORE RETURNING")
63
+
64
  return (ltokenizer.decode(lmodel.generate(**model_input, max_new_tokens=1000)[0], skip_special_tokens=True))
65
  #return (ltokenizer.decode(lmodel.generate(**model_input, max_new_tokens=100)[0], skip_special_tokens=True))
66
  except Exception as error:
 
68
  #txt1 = "My name is {fname}, I'm {age}".format(fname = "John", age = 36)
69
 
70
def mental_chat(message, history):
    """Gradio ChatInterface callback: generate a reply for *message*.

    *history* is accepted to satisfy the ChatInterface signature but unused;
    the reply comes from givetext with the module-level model/tokenizer.
    """
    # Fix: the committed diff called PRINT(...), which raises NameError —
    # Python's builtin is lowercase `print`.
    print("BEFORE CALLING GIVETEXT")  # debug trace
    return givetext(message, newmodel, newtokenizer)
73
 
74
# Gradio chat interface wired to mental_chat; .launch() is presumably called later — not shown here.
demo = gr.ChatInterface(mental_chat)