lucas-w committed on
Commit
73a863b
β€’
1 Parent(s): dc11b12

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -52,14 +52,14 @@ def givetext(input_text,lmodel,ltokenizer):
52
 
53
  model_input = ltokenizer(eval_prompt, return_tensors="pt").to("cpu")
54
 
55
- PRINT(" BEFORE EVAL LMODEL")
56
 
57
  lmodel.eval()
58
 
59
- PRINT("BEFORE DOING TORCH.NO_GRAD()")
60
 
61
  with torch.no_grad():
62
- PRINT("BEFORE RETURNING")
63
 
64
  return (ltokenizer.decode(lmodel.generate(**model_input, max_new_tokens=1000)[0], skip_special_tokens=True))
65
  #return (ltokenizer.decode(lmodel.generate(**model_input, max_new_tokens=100)[0], skip_special_tokens=True))
@@ -68,7 +68,7 @@ def givetext(input_text,lmodel,ltokenizer):
68
  #txt1 = "My name is {fname}, I'm {age}".format(fname = "John", age = 36)
69
 
70
  def mental_chat(message, history):
71
- PRINT("BEFORE CALLING GIVETEXT")
72
  return givetext(message,newmodel,newtokenizer)
73
 
74
  demo = gr.ChatInterface(mental_chat)
 
52
 
53
  model_input = ltokenizer(eval_prompt, return_tensors="pt").to("cpu")
54
 
55
+ print(" BEFORE EVAL LMODEL")
56
 
57
  lmodel.eval()
58
 
59
+ print("BEFORE DOING TORCH.NO_GRAD()")
60
 
61
  with torch.no_grad():
62
+ print("BEFORE RETURNING")
63
 
64
  return (ltokenizer.decode(lmodel.generate(**model_input, max_new_tokens=1000)[0], skip_special_tokens=True))
65
  #return (ltokenizer.decode(lmodel.generate(**model_input, max_new_tokens=100)[0], skip_special_tokens=True))
 
68
  #txt1 = "My name is {fname}, I'm {age}".format(fname = "John", age = 36)
69
 
70
  def mental_chat(message, history):
71
+ print("BEFORE CALLING GIVETEXT")
72
  return givetext(message,newmodel,newtokenizer)
73
 
74
  demo = gr.ChatInterface(mental_chat)