steve7909 committed
Commit 0a4d210
1 Parent(s): faf9e56

update name

Files changed (2)
  1. README.md +1 -1
  2. app.py +35 -19
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: EvaLingo
+title: KAKAPO EFL Chatbot
 app_file: app.py
 sdk: gradio
 sdk_version: 4.23.0
app.py CHANGED
@@ -34,8 +34,6 @@ headers = {"Authorization": f"Bearer {HF_TOKEN}"}
 # Global variable to control debug printing
 DEBUG_MODE = True
 
-
-
 try:
     file_path = "issun-boshi.txt"
     # Open the file in read mode ('r')
@@ -54,8 +52,6 @@ def debug_print(*args, **kwargs):
     if DEBUG_MODE:
         print(*args, **kwargs)
 
-
-
 def translate_openai(input_text):
 
     prompt = "Translate the following text into Japanese language: " + input_text
@@ -139,32 +135,52 @@ def predict(message, history):
     history_openai_format.append({"role": "assistant", "content":assistant})
     history_openai_format.append({"role": "user", "content": message})
 
-    response = client.chat.completions.create(
-        model='gpt-3.5-turbo',
-        messages=history_openai_format,
-        temperature=1.0, #?
-        #max_tokens=150,
-        stream=True
-    )
+    try:
+        response = client.chat.completions.create(
+            model='gpt-3.5-turbo',
+            messages=history_openai_format,
+            temperature=0.7,
+            #max_tokens=150,
+            stream=True
+        )
+    except Exception as e:
+        debug_print("Error in getting LLM response.", str(e))
+    try:
+        partial_message = ""
+        for chunk in response:
+            if chunk.choices[0].delta.content is not None:
+                partial_message = partial_message + chunk.choices[0].delta.content
+                yield partial_message
+
+    except Exception as e:
+        debug_print("Error in streaming output", str(e))
 
-    partial_message = ""
-    for chunk in response:
-        if chunk.choices[0].delta.content is not None:
-            partial_message = partial_message + chunk.choices[0].delta.content
-            yield partial_message
+strategies = '''making connections between the text and their prior knowledge;
+forming and testing hypotheses about texts;
+asking questions about the text;
+creating mental images or visualising;
+inferring meaning from the text;
+identifying the writer’s purpose and point of view;
+identifying the main idea or theme in the text;
+summarising the information or events in the text;
+analysing and synthesising ideas, information, structures, and features in the text;
+evaluating ideas and information'''
 
 
 with gr.Blocks() as demo:
     with gr.Row():
-        learner_data = gr.Textbox(label="Learner Data", placeholder="Enter learner data here...", lines=4, value="Honoka is a Japanese EFL student.")
+        learner_data = gr.Textbox(label="Learner Data", placeholder="Enter learner data here...", lines=4, value="Honoka is a Japanese EFL student. [summary of relevant student data]")
         learning_content = gr.Textbox(label="Learning Content", placeholder="Enter learning content here...", lines=4, value=learning_content)
         teacher_prompt = gr.Textbox(label="Teacher Prompt", placeholder="Enter chat guidance here...", lines=4,
-                                    value="You are a professional EFL teacher. Guide the conversation to discuss the Learning Content below.")
+                                    value=f"You are a professional EFL teacher. Help the student actively read the text using these strategies: {strategies}. Guide the conversation to discuss the Learning Content below.")
 
     # pre prompt the history_openai_format list
     history_openai_format.append({"role": "system", "content": f"{teacher_prompt.value} Learner Data: {learner_data.value}. Learning Content: {learning_content.value}. "})
 
-    gr.ChatInterface(predict)
+    try:
+        gr.ChatInterface(predict)
+    except Exception as e:
+        debug_print("Error in gr.ChatInterface(predict)", str(e))
 
 demo.launch(debug=True)
 
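For context, a minimal, self-contained sketch of the pattern the app.py hunk wraps in try/except: a generator that streams a gpt-3.5-turbo completion and yields the growing reply to gr.ChatInterface. This is an illustration of the technique, not the repo's code; the OPENAI_API_KEY environment variable and the system_prompt placeholder are assumptions, and the real app builds its history and system prompt from the Textbox values shown in the diff.

import os

import gradio as gr
from openai import OpenAI

client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])  # assumption: key provided via environment
system_prompt = "You are a professional EFL teacher."   # placeholder for the composed teacher prompt

def predict(message, history):
    # Rebuild OpenAI-format messages from Gradio's (user, assistant) pairs
    messages = [{"role": "system", "content": system_prompt}]
    for user, assistant in history:
        messages.append({"role": "user", "content": user})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})

    try:
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=messages,
            temperature=0.7,
            stream=True,
        )
    except Exception as e:
        # Surface the failure in the chat instead of crashing the generator
        yield f"Error contacting the model: {e}"
        return

    # Accumulate streamed deltas and yield the growing reply so the chat updates live
    partial_message = ""
    for chunk in response:
        delta = chunk.choices[0].delta.content
        if delta is not None:
            partial_message += delta
            yield partial_message

with gr.Blocks() as demo:
    gr.ChatInterface(predict)

if __name__ == "__main__":
    demo.launch()

Yielding the accumulated string rather than each individual delta is what lets ChatInterface redraw the full partial reply on every chunk, which is the streaming behaviour the commit guards with its try/except blocks.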