# -*- coding: utf-8 -*-
"""translation practice.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1KrnodZGBZrUFdaJ9FIn8IhtWtCL7peoE
"""
import os

import requests
import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI

# Load environment variables from .env file
load_dotenv()
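
# The .env file is expected to define both keys read below
# (values here are placeholders):
#   HUGGING_FACE_TOKEN=hf_xxxxxxxx
#   OPENAI_API_KEY=sk-xxxxxxxx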

# Access the env
HF_TOKEN = os.getenv('HUGGING_FACE_TOKEN')

# openai setup
client = OpenAI(
  api_key=os.getenv('OPENAI_API_KEY')
)

# hugging face setup (serverless Inference API); append a model name to
# API_URL to target a specific model, e.g.
# "mmnga/ELYZA-japanese-Llama-2-7b-instruct-gguf"
API_URL = "https://api-inference.huggingface.co/models/"
headers = {"Authorization": f"Bearer {HF_TOKEN}"}

# Global variable to control debug printing
DEBUG_MODE = True

file_content = "Not yet loaded"

try:
    file_path = "issun-boshi.txt"
    # Open the file in read mode ('r'); read it explicitly as UTF-8,
    # since the story is Japanese text
    with open(file_path, 'r', encoding='utf-8') as file:
        # Read the entire content of the file into a string
        file_content = file.read()
except Exception as e:
    print("Error loading short story file.", str(e))

learning_content = file_content


def debug_print(*args, **kwargs):
    if DEBUG_MODE:
        print(*args, **kwargs)

def translate_openai(input_text):
    """Translate the input text into Japanese with GPT."""
    prompt = "Translate the following text into Japanese: " + input_text

    response = client.chat.completions.create(  # get translation from GPT
        messages=[
            {
                "role": "user",
                "content": prompt,
            }
        ],
        model="gpt-3.5-turbo",
        temperature=0  # temperature 0 so the translation is stable across calls
    )
    translation = response.choices[0].message.content
    debug_print("GPT translation:", translation)

    return translation
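
# Minimal helper for Japanese -> English translation via the serverless
# Hugging Face Inference API, used by assess() below. This is a sketch:
# the ja->en model name is an assumption, so swap in whichever model you
# actually intend to query.
def translate_hf(input_text):
    response = requests.post(
        API_URL + "Helsinki-NLP/opus-mt-ja-en",  # assumed ja->en translation model
        headers=headers,
        json={"inputs": input_text},
    )
    response.raise_for_status()
    # translation models on the Inference API return [{"translation_text": "..."}]
    translation = response.json()[0]["translation_text"]
    debug_print("HF translation:", translation)
    return translation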

def assess(original_japanese, student_translation):
    try:
        # get a reference English translation of the original Japanese
        generated_translation = translate_hf(original_japanese)
        debug_print("Generated translation:", generated_translation)
    except Exception as e:
        return "Error in processing translation.", str(e)

    try:
        prompt = (f"Evaluate the student's English translation of Japanese for accuracy and naturalness. "
                  f"Original: {original_japanese}, "
                  f"Reference Translation: {generated_translation}, "
                  f"Student Translation: {student_translation}. "
                  "Highlight errors, suggest improvements, and note any nuances. Provide concise and very simple "
                  "feedback for an English language learner aimed at improving their translation skills. "
                  "Where possible, give concrete examples.")

        debug_print(prompt)

        # Evaluate the student's translation attempt
        response = client.chat.completions.create(
            messages=[
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
            model="gpt-3.5-turbo",
        )

        debug_print("Full GPT response:", response)

        evaluation_feedback = response.choices[0].message.content

        return generated_translation, evaluation_feedback
    except Exception as e:
        return "Error in processing evaluation.", str(e)

assessor = gr.Interface(fn=assess,
                        inputs=[
                            gr.Textbox(label="Original Japanese", placeholder="Enter the Japanese text here", lines=4, value=learning_content),
                            gr.Textbox(label="Student Translation", placeholder="Enter your English translation attempt here", lines=4)
                        ],
                        outputs=[
                            gr.Textbox(label="Machine Generated Translation"),
                            gr.Textbox(label="Evaluation Feedback")
                        ],
                        title="Translation Practice",
                        description="Enter a Japanese sentence and your English translation attempt to receive evaluation feedback."
                        )

history_openai_format = []  # seeded with the system prompt inside the Blocks layout below


def predict(message, history):
    # Build the message list fresh on each call: `history` from Gradio already
    # holds the whole conversation, so appending to the module-level list
    # would duplicate messages on every turn.
    messages = list(history_openai_format)  # starts with the system prompt
    for human, assistant in history:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})

    debug_print("### Messages:", messages)

    try:
        response = client.chat.completions.create(
            model='gpt-3.5-turbo',
            messages=messages,
            temperature=0.7,
            #max_tokens=150,
            stream=True
        )
    except Exception as e:
        debug_print("Error in getting LLM response.", str(e))
        return  # nothing to stream from

    try:
        partial_message = ""
        for chunk in response:
            if chunk.choices[0].delta.content is not None:
                partial_message = partial_message + chunk.choices[0].delta.content
                yield partial_message
    except Exception as e:
        debug_print("Error in streaming output", str(e))

strategies = '''
- making connections between the text and their prior knowledge;
- forming and testing hypotheses about texts;
- asking questions about the text;
- creating mental images or visualising;
- inferring meaning from the text;
- identifying the writer’s purpose and point of view;
- identifying the main idea or theme in the text;
- summarising the information or events in the text;
- analysing and synthesising ideas, information, structures, and features in the text;
- evaluating ideas and information'''




with gr.Blocks() as demo:
    with gr.Row():
        learner_data = gr.Textbox(label="Learner Data", placeholder="Enter learner data here...", lines=4, value="Honoka is a Japanese EFL student. [summary of relevant student data]")
        learning_content = gr.Textbox(label="Learning Content", placeholder="Enter learning content here...", lines=4, value=learning_content)
        teacher_prompt = gr.Textbox(label="Teacher Prompt", placeholder="Enter chat guidance here...", lines=4, 
                                    value=f"You are a professional EFL teacher. Help the student actively read the text using these strategies: {strategies}. Use simple vocabulary and short sentences a beginner would understand. Guide the conversation to discuss the Learning Content below.")
        
        # pre prompt the history_openai_format list
        history_openai_format.append({"role": "system", "content": f"{teacher_prompt.value} Learner Data: {learner_data.value}. Learning Content: {learning_content.value}. "})

    try:
        gr.ChatInterface(predict)
    except Exception as e:
        debug_print("Error in gr.ChatInterface(predict)", str(e))

demo.launch(debug=True)