Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -44,9 +44,6 @@ model1 = AutoModelForCausalLM.from_pretrained(MODEL_LIST[1]).to(device)
 tokenizer2 = AutoTokenizer.from_pretrained(MODEL_LIST[2])
 model2 = AutoModelForCausalLM.from_pretrained(MODEL_LIST[2]).to(device)
 
-messages = [{"role": "user", "content": "List the steps to bake a chocolate cake from scratch."}]
-
-
 #@spaces.GPU()
 def stream_chat(
     message: str,
@@ -56,7 +53,7 @@ def stream_chat(
     top_p: float = 1.0,
     top_k: int = 20,
     penalty: float = 1.2,
-    choice: str = "
+    choice: str = "135M"
 ):
     print(f'message: {message}')
     print(f'history: {history}')
@@ -159,7 +156,7 @@ with gr.Blocks(css=CSS, theme="soft") as demo:
         ),
         gr.Radio(
             ["135M", "360M", "1.7B"],
-            value="
+            value="135M",
             label="Load Model",
             render=False,
         ),
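In short, the commit removes a leftover hard-coded test prompt (the messages list) and pins an explicit "135M" default in two places: the choice parameter of stream_chat and the value of the "Load Model" radio. Below is a minimal sketch of how that choice string is presumably resolved to one of the model/tokenizer pairs loaded at startup; the MODELS dict and pick_model helper are illustrative assumptions, not code from this commit, and tokenizer0/model0 and tokenizer1/model1 are inferred from the MODEL_LIST pattern visible in the hunk headers.

# Hypothetical helper, assuming the app keeps all three sizes loaded up
# front as (tokenizer, model) pairs indexed by their radio label.
MODELS = {
    "135M": (tokenizer0, model0),  # assumed: loaded from MODEL_LIST[0]
    "360M": (tokenizer1, model1),  # model1 appears in the first hunk header
    "1.7B": (tokenizer2, model2),  # tokenizer2/model2 appear in the diff context
}

def pick_model(choice: str = "135M"):
    # Default mirrors the new stream_chat signature; an unknown label
    # falls back to the smallest model rather than raising.
    return MODELS.get(choice, MODELS["135M"])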
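On the UI side, here is a self-contained Gradio sketch of the wiring this commit touches: the radio's value="135M" preselects the same label that the function signature now defaults to, so the widget state and the fallback default stay in sync. This is an illustrative reconstruction under assumptions, not the Space's actual layout or generation code.

import gradio as gr

def stream_chat(message, history, choice: str = "135M"):
    # Placeholder body: the real app streams tokens from the model picked
    # by `choice`; here we just echo to keep the sketch runnable.
    return f"[{choice}] {message}"

with gr.Blocks(theme="soft") as demo:
    gr.ChatInterface(
        fn=stream_chat,
        additional_inputs=[
            gr.Radio(
                ["135M", "360M", "1.7B"],
                value="135M",
                label="Load Model",
                render=False,  # ChatInterface renders it in its own panel
            ),
        ],
    )

if __name__ == "__main__":
    demo.launch()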