Update app.py
app.py (CHANGED):
@@ -10,7 +10,10 @@ models=[
     "facebook/MobileLLM-1B",
 ]
 client_z=[]
+# Use a pipeline as a high-level helper
+from transformers import pipeline
 
+#pipe = pipeline("text-generation", model="facebook/MobileLLM-125M", trust_remote_code=True)
 
 def load_models(inp,new_models):
     if not new_models:
@@ -21,7 +24,8 @@ def load_models(inp,new_models):
     #print(new_models[inp[0]])
     client_z.clear()
     for z,ea in enumerate(inp):
-        client_z.append(InferenceClient(new_models[inp[z]]))
+        #client_z.append(InferenceClient(new_models[inp[z]]))
+        client_z.append(pipeline("text-generation", model=new_models[inp[z]], trust_remote_code=True))
         out_box[z]=(gr.update(label=new_models[inp[z]]))
     return out_box[0],out_box[1],out_box[2],out_box[3]
 def format_prompt_default(message, history):
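
The change swaps the remote huggingface_hub InferenceClient for local transformers text-generation pipelines, so each selected model is downloaded and run on the Space's own hardware rather than called over the Inference API; trust_remote_code=True is passed because the MobileLLM checkpoints ship custom modeling code. As a minimal standalone sketch of what the new loading path does (outside the Gradio app, with an illustrative prompt and generation settings not taken from the commit):

# Sketch: load one model the way the updated load_models() does, then call it.
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="facebook/MobileLLM-125M",  # same family as the commented-out example in the diff
    trust_remote_code=True,           # required for checkpoints with custom modeling code
)

out = pipe("The capital of France is", max_new_tokens=20)
print(out[0]["generated_text"])

Each element appended to client_z is such a pipeline object, so downstream code that previously invoked InferenceClient methods would need to call the pipeline with a prompt string instead, as above.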
|