zhaozitian committed on
Commit
7148ee7
1 Parent(s): a2e7059

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -5
app.py CHANGED
@@ -108,16 +108,16 @@ def evaluate(
108
  )
109
  s = generation_output.sequences[0]
110
  output = tokenizer.decode(s)
111
- return output.split("### Response:")[1].strip()
112
 
113
 
114
  g = gr.Interface(
115
  fn=evaluate,
116
  inputs=[
117
  gr.components.Textbox(
118
- lines=2, label="Instruction", placeholder="Tell me about alpacas."
119
  ),
120
- gr.components.Textbox(lines=2, label="Input", placeholder="none"),
121
  gr.components.Slider(minimum=0, maximum=1, value=0.1, label="Temperature"),
122
  gr.components.Slider(minimum=0, maximum=1, value=0.75, label="Top p"),
123
  gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label="Top k"),
@@ -132,8 +132,11 @@ g = gr.Interface(
132
  label="Output",
133
  )
134
  ],
135
- title="🦙🌲 Alpaca-LoRA",
136
- description="Alpaca-LoRA is a 7B-parameter LLaMA model finetuned to follow instructions. It is trained on the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset and makes use of the Huggingface LLaMA implementation. For more information, please visit [the project's website](https://github.com/tloen/alpaca-lora).",
 
 
 
137
  )
138
  g.queue(concurrency_count=1)
139
  g.launch()
 
108
  )
109
  s = generation_output.sequences[0]
110
  output = tokenizer.decode(s)
111
+ return output.split("### Response:")[1].strip().replace('</s>', '')
112
 
113
 
114
  g = gr.Interface(
115
  fn=evaluate,
116
  inputs=[
117
  gr.components.Textbox(
118
+ lines=2, label="Instruction", placeholder="例:日本語から英語に翻訳してください。"
119
  ),
120
+ gr.components.Textbox(lines=2, label="Input", placeholder="天気がいいから、散歩しましょう。"),
121
  gr.components.Slider(minimum=0, maximum=1, value=0.1, label="Temperature"),
122
  gr.components.Slider(minimum=0, maximum=1, value=0.75, label="Top p"),
123
  gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label="Top k"),
 
132
  label="Output",
133
  )
134
  ],
135
+ title="Llama2_7b_chat_Japanese_Lora",
136
+ description="Llama-2-7b-chat-Japanese-LoRA is a multi-purpose large language model for Japanese text.\n\
137
+ This model is presented by the joint effort of Sparticle Inc. and A. I. Hakusan Inc.\n\n\
138
+ Llama-2-7b-chat-Japanese-LoRAは日本語テキストのための多目的大規模言語モデルです。\n\
139
+ このモデルは、Sparticle株式会社と株式会社白山人工知能の共同開発により発表されました。",
140
  )
141
  g.queue(concurrency_count=1)
142
  g.launch()