update

app.py  +24 -9
leaderboard_table_20240404.csv  +94 -0

app.py
CHANGED
@@ -211,17 +211,25 @@ def create_ranking_str(ranking, ranking_difference):
     if ranking_difference > 0:
         return f"{int(ranking)} (\u2191{int(ranking_difference)})"
     elif ranking_difference < 0:
-        return f"{int(ranking)} ({int(-ranking_difference)})"
+        return f"{int(ranking)} (\u2193{int(-ranking_difference)})"
     else:
         return f"{int(ranking)}"

 def get_arena_table(arena_df, model_table_df, arena_subset_df=None):
     arena_df = arena_df.sort_values(by=["rating"], ascending=False)
-    arena_df = arena_df.sort_values(by=["final_ranking"], ascending=True)
+    # arena_df = arena_df.sort_values(by=["final_ranking"], ascending=True)
+    arena_df = arena_df[arena_df["num_battles"] > 2000]
+
+    arena_df["final_ranking"] = range(1, len(arena_df) + 1)
     # sort by rating
-    if arena_subset_df is not None:
+    if arena_subset_df is not None:
+        # filter out models not in the arena_df
+        arena_subset_df = arena_subset_df[arena_subset_df.index.isin(arena_df.index)]
         arena_subset_df = arena_subset_df.sort_values(by=["rating"], ascending=False)
-        arena_subset_df = arena_subset_df.sort_values(by=["final_ranking"], ascending=True)
+        # arena_subset_df = arena_subset_df.sort_values(by=["final_ranking"], ascending=True)
+
+        # assign ranking by the order
+        arena_subset_df["final_ranking"] = range(1, len(arena_subset_df) + 1)
         # join arena_df and arena_subset_df on index
         arena_df = arena_subset_df.join(arena_df["final_ranking"], rsuffix="_global", how="inner")
         arena_df['ranking_difference'] = arena_df['final_ranking_global'] - arena_df['final_ranking']
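In effect, get_arena_table now ranks models by rating after dropping those with 2000 or fewer battles, ranks the category subset the same way, and joins the two so that ranking_difference is the overall rank minus the category rank; create_ranking_str then renders that delta with an up or down arrow. A minimal, self-contained sketch of the same flow with made-up models and ratings (the frames below are illustrative toy data, not the app's):

import pandas as pd

def create_ranking_str(ranking, ranking_difference):
    if ranking_difference > 0:
        return f"{int(ranking)} (\u2191{int(ranking_difference)})"
    elif ranking_difference < 0:
        return f"{int(ranking)} (\u2193{int(-ranking_difference)})"
    else:
        return f"{int(ranking)}"

# Toy overall table: index = model key; ratings and battle counts are invented.
arena_df = pd.DataFrame(
    {"rating": [1250, 1210, 1190, 1100], "num_battles": [9000, 5000, 3000, 1500]},
    index=["model-a", "model-b", "model-c", "model-d"],
)
# Toy category table (e.g. "coding"): model-d is missing, model-c beats model-b here.
arena_subset_df = pd.DataFrame(
    {"rating": [1240, 1205, 1180]},
    index=["model-a", "model-c", "model-b"],
)

# Overall ranking: sort by rating, drop low-traffic models, rank by position.
arena_df = arena_df.sort_values(by=["rating"], ascending=False)
arena_df = arena_df[arena_df["num_battles"] > 2000]
arena_df["final_ranking"] = range(1, len(arena_df) + 1)

# Category ranking: keep only models still in the overall table, then rank.
arena_subset_df = arena_subset_df[arena_subset_df.index.isin(arena_df.index)]
arena_subset_df = arena_subset_df.sort_values(by=["rating"], ascending=False)
arena_subset_df["final_ranking"] = range(1, len(arena_subset_df) + 1)

# Join the two rankings; a positive difference means the model ranks higher
# (smaller number) in the category than it does overall.
joined = arena_subset_df.join(arena_df["final_ranking"], rsuffix="_global", how="inner")
joined["ranking_difference"] = joined["final_ranking_global"] - joined["final_ranking"]

for key, row in joined.iterrows():
    print(key, create_ranking_str(row["final_ranking"], row["ranking_difference"]))
# model-a 1
# model-c 2 (↑1)
# model-b 3 (↓1)

So a model that places better within a category than it does overall gets an up arrow next to its category rank, and vice versa.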
@@ -294,6 +302,7 @@ def build_leaderboard_tab(elo_results_file, leaderboard_table_file, show_plot=Fa
     elo_chinese_results = elo_results["chinese"]
     elo_long_results = elo_results["long"]
     elo_english_results = elo_results["english"]
+    elo_coding_results = elo_results["coding"]
     elo_results = elo_results["full"]

     p1 = elo_results["win_fraction_heatmap"]
@@ -304,6 +313,7 @@ def build_leaderboard_tab(elo_results_file, leaderboard_table_file, show_plot=Fa
     arena_chinese_df = elo_chinese_results["leaderboard_table_df"]
     arena_long_df = elo_long_results["leaderboard_table_df"]
     arena_english_df = elo_english_results["leaderboard_table_df"]
+    arena_coding_df = elo_coding_results["leaderboard_table_df"]
     default_md = make_default_md(arena_df, elo_results)

     md_1 = gr.Markdown(default_md, elem_id="leaderboard_markdown")
@@ -322,15 +332,17 @@ def build_leaderboard_tab(elo_results_file, leaderboard_table_file, show_plot=Fa
             overall_rating = gr.Button("Overall")
             # update_overall_rating_df = lambda _: get_arena_table(arena_df, model_table_df)
             update_overall_rating_df = lambda x: update_leaderboard_and_plots(x, arena_df, model_table_df, None, elo_results)
+            coding_rating = gr.Button("Coding")
+            update_coding_rating_df = lambda x: update_leaderboard_and_plots(x, arena_df, model_table_df, arena_coding_df, elo_coding_results)
+            long_context_rating = gr.Button("Long Conversation")
+            update_long_context_rating_df = lambda x: update_leaderboard_and_plots(x, arena_df, model_table_df, arena_long_df, elo_long_results)
+            # update_long_context_rating_df = lambda _: get_arena_table(arena_df, model_table_df, arena_long_df)
             english_rating = gr.Button("English")
             update_english_rating_df = lambda x: update_leaderboard_and_plots(x, arena_df, model_table_df, arena_english_df, elo_english_results)
             # update_english_rating_df = lambda _: get_arena_table(arena_df, model_table_df, arena_english_df)
             chinese_rating = gr.Button("Chinese")
             update_chinese_rating_df = lambda x: update_leaderboard_and_plots(x, arena_df, model_table_df, arena_chinese_df, elo_chinese_results)
             # update_chinese_rating_df = lambda _: get_arena_table(arena_df, model_table_df, arena_chinese_df)
-            long_context_rating = gr.Button("Long Context")
-            update_long_context_rating_df = lambda x: update_leaderboard_and_plots(x, arena_df, model_table_df, arena_long_df, elo_long_results)
-            # update_long_context_rating_df = lambda _: get_arena_table(arena_df, model_table_df, arena_long_df)
             elo_display_df = gr.Dataframe(
                 headers=[
                     "Rank",
@@ -428,9 +440,10 @@ See Figure 3 below for visualization of the confidence intervals. Code to recrea
             plot_4 = gr.Plot(p4, show_label=False)

         overall_rating.click(fn=update_overall_rating_df, inputs=overall_rating, outputs=[elo_display_df, plot_1, plot_2, plot_3, plot_4, more_stats_md, leaderboard_markdown])
+        coding_rating.click(fn=update_coding_rating_df, inputs=coding_rating, outputs=[elo_display_df, plot_1, plot_2, plot_3, plot_4, more_stats_md, leaderboard_markdown])
+        long_context_rating.click(fn=update_long_context_rating_df, inputs=long_context_rating, outputs=[elo_display_df, plot_1, plot_2, plot_3, plot_4, more_stats_md, leaderboard_markdown])
         english_rating.click(fn=update_english_rating_df, inputs=english_rating, outputs=[elo_display_df, plot_1, plot_2, plot_3, plot_4, more_stats_md, leaderboard_markdown])
         chinese_rating.click(fn=update_chinese_rating_df, inputs=chinese_rating, outputs=[elo_display_df, plot_1, plot_2, plot_3, plot_4, more_stats_md, leaderboard_markdown])
-        long_context_rating.click(fn=update_long_context_rating_df, inputs=long_context_rating, outputs=[elo_display_df, plot_1, plot_2, plot_3, plot_4, more_stats_md, leaderboard_markdown])

     with gr.Accordion(
         "📚 Citation",
@@ -551,6 +564,8 @@ def build_demo(elo_results_file, leaderboard_table_file):
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument("--share", action="store_true")
+    parser.add_argument("--host", default="0.0.0.0")
+    parser.add_argument("--port", type=int, default=7860)
     args = parser.parse_args()

     elo_result_files = glob.glob("elo_results_*.pkl")
@@ -562,4 +577,4 @@ if __name__ == "__main__":
     leaderboard_table_file = leaderboard_table_files[-1]

     demo = build_demo(elo_result_file, leaderboard_table_file)
-    demo.launch(share=args.share)
+    demo.launch(share=args.share, server_name=args.host, server_port=args.port)
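The new --host and --port flags are forwarded to Gradio's launch() as server_name and server_port, so the demo can be bound to a specific interface and port, e.g. python app.py --host 127.0.0.1 --port 8080; the defaults (0.0.0.0, 7860) match the usual Gradio/Spaces serving setup.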
leaderboard_table_20240404.csv
ADDED
@@ -0,0 +1,94 @@
+key,Model,MT-bench (score),MMLU,Knowledge cutoff date,License,Organization,Link
+wizardlm-30b,WizardLM-30B,7.01,0.587,2023/6,Non-commercial,Microsoft,https://huggingface.co/WizardLM/WizardLM-30B-V1.0
+vicuna-13b-16k,Vicuna-13B-16k,6.92,0.545,2023/7,Llama 2 Community,LMSYS,https://huggingface.co/lmsys/vicuna-13b-v1.5-16k
+wizardlm-13b-v1.1,WizardLM-13B-v1.1,6.76,0.500,2023/7,Non-commercial,Microsoft,https://huggingface.co/WizardLM/WizardLM-13B-V1.1
+tulu-30b,Tulu-30B,6.43,0.581,2023/6,Non-commercial,AllenAI/UW,https://huggingface.co/allenai/tulu-30b
+guanaco-65b,Guanaco-65B,6.41,0.621,2023/5,Non-commercial,UW,https://huggingface.co/timdettmers/guanaco-65b-merged
+openassistant-llama-30b,OpenAssistant-LLaMA-30B,6.41,0.560,2023/4,Non-commercial,OpenAssistant,https://huggingface.co/OpenAssistant/oasst-sft-6-llama-30b-xor
+wizardlm-13b-v1.0,WizardLM-13B-v1.0,6.35,0.523,2023/5,Non-commercial,Microsoft,https://huggingface.co/WizardLM/WizardLM-13B-V1.0
+vicuna-7b-16k,Vicuna-7B-16k,6.22,0.485,2023/7,Llama 2 Community,LMSYS,https://huggingface.co/lmsys/vicuna-7b-v1.5-16k
+baize-v2-13b,Baize-v2-13B,5.75,0.489,2023/4,Non-commercial,UCSD,https://huggingface.co/project-baize/baize-v2-13b
+xgen-7b-8k-inst,XGen-7B-8K-Inst,5.55,0.421,2023/7,Non-commercial,Salesforce,https://huggingface.co/Salesforce/xgen-7b-8k-inst
+nous-hermes-13b,Nous-Hermes-13B,5.51,0.493,2023/6,Non-commercial,NousResearch,https://huggingface.co/NousResearch/Nous-Hermes-13b
+mpt-30b-instruct,MPT-30B-Instruct,5.22,0.478,2023/6,CC-BY-SA 3.0,MosaicML,https://huggingface.co/mosaicml/mpt-30b-instruct
+falcon-40b-instruct,Falcon-40B-Instruct,5.17,0.547,2023/5,Apache 2.0,TII,https://huggingface.co/tiiuae/falcon-40b-instruct
+h2o-oasst-openllama-13b,H2O-Oasst-OpenLLaMA-13B,4.63,0.428,2023/6,Apache 2.0,h2oai,https://huggingface.co/h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b
+gpt-4-1106-preview,GPT-4-1106-preview,9.32,-,2023/4,Proprietary,OpenAI,https://openai.com/blog/new-models-and-developer-products-announced-at-devday
+gpt-4-0314,GPT-4-0314,8.96,0.864,2021/9,Proprietary,OpenAI,https://openai.com/research/gpt-4
+claude-1,Claude-1,7.90,0.770,-,Proprietary,Anthropic,https://www.anthropic.com/index/introducing-claude
+gpt-4-0613,GPT-4-0613,9.18,-,2021/9,Proprietary,OpenAI,https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
+claude-2.0,Claude-2.0,8.06,0.785,-,Proprietary,Anthropic,https://www.anthropic.com/index/claude-2
+claude-2.1,Claude-2.1,8.18,-,-,Proprietary,Anthropic,https://www.anthropic.com/index/claude-2-1
+gpt-3.5-turbo-0613,GPT-3.5-Turbo-0613,8.39,-,2021/9,Proprietary,OpenAI,https://platform.openai.com/docs/models/gpt-3-5
+mixtral-8x7b-instruct-v0.1,Mixtral-8x7b-Instruct-v0.1,8.30,0.706,2023/12,Apache 2.0,Mistral,https://mistral.ai/news/mixtral-of-experts/
+claude-instant-1,Claude-Instant-1,7.85,0.734,-,Proprietary,Anthropic,https://www.anthropic.com/index/introducing-claude
+gpt-3.5-turbo-0314,GPT-3.5-Turbo-0314,7.94,0.700,2021/9,Proprietary,OpenAI,https://platform.openai.com/docs/models/gpt-3-5
+tulu-2-dpo-70b,Tulu-2-DPO-70B,7.89,-,2023/11,AI2 ImpACT Low-risk,AllenAI/UW,https://huggingface.co/allenai/tulu-2-dpo-70b
+yi-34b-chat,Yi-34B-Chat,-,0.735,2023/6,Yi License,01 AI,https://huggingface.co/01-ai/Yi-34B-Chat
+gemini-pro,Gemini Pro,-,0.718,2023/4,Proprietary,Google,https://blog.google/technology/ai/gemini-api-developers-cloud/
+gemini-pro-dev-api,Gemini Pro (Dev API),-,0.718,2023/4,Proprietary,Google,https://ai.google.dev/docs/gemini_api_overview
+bard-jan-24-gemini-pro,Bard (Gemini Pro),-,-,Online,Proprietary,Google,https://bard.google.com/
+wizardlm-70b,WizardLM-70B-v1.0,7.71,0.637,2023/8,Llama 2 Community,Microsoft,https://huggingface.co/WizardLM/WizardLM-70B-V1.0
+vicuna-33b,Vicuna-33B,7.12,0.592,2023/8,Non-commercial,LMSYS,https://huggingface.co/lmsys/vicuna-33b-v1.3
+starling-lm-7b-alpha,Starling-LM-7B-alpha,8.09,0.639,2023/11,CC-BY-NC-4.0,UC Berkeley,https://huggingface.co/berkeley-nest/Starling-LM-7B-alpha
+pplx-70b-online,pplx-70b-online,-,-,Online,Proprietary,Perplexity AI,https://blog.perplexity.ai/blog/introducing-pplx-online-llms
+openchat-3.5,OpenChat-3.5,7.81,0.643,2023/11,Apache-2.0,OpenChat,https://huggingface.co/openchat/openchat_3.5
+openhermes-2.5-mistral-7b,OpenHermes-2.5-Mistral-7b,-,-,2023/11,Apache-2.0,NousResearch,https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B
+gpt-3.5-turbo-1106,GPT-3.5-Turbo-1106,8.32,-,2021/9,Proprietary,OpenAI,https://platform.openai.com/docs/models/gpt-3-5
+llama-2-70b-chat,Llama-2-70b-chat,6.86,0.630,2023/7,Llama 2 Community,Meta,https://huggingface.co/meta-llama/Llama-2-70b-chat-hf
+solar-10.7b-instruct-v1.0,SOLAR-10.7B-Instruct-v1.0,7.58,0.662,2023/11,CC-BY-NC-4.0,Upstage AI,https://huggingface.co/upstage/SOLAR-10.7B-Instruct-v1.0
+dolphin-2.2.1-mistral-7b,Dolphin-2.2.1-Mistral-7B,-,-,2023/10,Apache-2.0,Cognitive Computations,https://huggingface.co/ehartford/dolphin-2.2.1-mistral-7b
+wizardlm-13b,WizardLM-13b-v1.2,7.20,0.527,2023/7,Llama 2 Community,Microsoft,https://huggingface.co/WizardLM/WizardLM-13B-V1.2
+zephyr-7b-beta,Zephyr-7b-beta,7.34,0.614,2023/10,MIT,HuggingFace,https://huggingface.co/HuggingFaceH4/zephyr-7b-beta
+mpt-30b-chat,MPT-30B-chat,6.39,0.504,2023/6,CC-BY-NC-SA-4.0,MosaicML,https://huggingface.co/mosaicml/mpt-30b-chat
+vicuna-13b,Vicuna-13B,6.57,0.558,2023/7,Llama 2 Community,LMSYS,https://huggingface.co/lmsys/vicuna-13b-v1.5
+qwen-14b-chat,Qwen-14B-Chat,6.96,0.665,2023/8,Qianwen LICENSE,Alibaba,https://huggingface.co/Qwen/Qwen-14B-Chat
+zephyr-7b-alpha,Zephyr-7b-alpha,6.88,-,2023/10,MIT,HuggingFace,https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha
+codellama-34b-instruct,CodeLlama-34B-instruct,-,0.537,2023/7,Llama 2 Community,Meta,https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf
+falcon-180b-chat,falcon-180b-chat,-,0.680,2023/9,Falcon-180B TII License,TII,https://huggingface.co/tiiuae/falcon-180B-chat
+guanaco-33b,Guanaco-33B,6.53,0.576,2023/5,Non-commercial,UW,https://huggingface.co/timdettmers/guanaco-33b-merged
+llama-2-13b-chat,Llama-2-13b-chat,6.65,0.536,2023/7,Llama 2 Community,Meta,https://huggingface.co/meta-llama/Llama-2-13b-chat-hf
+mistral-7b-instruct,Mistral-7B-Instruct-v0.1,6.84,0.554,2023/9,Apache 2.0,Mistral,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1
+pplx-7b-online,pplx-7b-online,-,-,Online,Proprietary,Perplexity AI,https://blog.perplexity.ai/blog/introducing-pplx-online-llms
+llama-2-7b-chat,Llama-2-7b-chat,6.27,0.458,2023/7,Llama 2 Community,Meta,https://huggingface.co/meta-llama/Llama-2-7b-chat-hf
+vicuna-7b,Vicuna-7B,6.17,0.498,2023/7,Llama 2 Community,LMSYS,https://huggingface.co/lmsys/vicuna-7b-v1.5
+palm-2,PaLM-Chat-Bison-001,6.40,-,2021/6,Proprietary,Google,https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models#foundation_models
+koala-13b,Koala-13B,5.35,0.447,2023/4,Non-commercial,UC Berkeley,https://bair.berkeley.edu/blog/2023/04/03/koala/
+chatglm3-6b,ChatGLM3-6B,-,-,2023/10,Apache-2.0,Tsinghua,https://huggingface.co/THUDM/chatglm3-6b
+gpt4all-13b-snoozy,GPT4All-13B-Snoozy,5.41,0.430,2023/3,Non-commercial,Nomic AI,https://huggingface.co/nomic-ai/gpt4all-13b-snoozy
+mpt-7b-chat,MPT-7B-Chat,5.42,0.320,2023/5,CC-BY-NC-SA-4.0,MosaicML,https://huggingface.co/mosaicml/mpt-7b-chat
+chatglm2-6b,ChatGLM2-6B,4.96,0.455,2023/6,Apache-2.0,Tsinghua,https://huggingface.co/THUDM/chatglm2-6b
+RWKV-4-Raven-14B,RWKV-4-Raven-14B,3.98,0.256,2023/4,Apache 2.0,RWKV,https://huggingface.co/BlinkDL/rwkv-4-raven
+alpaca-13b,Alpaca-13B,4.53,0.481,2023/3,Non-commercial,Stanford,https://crfm.stanford.edu/2023/03/13/alpaca.html
+oasst-pythia-12b,OpenAssistant-Pythia-12B,4.32,0.270,2023/4,Apache 2.0,OpenAssistant,https://huggingface.co/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5
+chatglm-6b,ChatGLM-6B,4.50,0.361,2023/3,Non-commercial,Tsinghua,https://huggingface.co/THUDM/chatglm-6b
+fastchat-t5-3b,FastChat-T5-3B,3.04,0.477,2023/4,Apache 2.0,LMSYS,https://huggingface.co/lmsys/fastchat-t5-3b-v1.0
+stablelm-tuned-alpha-7b,StableLM-Tuned-Alpha-7B,2.75,0.244,2023/4,CC-BY-NC-SA-4.0,Stability AI,https://huggingface.co/stabilityai/stablelm-tuned-alpha-7b
+dolly-v2-12b,Dolly-V2-12B,3.28,0.257,2023/4,MIT,Databricks,https://huggingface.co/databricks/dolly-v2-12b
+llama-13b,LLaMA-13B,2.61,0.470,2023/2,Non-commercial,Meta,https://arxiv.org/abs/2302.13971
+mistral-medium,Mistral Medium,8.61,0.753,-,Proprietary,Mistral,https://mistral.ai/news/la-plateforme/
+llama2-70b-steerlm-chat,NV-Llama2-70B-SteerLM-Chat,7.54,0.685,2023/11,Llama 2 Community,Nvidia,https://huggingface.co/nvidia/Llama2-70B-SteerLM-Chat
+stripedhyena-nous-7b,StripedHyena-Nous-7B,-,-,2023/12,Apache 2.0,Together AI,https://huggingface.co/togethercomputer/StripedHyena-Nous-7B
+deepseek-llm-67b-chat,DeepSeek-LLM-67B-Chat,-,0.713,2023/11,DeepSeek License,DeepSeek AI,https://huggingface.co/deepseek-ai/deepseek-llm-67b-chat
+gpt-4-0125-preview,GPT-4-0125-preview,-,-,2023/12,Proprietary,OpenAI,https://openai.com/blog/new-models-and-developer-products-announced-at-devday
+qwen1.5-72b-chat,Qwen1.5-72B-Chat,8.61,0.775,2024/2,Qianwen LICENSE,Alibaba,https://qwenlm.github.io/blog/qwen1.5/
+qwen1.5-7b-chat,Qwen1.5-7B-Chat,7.6,0.610,2024/2,Qianwen LICENSE,Alibaba,https://qwenlm.github.io/blog/qwen1.5/
+qwen1.5-4b-chat,Qwen1.5-4B-Chat,-,0.561,2024/2,Qianwen LICENSE,Alibaba,https://qwenlm.github.io/blog/qwen1.5/
+openchat-3.5-0106,OpenChat-3.5-0106,7.8,0.658,2024/1,Apache-2.0,OpenChat,https://huggingface.co/openchat/openchat-3.5-0106
+nous-hermes-2-mixtral-8x7b-dpo,Nous-Hermes-2-Mixtral-8x7B-DPO,-,-,2024/1,Apache-2.0,NousResearch,https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO
+gpt-3.5-turbo-0125,GPT-3.5-Turbo-0125,-,-,2021/9,Proprietary,OpenAI,https://platform.openai.com/docs/models/gpt-3-5-turbo
+mistral-next,Mistral-Next,-,-,-,Proprietary,Mistral,https://chat.mistral.ai/chat
+mistral-large-2402,Mistral-Large-2402,-,0.812,-,Proprietary,Mistral,https://mistral.ai/news/mistral-large/
+gemma-7b-it,Gemma-7B-it,-,0.643,2024/2,Gemma license,Google,https://huggingface.co/google/gemma-7b-it
+gemma-2b-it,Gemma-2B-it,-,0.423,2024/2,Gemma license,Google,https://huggingface.co/google/gemma-2b-it
+mistral-7b-instruct-v0.2,Mistral-7B-Instruct-v0.2,7.6,-,2023/12,Apache-2.0,Mistral,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2
+claude-3-sonnet-20240229,Claude 3 Sonnet,-,0.790,2023/8,Proprietary,Anthropic,https://www.anthropic.com/news/claude-3-family
+claude-3-opus-20240229,Claude 3 Opus,-,0.868,2023/8,Proprietary,Anthropic,https://www.anthropic.com/news/claude-3-family
+codellama-70b-instruct,CodeLlama-70B-instruct,-,-,2024/1,Llama 2 Community,Meta,https://huggingface.co/codellama/CodeLlama-70b-hf
+olmo-7b-instruct,OLMo-7B-instruct,-,-,2024/2,Apache-2.0,Allen AI,https://huggingface.co/allenai/OLMo-7B-Instruct
+claude-3-haiku-20240307,Claude 3 Haiku,-,0.752,2023/8,Proprietary,Anthropic,https://www.anthropic.com/news/claude-3-family
+starling-lm-7b-beta,Starling-LM-7B-beta,8.12,-,2024/3,Apache-2.0,Nexusflow,https://huggingface.co/Nexusflow/Starling-LM-7B-beta
+dbrx-instruct,DBRX-instruct,-,-,2024/3,Apache-2.0,Databricks,-
+command-r,Command R,-,-,2024/3,Apache-2.0,Cohere,-
+qwen1.5-14b-chat,Qwen1.5-14B-Chat,-,-,2024/2,Qianwen LICENSE,Alibaba,https://qwenlm.github.io/blog/qwen1.5/
+qwen1.5-32b-chat,Qwen1.5-32B-Chat,-,-,2024/2,Qianwen LICENSE,Alibaba,https://qwenlm.github.io/blog/qwen1.5/
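The added file is a plain CSV keyed by model id, with the columns shown in its header row. A minimal sketch of inspecting it with pandas (only the filename and the Mixtral row used below come from the added file; nothing about the app's own loading code is assumed):

import pandas as pd

# Read the newly added metadata table; "-" entries stay as plain strings.
model_table_df = pd.read_csv("leaderboard_table_20240404.csv")

# Look up one model by its key.
row = model_table_df.set_index("key").loc["mixtral-8x7b-instruct-v0.1"]
print(row["Model"])               # Mixtral-8x7b-Instruct-v0.1
print(row["MT-bench (score)"])    # 8.30
print(row["License"], row["Link"])  # Apache 2.0 https://mistral.ai/news/mixtral-of-experts/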