import gradio as gr
import models
import results
import theme
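# Local modules shipped with this Space: `models` defines the AI service options
# (BaseTCOModel subclasses and the ModelPage container), `results` holds the cost
# computation and result-rendering helpers, and `theme` provides the custom Gradio style.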
text = "<h1 style='text-align: center; color: #333333; font-size: 40px;'>AI TCO Comparison Calculator"
text2 = "<h1 style='color: #333333; font-size: 20px;'>🙌 Want to contribute?"
text3 = "Please note that the cost/request only defines the infrastructure cost for deployment. The labor cost must be added for the whole AI model service deployment TCO."
intro = f"""
<p>Discover and compare various AI model services, including SaaS and "Deploy Yourself" solutions, based on the Total Cost of Ownership for their deployment. 😊</p>
<p>Please keep in mind that our focus is on getting the AI model service up and running, not accounting for additional maintenance costs. 🚀</p>
"""
contribution = f"""
<p>If you want to contribute to the calculator by <strong>adding your own AI service option</strong>, follow this <a href="https://huggingface.co/spaces/mithril-security/TCO_calculator/blob/main/How_to_contribute.md">tutorial</a> 👈. </p>
"""
formula = r"""
$CR = \frac{CIT_{1K} \times IT + COT_{1K} \times OT}{1000}$ <br>
with: <br>
$CR$ = Cost per Request <br>
$CIT_{1K}$ = Cost per 1000 Input Tokens <br>
$COT_{1K}$ = Cost per 1000 Output Tokens <br>
$IT$ = Input Tokens <br>
$OT$ = Output Tokens
"""
def on_use_case_change(use_case):
    """Reset the default number of input/output tokens per request when the use case changes."""
    if use_case == "Summarize":
        return gr.update(value=500), gr.update(value=200)
    elif use_case == "Question-Answering":
        return gr.update(value=300), gr.update(value=300)
    else:
        return gr.update(value=50), gr.update(value=10)
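# For example, on_use_case_change("Summarize") returns two gr.update(...) payloads
# that set the input and output token sliders to 500 and 200 respectively.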
style = theme.Style()
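# UI layout built below: a use-case selector with token sliders, two side-by-side
# AI service option panels, and a results section (table, ratio summary, cost plot).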
with gr.Blocks(theme=style) as demo:
    Models: list[type[models.BaseTCOModel]] = [models.OpenAIModelGPT4, models.OpenAIModelGPT3_5, models.CohereModel, models.DIYLlama2Model]
    model_names = [Model().get_name() for Model in Models]

    gr.Markdown(value=text)
    gr.Markdown(value=intro + text2)
    gr.Markdown(value=contribution)

    with gr.Row():
        with gr.Column():
            with gr.Row():
                use_case = gr.Dropdown(["Summarize", "Question-Answering", "Classification"], value="Question-Answering", label=" Describe your use case ")
            with gr.Accordion("Click here if you want to customize the number of input and output tokens per request", open=False):
                with gr.Row():
                    input_tokens = gr.Slider(minimum=1, maximum=1000, value=300, step=1, label=" Input tokens per request", info="We suggest a value we believe best suits your use case, but feel free to adjust it", interactive=True)
                    output_tokens = gr.Slider(minimum=1, maximum=1000, value=300, step=1, label=" Output tokens per request", info="We suggest a value we believe best suits your use case, but feel free to adjust it", interactive=True)
            with gr.Row(visible=False):
                num_users = gr.Number(value=1000, interactive=True, label=" Number of users for your service ")

    use_case.change(on_use_case_change, inputs=use_case, outputs=[input_tokens, output_tokens])
    with gr.Row():
        with gr.Column():
            page1 = models.ModelPage(Models)
            dropdown = gr.Dropdown(model_names, interactive=True, label=" First AI service option ")
            with gr.Accordion("Click here for more information on the computation parameters for your first AI service option", open=False):
                page1.render()
        with gr.Column():
            page2 = models.ModelPage(Models)
            dropdown2 = gr.Dropdown(model_names, interactive=True, label=" Second AI service option ")
            with gr.Accordion("Click here for more information on the computation parameters for your second AI service option", open=False):
                page2.render()
    results.set_shared_pages(page1, page2)

    dropdown.change(page1.make_model_visible, inputs=[dropdown, use_case], outputs=page1.get_all_components())
    dropdown2.change(page2.make_model_visible, inputs=[dropdown2, use_case], outputs=page2.get_all_components())

    compute_tco_btn = gr.Button("Compute & Compare", size="lg", variant="primary", scale=1)

    # State holders for the computed cost/request, labor cost and latency of each option
    tco1, tco2, labor_cost1, labor_cost2, latency, latency2 = [gr.State() for _ in range(6)]
    with gr.Row():
        with gr.Accordion("Click here to see the cost/request computation formula", open=False):
            tco_formula = gr.Markdown(formula)

    with gr.Row(variant='panel'):
        with gr.Column():
            with gr.Row():
                table = gr.Markdown()
            with gr.Row():
                info = gr.Markdown(text3)
            with gr.Row():
                with gr.Column(scale=1):
                    image = gr.Image(visible=False)
                    ratio = gr.Markdown()
                with gr.Column(scale=2):
                    plot = gr.LinePlot(visible=False)
    # Compute the costs, then chain the rendering of the comparison table, ratio summary and plot
    compute_tco_btn.click(results.compute_cost_per_request, inputs=page1.get_all_components_for_cost_computing() + page2.get_all_components_for_cost_computing() + [dropdown, dropdown2, input_tokens, output_tokens], outputs=[tco1, latency, labor_cost1, tco2, latency2, labor_cost2])\
        .then(results.create_table, inputs=[tco1, tco2, labor_cost1, labor_cost2, dropdown, dropdown2, latency, latency2], outputs=table)\
        .then(results.compare_info, inputs=[tco1, tco2, dropdown, dropdown2], outputs=[image, ratio])\
        .then(results.update_plot, inputs=[tco1, tco2, dropdown, dropdown2, labor_cost1, labor_cost2], outputs=plot)

demo.launch(debug=True)