Spaces:
Running
Running
Corrections
Browse files
app.py
CHANGED
@@ -3,16 +3,14 @@ import models
|
|
3 |
import results
|
4 |
import theme
|
5 |
|
6 |
-
text = "<h1 style='text-align: center; color: #333333; font-size: 40px;'>AI TCO Comparison Calculator"
|
7 |
-
text2 = "<h1 style='color: #333333; font-size: 20px;'>🐱
|
8 |
text3 = "Please note that the cost/request only defines the infrastructure cost for deployment. The labor cost must be added for the whole AI model service deployment TCO."
|
9 |
intro = f"""
|
10 |
<p>Discover and compare various AI model services, including SaaS and "Deploy Yourself" solutions, based on the Total Cost of Ownership for their deployment. 🐱</p>
|
11 |
<p>Please keep in mind that our focus is on getting the AI model service up and running, not accounting for additional maintenance costs.🐱</p>
|
12 |
"""
|
13 |
-
|
14 |
-
<p>If you want to contribute to the calculator by <strong>adding your own AI service option</strong>, follow this <a href="https://huggingface.co/spaces/mithril-security/TCO_calculator/blob/main/How_to_contribute.md">tutorial</a> 🐱. </p>
|
15 |
-
"""
|
16 |
formula = r"""
|
17 |
$CR = \frac{CIT_{1K} \times IT + COT_{1K} \times OT}{1000}$ <br>
|
18 |
with: <br>
|
@@ -24,8 +22,8 @@ $OT$ = Output Tokens
|
|
24 |
"""
|
25 |
|
26 |
def on_use_case_change(use_case):
|
27 |
-
if use_case == "
|
28 |
-
return gr.update(value=
|
29 |
elif use_case == "Question-Answering":
|
30 |
return gr.update(value=300), gr.update(value=300)
|
31 |
else:
|
@@ -43,13 +41,13 @@ with gr.Blocks(theme=style) as demo:
|
|
43 |
with gr.Row():
|
44 |
with gr.Column():
|
45 |
with gr.Row():
|
46 |
-
use_case = gr.Dropdown(["Summarize", "Question-Answering", "Classification"], value="
|
47 |
with gr.Accordion("Click here if you want to customize the number of input and output tokens per request", open=False):
|
48 |
with gr.Row():
|
49 |
input_tokens = gr.Slider(minimum=1, maximum=1000, value=300, step=1, label=" Input tokens per request", info="We suggest a value that we believe best suit your use case choice but feel free to adjust", interactive=True)
|
50 |
output_tokens = gr.Slider(minimum=1, maximum=1000, value=300, step=1, label=" Output tokens per request", info="We suggest a value that we believe best suit your use case choice but feel free to adjust", interactive=True)
|
51 |
with gr.Row(visible=False):
|
52 |
-
num_users = gr.Number(value="
|
53 |
|
54 |
use_case.change(on_use_case_change, inputs=use_case, outputs=[input_tokens, output_tokens])
|
55 |
|
|
|
3 |
import results
|
4 |
import theme
|
5 |
|
6 |
+
text = "<h1 style='text-align: center; color: #333333; font-size: 40px;'>AI TCO Comparison Calculator -- ML/PLD/SL"
|
7 |
+
text2 = "<h1 style='color: #333333; font-size: 20px;'>🐱 "
|
8 |
text3 = "Please note that the cost/request only defines the infrastructure cost for deployment. The labor cost must be added for the whole AI model service deployment TCO."
|
9 |
intro = f"""
|
10 |
<p>Discover and compare various AI model services, including SaaS and "Deploy Yourself" solutions, based on the Total Cost of Ownership for their deployment. 🐱</p>
|
11 |
<p>Please keep in mind that our focus is on getting the AI model service up and running, not accounting for additional maintenance costs.🐱</p>
|
12 |
"""
|
13 |
+
|
|
|
|
|
14 |
formula = r"""
|
15 |
$CR = \frac{CIT_{1K} \times IT + COT_{1K} \times OT}{1000}$ <br>
|
16 |
with: <br>
|
|
|
22 |
"""
|
23 |
|
24 |
def on_use_case_change(use_case):
|
25 |
+
if use_case == "ChatBOT":
|
26 |
+
return gr.update(value=300), gr.update(value=700)
|
27 |
elif use_case == "Question-Answering":
|
28 |
return gr.update(value=300), gr.update(value=300)
|
29 |
else:
|
|
|
41 |
with gr.Row():
|
42 |
with gr.Column():
|
43 |
with gr.Row():
|
44 |
+
use_case = gr.Dropdown(["Summarize", "Question-Answering", "Classification","ChatBOT"], value="ChatBOT", label=" Describe your use case ")
|
45 |
with gr.Accordion("Click here if you want to customize the number of input and output tokens per request", open=False):
|
46 |
with gr.Row():
|
47 |
input_tokens = gr.Slider(minimum=1, maximum=1000, value=300, step=1, label=" Input tokens per request", info="We suggest a value that we believe best suit your use case choice but feel free to adjust", interactive=True)
|
48 |
output_tokens = gr.Slider(minimum=1, maximum=1000, value=300, step=1, label=" Output tokens per request", info="We suggest a value that we believe best suit your use case choice but feel free to adjust", interactive=True)
|
49 |
with gr.Row(visible=False):
|
50 |
+
num_users = gr.Number(value="100", interactive = True, label=" Number of users for your service ")
|
51 |
|
52 |
use_case.change(on_use_case_change, inputs=use_case, outputs=[input_tokens, output_tokens])
|
53 |
|