from gradio.components import Component
import gradio as gr
from abc import ABC, abstractmethod
import inspect


class BaseTCOModel(ABC):
    # TODO: Find a way to specify which component should be used for computing the cost
    def __setattr__(self, name, value):
        # Collect every Gradio component assigned to an attribute so the app can retrieve them later
        if isinstance(value, Component):
            self._components.append(value)
        self.__dict__[name] = value

    def __init__(self):
        # Bypass the custom __setattr__ above so the list itself is not registered as a component
        super(BaseTCOModel, self).__setattr__("_components", [])

    def get_components(self) -> list[Component]:
        return self._components

    def get_components_for_cost_computing(self):
        return self.components_for_cost_computing

    def get_name(self):
        return self.name

    def register_components_for_cost_computing(self):
        # Map the parameter names of compute_cost_per_token to the components stored
        # under the attributes of the same names
        args = inspect.getfullargspec(self.compute_cost_per_token)[0][1:]
        self.components_for_cost_computing = [self.__getattribute__(arg) for arg in args]

    @abstractmethod
    def compute_cost_per_token(self):
        pass

    @abstractmethod
    def render(self):
        pass

    def set_name(self, name):
        self.name = name

    def set_latency(self, latency):
        self.latency = latency

    def get_latency(self):
        return self.latency

    def set_formula(self, formula):
        self.formula = formula

    def get_formula(self):
        return self.formula
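

# How the pieces fit together (an illustrative note, not code that is executed here):
# __setattr__ intercepts every attribute assignment and, when the value is a Gradio
# Component, also appends it to self._components. After render() has created the
# components, register_components_for_cost_computing() reads the parameter names of
# compute_cost_per_token via inspect.getfullargspec and looks up the attributes with the
# same names. For example, a signature like
#
#     def compute_cost_per_token(self, input_cost_per_token, output_cost_per_token, labor):
#
# resolves to [self.input_cost_per_token, self.output_cost_per_token, self.labor], in the
# same order as the parameters, which is why every parameter of compute_cost_per_token
# must match the name of a component attribute created in render().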


# The name of your new model service's class
class NewModel(BaseTCOModel):

    def __init__(self):
        super().__init__()
        # Name of the AI model service and the category it belongs to (SaaS, Open source)
        self.set_name("(Category) Service name")
        self.set_latency("The average latency of your model")
    def render(self):
        # Create as many Gradio components as you want to provide information or
        # customization options to the user.
        # Set visible=False on all of them, and don't forget to set interactive=False
        # on any component whose value is fixed.
        self.model_parameter = gr.Dropdown(
            ["Option 1", "Option 2"],
            value="Option 1",
            interactive=True,
            label="Title for this parameter",
            visible=False,
            info="Add some information to clarify specific aspects of your parameter",
        )

        # Set the input and output cost per token.
        # These values can be updated by a function defined below that is triggered by a
        # change in the parameters; choose defaults that match the default parameters.
        self.input_cost_per_token = gr.Number(
            0.1,
            visible=False,
            label="($) Price/1K input prompt tokens",
            interactive=False,
        )
        self.output_cost_per_token = gr.Number(
            0.2,
            visible=False,
            label="($) Price/1K output prompt tokens",
            interactive=False,
        )
        # Create update functions that adjust your cost/token values depending on the user's choices
        def on_model_parameter_change(model_parameter):
            if model_parameter == "Option 1":
                input_tokens_cost_per_token = 0.1
                output_tokens_cost_per_token = 0.2
            else:
                input_tokens_cost_per_token = 0.2
                output_tokens_cost_per_token = 0.4
            return input_tokens_cost_per_token, output_tokens_cost_per_token

        # Trigger the value updates linked to the parameter change
        self.model_parameter.change(
            on_model_parameter_change,
            inputs=self.model_parameter,
            outputs=[self.input_cost_per_token, self.output_cost_per_token],
        )
        # Add the labor cost of your solution.
        # Note that for an open-source solution we estimate it at $1,000 per month,
        # and for a SaaS solution at $0.
        self.labor = gr.Number(
            0,
            visible=False,
            label="($) Labor cost per month",
            info="This is an estimate of the labor cost of the AI engineer in charge of deploying the model",
            interactive=True,
        )
    def compute_cost_per_token(self, input_cost_per_token, output_cost_per_token, labor):
        # Perform any additional computation on your cost-per-token values here;
        # you often need to convert some values at this step.
        return input_cost_per_token, output_cost_per_token, labor
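

# A minimal, hypothetical wiring sketch (an assumption for illustration only; the actual
# app of this Space may assemble its models differently): render the model inside a
# gr.Blocks context, then register the components whose values are passed to
# compute_cost_per_token.
if __name__ == "__main__":
    with gr.Blocks() as demo:
        model = NewModel()
        model.render()
        model.register_components_for_cost_computing()
        # These components feed compute_cost_per_token, in the same order as its
        # parameters (input_cost_per_token, output_cost_per_token, labor)
        print([component.label for component in model.get_components_for_cost_computing()])
    demo.launch()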