Commit 31e1648 (parent: 9c25812)
Add pricing touchups
app.py CHANGED
@@ -76,7 +76,7 @@ def count_files(*inputs):
 file_counter+=len(files)
 uses_custom = inputs[-1]
 type_of_thing = inputs[-4]
-
+selected_model = inputs[-5]
 experimental_faces = inputs[-6]
 if(uses_custom):
 Training_Steps = int(inputs[-3])
@@ -98,7 +98,7 @@ def count_files(*inputs):
 elif(selected_model == "v2-768"):
 its = 0.5
 summary_sentence = f'''You are going to train {concept_counter} {type_of_thing}(s), with {file_counter} images for {Training_Steps} steps. The training should take around {round(Training_Steps/its, 2)} seconds, or {round((Training_Steps/its)/60, 2)} minutes.
-The setup, compression and uploading the model can take up to 20 minutes.<br>As the T4-Small GPU costs US$0.60 for 1h, <span style="font-size: 120%"><b>the estimated cost for this training is US${round((((Training_Steps/its)/3600)+0.3+0.1)*0.60, 2)}.</b></span><br><br>
+The setup, compression and uploading the model can take up to 20 minutes.<br>As the T4-Small GPU costs US$0.60 for 1h, <span style="font-size: 120%"><b>the estimated cost for this training is below US${round((((Training_Steps/its)/3600)+0.3+0.1)*0.60, 2)}.</b></span><br><br>
 If you check the box below the GPU attribution will automatically removed after training is done and the model is uploaded. If not, don't forget to come back here and swap the hardware back to CPU.<br><br>'''
 else:
 summary_sentence = f'''You are going to train {concept_counter} {type_of_thing}(s), with {file_counter} images for {Training_Steps} steps.<br><br>'''
@@ -572,9 +572,10 @@ with gr.Blocks(css=css) as demo:
 #file.change(fn=update_steps,inputs=file_collection, outputs=steps)
 file.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)

+thing_experimental.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
 base_model_to_use.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
 steps.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)
-perc_txt_encoder.change(fn=count_files, inputs=file_collection+[
+perc_txt_encoder.change(fn=count_files, inputs=file_collection+[thing_experimental]+[base_model_to_use]+[type_of_thing]+[steps]+[perc_txt_encoder]+[swap_auto_calculated], outputs=[training_summary, training_summary_text], queue=False)

 #Give more options if the user wants to finish everything after training
 if(is_spaces):
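The cost figure edited in the second hunk is a simple upper-bound estimate: training time in hours (Training_Steps divided by its, the iterations per second for the selected model, then divided by 3600) plus a flat 0.3 h + 0.1 h allowance for setup, compression and upload, multiplied by the T4-Small rate of US$0.60 per hour. A minimal sketch of that arithmetic, with estimate_cost as an illustrative name that does not appear in app.py:

# Illustrative sketch of the pricing estimate shown in the training summary.
# Assumes a T4-Small at US$0.60/h plus ~0.3 h + 0.1 h of setup/compression/upload
# overhead, matching round((((Training_Steps/its)/3600)+0.3+0.1)*0.60, 2) in the diff.
def estimate_cost(training_steps: int, its: float, rate_per_hour: float = 0.60) -> float:
    training_hours = (training_steps / its) / 3600   # raw training time in hours
    overhead_hours = 0.3 + 0.1                       # setup + compression/upload allowance
    return round((training_hours + overhead_hours) * rate_per_hour, 2)

# Example: 1500 steps on "v2-768" (its = 0.5) -> (3000 s / 3600 + 0.4 h) * 0.60
print(estimate_cost(1500, 0.5))   # 0.74 (US$)

Because the overhead term is a fixed allowance rather than a measurement, the figure is an estimate, which is presumably why the wording changes from "is" to "is below".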
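The third hunk extends the "recompute the summary on any change" wiring: every control that can affect the estimate gets a .change handler that calls count_files with the same input list, and this commit adds a thing_experimental handler and updates the perc_txt_encoder one. A minimal Gradio sketch of the same pattern, using hypothetical component names rather than the ones in app.py:

# Minimal sketch (assumed names) of wiring several inputs to one summary function.
import gradio as gr

def summarize(steps, model, experimental):
    # Stand-in for count_files: rebuilds the summary from the current control values.
    return f"{steps} steps on {model} (experimental faces: {experimental})"

with gr.Blocks() as demo:
    steps = gr.Number(value=800, label="Steps")
    model = gr.Dropdown(["v1-5", "v2-512", "v2-768"], value="v1-5", label="Base model")
    experimental = gr.Checkbox(label="Experimental face handling")
    summary = gr.Markdown()

    shared_inputs = [steps, model, experimental]
    # Each control re-triggers the same function, mirroring the
    # file/thing_experimental/base_model/steps/perc_txt_encoder wiring in the diff.
    for component in shared_inputs:
        component.change(fn=summarize, inputs=shared_inputs, outputs=summary, queue=False)

demo.launch()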