John6666 committed on
Commit
26e6b0a
1 Parent(s): c81fdb0

Upload 24 files

README.md CHANGED
@@ -1,12 +1,14 @@
- ---
- title: Votepurchase Crash
- emoji: 🐨
- colorFrom: pink
- colorTo: indigo
- sdk: gradio
- sdk_version: 4.42.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: Votepurchase Multiple Model (SD1.5/SDXL Text-to-Image)
+ emoji: 🖼🖼️📦
+ colorFrom: purple
+ colorTo: red
+ sdk: gradio
+ sdk_version: 4.41.0
+ app_file: app.py
+ license: mit
+ short_description: Text-to-Image
+ pinned: true
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,373 @@
+ import spaces
+ import gradio as gr
+ import numpy as np
+
+ # DiffuseCraft
+ from dc import (infer, _infer, pass_result, get_diffusers_model_list, get_samplers,
+                 get_vaes, enable_model_recom_prompt, enable_diffusers_model_detail,
+                 get_t2i_model_info, get_all_lora_tupled_list, update_loras,
+                 apply_lora_prompt, download_my_lora, search_civitai_lora,
+                 select_civitai_lora, search_civitai_lora_json,
+                 preset_quality, preset_styles, process_style_prompt)
+ # Translator
+ from llmdolphin import (dolphin_respond_auto, dolphin_parse_simple,
+                         get_llm_formats, get_dolphin_model_format, get_dolphin_models,
+                         get_dolphin_model_info, select_dolphin_model, select_dolphin_format, get_dolphin_sysprompt)
+ # Tagger
+ from tagger.v2 import v2_upsampling_prompt, V2_ALL_MODELS
+ from tagger.utils import (gradio_copy_text, gradio_copy_prompt, COPY_ACTION_JS,
+                           V2_ASPECT_RATIO_OPTIONS, V2_RATING_OPTIONS, V2_LENGTH_OPTIONS, V2_IDENTITY_OPTIONS)
+ from tagger.tagger import (predict_tags_wd, convert_danbooru_to_e621_prompt,
+                            remove_specific_prompt, insert_recom_prompt, compose_prompt_to_copy,
+                            translate_prompt, select_random_character)
+ from tagger.fl2sd3longcap import predict_tags_fl2_sd3
+ def description_ui():
+     gr.Markdown(
+         """
+         ## Danbooru Tags Transformer V2 Demo with WD Tagger & SD3 Long Captioner
+         (Image =>) Prompt => Upsampled longer prompt
+         - Mod of p1atdev's [Danbooru Tags Transformer V2 Demo](https://huggingface.co/spaces/p1atdev/danbooru-tags-transformer-v2) and [WD Tagger with 🤗 transformers](https://huggingface.co/spaces/p1atdev/wd-tagger-transformers).
+         - Models: p1atdev's [wd-swinv2-tagger-v3-hf](https://huggingface.co/p1atdev/wd-swinv2-tagger-v3-hf), [dart-v2-moe-sft](https://huggingface.co/p1atdev/dart-v2-moe-sft), [dart-v2-sft](https://huggingface.co/p1atdev/dart-v2-sft)\
+         , gokaygokay's [Florence-2-SD3-Captioner](https://huggingface.co/gokaygokay/Florence-2-SD3-Captioner)
+         """
+     )
+
+
+ MAX_SEED = np.iinfo(np.int32).max
+ MAX_IMAGE_SIZE = 1216
+
+ css = """
+ #container { margin: 0 auto; !important; }
+ #col-container { margin: 0 auto; !important; }
+ #result { display: inline-block; max-width: 520px; max-height: 520px; width: 520px; height: 520px; align: center; margin: 0px auto; !important; }
+ .lora { display: inline-block; min-width: 480px; !important; }
+ #model-info { text-align: center; !important; }
+ """
+
+ with gr.Blocks(css=css, fill_width=True, elem_id="container") as demo:
+     with gr.Tab("Image Generator"):
+         with gr.Column(elem_id="col-container"):
+             with gr.Row():
+                 prompt = gr.Text(label="Prompt", show_label=False, lines=1, max_lines=8, placeholder="Enter your prompt", container=False)
+
+             with gr.Row():
+                 run_button = gr.Button("Run")
+                 run_translate_button = gr.Button("Translate")
+
+             result = gr.Image(label="Result", elem_id="result", show_label=False, interactive=False,
+                               show_download_button=True, show_share_button=False, container=True)
+
+             with gr.Accordion("Advanced Settings", open=False):
+                 with gr.Row():
+                     negative_prompt = gr.Text(label="Negative prompt", lines=1, max_lines=6, placeholder="Enter a negative prompt",
+                                               value="(low quality, worst quality:1.2), very displeasing, watermark, signature, ugly")
+
+                 with gr.Row():
+                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
+                     randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+                 with gr.Row():
+                     width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)  # 832
+                     height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)  # 1216
+                     guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=30.0, step=0.1, value=7)
+                     num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=100, step=1, value=28)
+
+                 with gr.Row():
+                     with gr.Column(scale=4):
+                         model_name = gr.Dropdown(label="Model", info="You can enter a Hugging Face model repo_id you want to use.",
+                                                  choices=get_diffusers_model_list(), value=get_diffusers_model_list()[0],
+                                                  allow_custom_value=True, interactive=True, min_width=320)
+                         model_info = gr.Markdown(elem_id="model-info")
+                     with gr.Column(scale=1):
+                         model_detail = gr.Checkbox(label="Show detail of model in list", value=False)
+
+                 with gr.Row():
+                     sampler = gr.Dropdown(label="Sampler", choices=get_samplers(), value="Euler a")
+                     vae_model = gr.Dropdown(label="VAE Model", choices=get_vaes(), value=get_vaes()[0])
+
+                 with gr.Accordion("LoRA", open=True, visible=True):
+                     with gr.Row():
+                         with gr.Column():
+                             with gr.Row():
+                                 lora1 = gr.Dropdown(label="LoRA 1", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
+                                 lora1_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 1: weight")
+                             with gr.Row():
+                                 lora1_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
+                                 lora1_copy = gr.Button(value="Copy example to prompt", visible=False)
+                                 lora1_md = gr.Markdown(value="", visible=False)
+                         with gr.Column():
+                             with gr.Row():
+                                 lora2 = gr.Dropdown(label="LoRA 2", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
+                                 lora2_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 2: weight")
+                             with gr.Row():
+                                 lora2_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
+                                 lora2_copy = gr.Button(value="Copy example to prompt", visible=False)
+                                 lora2_md = gr.Markdown(value="", visible=False)
+                         with gr.Column():
+                             with gr.Row():
+                                 lora3 = gr.Dropdown(label="LoRA 3", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
+                                 lora3_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 3: weight")
+                             with gr.Row():
+                                 lora3_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
+                                 lora3_copy = gr.Button(value="Copy example to prompt", visible=False)
+                                 lora3_md = gr.Markdown(value="", visible=False)
+                         with gr.Column():
+                             with gr.Row():
+                                 lora4 = gr.Dropdown(label="LoRA 4", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
+                                 lora4_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 4: weight")
+                             with gr.Row():
+                                 lora4_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
+                                 lora4_copy = gr.Button(value="Copy example to prompt", visible=False)
+                                 lora4_md = gr.Markdown(value="", visible=False)
+                         with gr.Column():
+                             with gr.Row():
+                                 lora5 = gr.Dropdown(label="LoRA 5", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
+                                 lora5_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 5: weight")
+                             with gr.Row():
+                                 lora5_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
+                                 lora5_copy = gr.Button(value="Copy example to prompt", visible=False)
+                                 lora5_md = gr.Markdown(value="", visible=False)
+                     with gr.Accordion("From URL", open=True, visible=True):
+                         with gr.Row():
+                             lora_search_civitai_query = gr.Textbox(label="Query", placeholder="oomuro sakurako...", lines=1)
+                             lora_search_civitai_basemodel = gr.CheckboxGroup(label="Search LoRA for", choices=["Pony", "SD 1.5", "SDXL 1.0"], value=["Pony", "SDXL 1.0"])
+                             lora_search_civitai_submit = gr.Button("Search on Civitai")
+                         with gr.Row():
+                             lora_search_civitai_result = gr.Dropdown(label="Search Results", choices=[("", "")], value="", allow_custom_value=True, visible=False)
+                             lora_search_civitai_json = gr.JSON(value={}, visible=False)
+                             lora_search_civitai_desc = gr.Markdown(value="", visible=False)
+                         lora_download_url = gr.Textbox(label="URL", placeholder="http://...my_lora_url.safetensors", lines=1)
+                         lora_download = gr.Button("Get and set LoRA and apply to prompt")
+
+                 with gr.Row():
+                     recom_prompt = gr.Checkbox(label="Recommended prompt", value=True)
+                     quality_selector = gr.Radio(label="Quality Tag Presets", interactive=True, choices=list(preset_quality.keys()), value="None")
+                     style_selector = gr.Radio(label="Style Presets", interactive=True, choices=list(preset_styles.keys()), value="None")
+
+                 with gr.Accordion("Translation Settings", open=False):
+                     chatbot = gr.Chatbot(likeable=False, render_markdown=False, visible=False)  # component for auto-translation
+                     chat_model = gr.Dropdown(choices=get_dolphin_models(), value=get_dolphin_models()[0][1], allow_custom_value=True, label="Model")
+                     chat_model_info = gr.Markdown(value=get_dolphin_model_info(get_dolphin_models()[0][1]), label="Model info")
+                     chat_format = gr.Dropdown(choices=get_llm_formats(), value=get_dolphin_model_format(get_dolphin_models()[0][1]), label="Message format")
+                     with gr.Row():
+                         chat_tokens = gr.Slider(minimum=1, maximum=4096, value=512, step=1, label="Max tokens")
+                         chat_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
+                         chat_topp = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
+                         chat_topk = gr.Slider(minimum=0, maximum=100, value=40, step=1, label="Top-k")
+                         chat_rp = gr.Slider(minimum=0.0, maximum=2.0, value=1.1, step=0.1, label="Repetition penalty")
+                     chat_sysmsg = gr.Textbox(value=get_dolphin_sysprompt(), label="System message")
+
+             examples = gr.Examples(
+                 examples=[
+                     ["souryuu asuka langley, 1girl, neon genesis evangelion, plugsuit, pilot suit, red bodysuit, sitting, crossing legs, black eye patch, cat hat, throne, symmetrical, looking down, from bottom, looking at viewer, outdoors"],
+                     ["sailor moon, magical girl transformation, sparkles and ribbons, soft pastel colors, crescent moon motif, starry night sky background, shoujo manga style"],
+                     ["kafuu chino, 1girl, solo"],
+                     ["1girl"],
+                     ["beautiful sunset"],
+                 ],
+                 inputs=[prompt],
+             )
+
+             gr.on( #lambda x: None, inputs=None, outputs=result).then(
+                 triggers=[run_button.click, prompt.submit],
+                 fn=infer,
+                 inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
+                         guidance_scale, num_inference_steps, model_name,
+                         lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
+                         sampler, vae_model],
+                 outputs=[result],
+                 queue=True,
+                 show_progress="full",
+                 show_api=True,
+             )
+
+             gr.on( #lambda x: None, inputs=None, outputs=result).then(
+                 triggers=[run_translate_button.click],
+                 fn=_infer,  # dummy fn for api
+                 inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
+                         guidance_scale, num_inference_steps, model_name,
+                         lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
+                         sampler, vae_model],
+                 outputs=[result],
+                 queue=False,
+                 show_api=True,
+                 api_name="infer_translate",
+             ).success(
+                 fn=dolphin_respond_auto,
+                 inputs=[prompt, chatbot],
+                 outputs=[chatbot],
+                 queue=True,
+                 show_progress="full",
+                 show_api=False,
+             ).success(
+                 fn=dolphin_parse_simple,
+                 inputs=[prompt, chatbot],
+                 outputs=[prompt],
+                 queue=False,
+                 show_api=False,
+             ).success(
+                 fn=infer,
+                 inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
+                         guidance_scale, num_inference_steps, model_name,
+                         lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
+                         sampler, vae_model],
+                 outputs=[result],
+                 queue=True,
+                 show_progress="full",
+                 show_api=False,
+             ).success(lambda: None, None, chatbot, queue=False, show_api=False)\
+                 .success(pass_result, [result], [result], queue=False, show_api=False)  # dummy fn for api
+
+             gr.on(
+                 triggers=[lora1.change, lora1_wt.change, lora2.change, lora2_wt.change, lora3.change, lora3_wt.change,
+                           lora4.change, lora4_wt.change, lora5.change, lora5_wt.change],
+                 fn=update_loras,
+                 inputs=[prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt],
+                 outputs=[prompt, lora1, lora1_wt, lora1_info, lora1_copy, lora1_md,
+                          lora2, lora2_wt, lora2_info, lora2_copy, lora2_md, lora3, lora3_wt, lora3_info, lora3_copy, lora3_md,
+                          lora4, lora4_wt, lora4_info, lora4_copy, lora4_md, lora5, lora5_wt, lora5_info, lora5_copy, lora5_md],
+                 queue=False,
+                 trigger_mode="once",
+                 show_api=False,
+             )
+             lora1_copy.click(apply_lora_prompt, [prompt, lora1_info], [prompt], queue=False, show_api=False)
+             lora2_copy.click(apply_lora_prompt, [prompt, lora2_info], [prompt], queue=False, show_api=False)
+             lora3_copy.click(apply_lora_prompt, [prompt, lora3_info], [prompt], queue=False, show_api=False)
+             lora4_copy.click(apply_lora_prompt, [prompt, lora4_info], [prompt], queue=False, show_api=False)
+             lora5_copy.click(apply_lora_prompt, [prompt, lora5_info], [prompt], queue=False, show_api=False)
+
+             gr.on(
+                 triggers=[lora_search_civitai_submit.click, lora_search_civitai_query.submit],
+                 fn=search_civitai_lora,
+                 inputs=[lora_search_civitai_query, lora_search_civitai_basemodel],
+                 outputs=[lora_search_civitai_result, lora_search_civitai_desc, lora_search_civitai_submit, lora_search_civitai_query],
+                 scroll_to_output=True,
+                 queue=True,
+                 show_api=False,
+             )
+             lora_search_civitai_json.change(search_civitai_lora_json, [lora_search_civitai_query, lora_search_civitai_basemodel], [lora_search_civitai_json], queue=True, show_api=True)  # fn for api
+             lora_search_civitai_result.change(select_civitai_lora, [lora_search_civitai_result], [lora_download_url, lora_search_civitai_desc], scroll_to_output=True, queue=False, show_api=False)
+             gr.on(
+                 triggers=[lora_download.click, lora_download_url.submit],
+                 fn=download_my_lora,
+                 inputs=[lora_download_url, lora1, lora2, lora3, lora4, lora5],
+                 outputs=[lora1, lora2, lora3, lora4, lora5],
+                 scroll_to_output=True,
+                 queue=True,
+                 show_api=False,
+             )
+
+             recom_prompt.change(enable_model_recom_prompt, [recom_prompt], [recom_prompt], queue=False, show_api=False)
+             gr.on(
+                 triggers=[quality_selector.change, style_selector.change],
+                 fn=process_style_prompt,
+                 inputs=[prompt, negative_prompt, style_selector, quality_selector],
+                 outputs=[prompt, negative_prompt],
+                 queue=False,
+                 trigger_mode="once",
+             )
+
+             model_detail.change(enable_diffusers_model_detail, [model_detail, model_name], [model_detail, model_name], queue=False, show_api=False)
+             model_name.change(get_t2i_model_info, [model_name], [model_info], queue=False, show_api=False)
+
+             chat_model.change(select_dolphin_model, [chat_model], [chat_model, chat_format, chat_model_info], queue=True, show_progress="full", show_api=False)\
+                 .success(lambda: None, None, chatbot, queue=False, show_api=False)
+             chat_format.change(select_dolphin_format, [chat_format], [chat_format], queue=False, show_api=False)\
+                 .success(lambda: None, None, chatbot, queue=False, show_api=False)
+
+     # Tagger
+     with gr.Tab("Tags Transformer with Tagger"):
+         with gr.Column():
+             with gr.Group():
+                 input_image = gr.Image(label="Input image", type="pil", sources=["upload", "clipboard"], height=256)
+                 with gr.Accordion(label="Advanced options", open=False):
+                     general_threshold = gr.Slider(label="Threshold", minimum=0.0, maximum=1.0, value=0.3, step=0.01, interactive=True)
+                     character_threshold = gr.Slider(label="Character threshold", minimum=0.0, maximum=1.0, value=0.8, step=0.01, interactive=True)
+                     input_tag_type = gr.Radio(label="Convert tags to", info="danbooru for Animagine, e621 for Pony.", choices=["danbooru", "e621"], value="danbooru")
+                     recom_prompt = gr.Radio(label="Insert recommended prompt", choices=["None", "Animagine", "Pony"], value="None", interactive=True)
+                     image_algorithms = gr.CheckboxGroup(["Use WD Tagger", "Use Florence-2-SD3-Long-Captioner"], label="Algorithms", value=["Use WD Tagger"])
+                     keep_tags = gr.Radio(label="Remove tags leaving only the following", choices=["body", "dress", "all"], value="all")
+                 generate_from_image_btn = gr.Button(value="GENERATE TAGS FROM IMAGE", size="lg", variant="primary")
+             with gr.Group():
+                 with gr.Row():
+                     input_character = gr.Textbox(label="Character tags", placeholder="hatsune miku")
+                     input_copyright = gr.Textbox(label="Copyright tags", placeholder="vocaloid")
+                     random_character = gr.Button(value="Random character 🎲", size="sm")
+                 input_general = gr.TextArea(label="General tags", lines=4, placeholder="1girl, ...", value="")
+                 input_tags_to_copy = gr.Textbox(value="", visible=False)
+                 with gr.Row():
+                     copy_input_btn = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
+                     copy_prompt_btn_input = gr.Button(value="Copy to primary prompt", size="sm", interactive=False)
+                     translate_input_prompt_button = gr.Button(value="Translate prompt to English", size="sm", variant="secondary")
+                 tag_type = gr.Radio(label="Output tag conversion", info="danbooru for Animagine, e621 for Pony.", choices=["danbooru", "e621"], value="e621", visible=False)
+                 input_rating = gr.Radio(label="Rating", choices=list(V2_RATING_OPTIONS), value="explicit")
+                 with gr.Accordion(label="Advanced options", open=False):
+                     input_aspect_ratio = gr.Radio(label="Aspect ratio", info="The aspect ratio of the image.", choices=list(V2_ASPECT_RATIO_OPTIONS), value="square")
+                     input_length = gr.Radio(label="Length", info="The total length of the tags.", choices=list(V2_LENGTH_OPTIONS), value="very_long")
+                     input_identity = gr.Radio(label="Keep identity", info="How strictly to keep the identity of the character or subject. If you specify the detail of subject in the prompt, you should choose `strict`. Otherwise, choose `none` or `lax`. `none` is very creative but sometimes ignores the input prompt.", choices=list(V2_IDENTITY_OPTIONS), value="lax")
+                     input_ban_tags = gr.Textbox(label="Ban tags", info="Tags to ban from the output.", placeholder="alternate costume, ...", value="censored")
+                     model_name = gr.Dropdown(label="Model", choices=list(V2_ALL_MODELS.keys()), value=list(V2_ALL_MODELS.keys())[0])
+                 dummy_np = gr.Textbox(label="Negative prompt", value="", visible=False)
+                 recom_animagine = gr.Textbox(label="Animagine recommended prompt", value="Animagine", visible=False)
+                 recom_pony = gr.Textbox(label="Pony recommended prompt", value="Pony", visible=False)
+                 generate_btn = gr.Button(value="GENERATE TAGS", size="lg", variant="primary")
+             with gr.Row():
+                 with gr.Group():
+                     output_text = gr.TextArea(label="Output tags", interactive=False, show_copy_button=True)
+                     with gr.Row():
+                         copy_btn = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
+                         copy_prompt_btn = gr.Button(value="Copy to primary prompt", size="sm", interactive=False)
+                 with gr.Group():
+                     output_text_pony = gr.TextArea(label="Output tags (Pony e621 style)", interactive=False, show_copy_button=True)
+                     with gr.Row():
+                         copy_btn_pony = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
+                         copy_prompt_btn_pony = gr.Button(value="Copy to primary prompt", size="sm", interactive=False)
+
+         random_character.click(select_random_character, [input_copyright, input_character], [input_copyright, input_character], queue=False, show_api=False)
+
+         translate_input_prompt_button.click(translate_prompt, [input_general], [input_general], queue=False, show_api=False)
+         translate_input_prompt_button.click(translate_prompt, [input_character], [input_character], queue=False, show_api=False)
+         translate_input_prompt_button.click(translate_prompt, [input_copyright], [input_copyright], queue=False, show_api=False)
+
+         generate_from_image_btn.click(
+             lambda: ("", "", ""), None, [input_copyright, input_character, input_general], queue=False, show_api=False,
+         ).success(
+             predict_tags_wd,
+             [input_image, input_general, image_algorithms, general_threshold, character_threshold],
+             [input_copyright, input_character, input_general, copy_input_btn],
+             show_api=False,
+         ).success(
+             predict_tags_fl2_sd3, [input_image, input_general, image_algorithms], [input_general], show_api=False,
+         ).success(
+             remove_specific_prompt, [input_general, keep_tags], [input_general], queue=False, show_api=False,
+         ).success(
+             convert_danbooru_to_e621_prompt, [input_general, input_tag_type], [input_general], queue=False, show_api=False,
+         ).success(
+             insert_recom_prompt, [input_general, dummy_np, recom_prompt], [input_general, dummy_np], queue=False, show_api=False,
+         ).success(lambda: gr.update(interactive=True), None, [copy_prompt_btn_input], queue=False, show_api=False)
+         copy_input_btn.click(compose_prompt_to_copy, [input_character, input_copyright, input_general], [input_tags_to_copy], show_api=False)\
+             .success(gradio_copy_text, [input_tags_to_copy], js=COPY_ACTION_JS, show_api=False)
+         copy_prompt_btn_input.click(compose_prompt_to_copy, inputs=[input_character, input_copyright, input_general], outputs=[input_tags_to_copy], show_api=False)\
+             .success(gradio_copy_prompt, inputs=[input_tags_to_copy], outputs=[prompt], show_api=False)
+
+         generate_btn.click(
+             v2_upsampling_prompt,
+             [model_name, input_copyright, input_character, input_general,
+              input_rating, input_aspect_ratio, input_length, input_identity, input_ban_tags],
+             [output_text],
+             show_api=False,
+         ).success(
+             convert_danbooru_to_e621_prompt, [output_text, tag_type], [output_text_pony], queue=False, show_api=False,
+         ).success(
+             insert_recom_prompt, [output_text, dummy_np, recom_animagine], [output_text, dummy_np], queue=False, show_api=False,
+         ).success(
+             insert_recom_prompt, [output_text_pony, dummy_np, recom_pony], [output_text_pony, dummy_np], queue=False, show_api=False,
+         ).success(lambda: (gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True)),
+                   None, [copy_btn, copy_btn_pony, copy_prompt_btn, copy_prompt_btn_pony], queue=False, show_api=False)
+         copy_btn.click(gradio_copy_text, [output_text], js=COPY_ACTION_JS, show_api=False)
+         copy_btn_pony.click(gradio_copy_text, [output_text_pony], js=COPY_ACTION_JS, show_api=False)
+         copy_prompt_btn.click(gradio_copy_prompt, inputs=[output_text], outputs=[prompt], show_api=False)
+         copy_prompt_btn_pony.click(gradio_copy_prompt, inputs=[output_text_pony], outputs=[prompt], show_api=False)
+
+ demo.queue()
+ demo.launch()
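
Editor's note: because the two `gr.on` handlers above are registered with `show_api=True`, the Space exposes the generation function over the Gradio API. A minimal client sketch follows, assuming a hypothetical Space id and the `/infer` endpoint name (Gradio normally derives the endpoint from the function name; check the Space's "Use via API" page for the exact signature). The model repo id reused here is the default base model from dc.py below.

    from gradio_client import Client

    client = Client("John6666/votepurchase-multiple-model")  # hypothetical Space id

    # Positional arguments mirror the inputs wired to `infer` above: prompt,
    # negative prompt, seed, randomize_seed, width, height, guidance scale,
    # steps, model repo id, five LoRA (path, weight) pairs, sampler, and VAE.
    image_path = client.predict(
        "1girl, solo, looking at viewer",    # prompt
        "(low quality, worst quality:1.2)",  # negative_prompt
        0, True,                             # seed, randomize_seed
        1024, 1024,                          # width, height
        7.0, 28,                             # guidance_scale, num_inference_steps
        "cagliostrolab/animagine-xl-3.1",    # model_name
        "", 1.0, "", 1.0, "", 1.0, "", 1.0, "", 1.0,  # lora1..lora5 with weights
        "Euler a", "None",                   # sampler, vae_model
        api_name="/infer",
    )
    print(image_path)  # local path to the generated image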
dc.py ADDED
@@ -0,0 +1,1328 @@
+ import spaces
+ import os
+ from stablepy import Model_Diffusers
+ from stablepy.diffusers_vanilla.model import scheduler_names
+ from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
+ import torch
+ import re
+ import shutil
+ import random
+ from stablepy import (
+     CONTROLNET_MODEL_IDS,
+     VALID_TASKS,
+     T2I_PREPROCESSOR_NAME,
+     FLASH_LORA,
+     SCHEDULER_CONFIG_MAP,
+     scheduler_names,
+     IP_ADAPTER_MODELS,
+     IP_ADAPTERS_SD,
+     IP_ADAPTERS_SDXL,
+     REPO_IMAGE_ENCODER,
+     ALL_PROMPT_WEIGHT_OPTIONS,
+     SD15_TASKS,
+     SDXL_TASKS,
+ )
+ import urllib.parse
+ import gradio as gr
+ from PIL import Image
+ import IPython.display
+ import time, json
+ from IPython.utils import capture
+ import logging
+ logging.getLogger("diffusers").setLevel(logging.ERROR)
+ import diffusers
+ diffusers.utils.logging.set_verbosity(40)
+ import warnings
+ warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
+ warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
+ warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
+ from stablepy import logger
+ logger.setLevel(logging.CRITICAL)
+
+ from env import (
+     hf_token,
+     hf_read_token,  # to use only for private repos
+     CIVITAI_API_KEY,
+     HF_LORA_PRIVATE_REPOS1,
+     HF_LORA_PRIVATE_REPOS2,
+     HF_LORA_ESSENTIAL_PRIVATE_REPO,
+     HF_VAE_PRIVATE_REPO,
+     HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO,
+     HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO,
+     directory_models,
+     directory_loras,
+     directory_vaes,
+     directory_embeds,
+     directory_embeds_sdxl,
+     directory_embeds_positive_sdxl,
+     load_diffusers_format_model,
+     download_model_list,
+     download_lora_list,
+     download_vae_list,
+     download_embeds,
+ )
+
+ preprocessor_controlnet = {
+     "openpose": [
+         "Openpose",
+         "None",
+     ],
+     "scribble": [
+         "HED",
+         "Pidinet",
+         "None",
+     ],
+     "softedge": [
+         "Pidinet",
+         "HED",
+         "HED safe",
+         "Pidinet safe",
+         "None",
+     ],
+     "segmentation": [
+         "UPerNet",
+         "None",
+     ],
+     "depth": [
+         "DPT",
+         "Midas",
+         "None",
+     ],
+     "normalbae": [
+         "NormalBae",
+         "None",
+     ],
+     "lineart": [
+         "Lineart",
+         "Lineart coarse",
+         "Lineart (anime)",
+         "None",
+         "None (anime)",
+     ],
+     "shuffle": [
+         "ContentShuffle",
+         "None",
+     ],
+     "canny": [
+         "Canny"
+     ],
+     "mlsd": [
+         "MLSD"
+     ],
+     "ip2p": [
+         "ip2p"
+     ],
+ }
+
+ task_stablepy = {
+     'txt2img': 'txt2img',
+     'img2img': 'img2img',
+     'inpaint': 'inpaint',
+     # 'canny T2I Adapter': 'sdxl_canny_t2i',  # NO HAVE STEP CALLBACK PARAMETERS SO NOT WORKS WITH DIFFUSERS 0.29.0
+     # 'sketch T2I Adapter': 'sdxl_sketch_t2i',
+     # 'lineart T2I Adapter': 'sdxl_lineart_t2i',
+     # 'depth-midas T2I Adapter': 'sdxl_depth-midas_t2i',
+     # 'openpose T2I Adapter': 'sdxl_openpose_t2i',
+     'openpose ControlNet': 'openpose',
+     'canny ControlNet': 'canny',
+     'mlsd ControlNet': 'mlsd',
+     'scribble ControlNet': 'scribble',
+     'softedge ControlNet': 'softedge',
+     'segmentation ControlNet': 'segmentation',
+     'depth ControlNet': 'depth',
+     'normalbae ControlNet': 'normalbae',
+     'lineart ControlNet': 'lineart',
+     # 'lineart_anime ControlNet': 'lineart_anime',
+     'shuffle ControlNet': 'shuffle',
+     'ip2p ControlNet': 'ip2p',
+     'optical pattern ControlNet': 'pattern',
+     'tile realistic': 'sdxl_tile_realistic',
+ }
+
+ task_model_list = list(task_stablepy.keys())
+
+
+ def download_things(directory, url, hf_token="", civitai_api_key=""):
+     url = url.strip()
+
+     if "drive.google.com" in url:
+         original_dir = os.getcwd()
+         os.chdir(directory)
+         os.system(f"gdown --fuzzy {url}")
+         os.chdir(original_dir)
+     elif "huggingface.co" in url:
+         url = url.replace("?download=true", "")
+         # url = urllib.parse.quote(url, safe=':/')  # fix encoding
+         if "/blob/" in url:
+             url = url.replace("/blob/", "/resolve/")
+         user_header = f'"Authorization: Bearer {hf_token}"'
+         if hf_token:
+             os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
+         else:
+             os.system(f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
+     elif "civitai.com" in url:
+         if "?" in url:
+             url = url.split("?")[0]
+         if civitai_api_key:
+             url = url + f"?token={civitai_api_key}"
+             os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
+         else:
+             print("\033[91mYou need an API key to download Civitai models.\033[0m")
+     else:
+         os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
+
+
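
Editor's note: a minimal usage sketch of `download_things` (the URL is a placeholder; `directory_loras`, `hf_token`, and `CIVITAI_API_KEY` are the values imported or read from the environment in this file). Hugging Face `/blob/` URLs are rewritten to `/resolve/` and fetched with aria2c, Civitai URLs get the API token appended, and Google Drive links go through gdown, matching the branches above.

    # Placeholder URL; any Hub file URL follows the same pattern.
    download_things(
        directory_loras,
        "https://huggingface.co/<user>/<repo>/blob/main/<file>.safetensors",
        hf_token,
        CIVITAI_API_KEY,
    )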
+ def get_model_list(directory_path):
+     model_list = []
+     valid_extensions = {'.ckpt', '.pt', '.pth', '.safetensors', '.bin'}
+
+     for filename in os.listdir(directory_path):
+         if os.path.splitext(filename)[1] in valid_extensions:
+             name_without_extension = os.path.splitext(filename)[0]
+             file_path = os.path.join(directory_path, filename)
+             # model_list.append((name_without_extension, file_path))
+             model_list.append(file_path)
+             print('\033[34mFILE: ' + file_path + '\033[0m')
+     return model_list
+
+
+ def process_string(input_string):
+     parts = input_string.split('/')
+
+     if len(parts) == 2:
+         first_element = parts[1]
+         complete_string = input_string
+         result = (first_element, complete_string)
+         return result
+     else:
+         return None
+
+ ## BEGIN MOD
+ from modutils import (
+     to_list,
+     list_uniq,
+     list_sub,
+     get_model_id_list,
+     get_tupled_embed_list,
+     get_tupled_model_list,
+     get_lora_model_list,
+     download_private_repo,
+ )
+
+ # - **Download Models**
+ download_model = ", ".join(download_model_list)
+ # - **Download VAEs**
+ download_vae = ", ".join(download_vae_list)
+ # - **Download LoRAs**
+ download_lora = ", ".join(download_lora_list)
+
+ #download_private_repo(HF_LORA_ESSENTIAL_PRIVATE_REPO, directory_loras, True)
+ download_private_repo(HF_VAE_PRIVATE_REPO, directory_vaes, False)
+
+ load_diffusers_format_model = list_uniq(load_diffusers_format_model + get_model_id_list())
+ ## END MOD
+
+ CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
+ hf_token = os.environ.get("HF_TOKEN")
+
+ # Download stuff
+ for url in [url.strip() for url in download_model.split(',')]:
+     if not os.path.exists(f"./models/{url.split('/')[-1]}"):
+         download_things(directory_models, url, hf_token, CIVITAI_API_KEY)
+ for url in [url.strip() for url in download_vae.split(',')]:
+     if not os.path.exists(f"./vaes/{url.split('/')[-1]}"):
+         download_things(directory_vaes, url, hf_token, CIVITAI_API_KEY)
+ for url in [url.strip() for url in download_lora.split(',')]:
+     if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
+         download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)
+
+ # Download Embeddings
+ for url_embed in download_embeds:
+     if not os.path.exists(f"./embedings/{url_embed.split('/')[-1]}"):
+         download_things(directory_embeds, url_embed, hf_token, CIVITAI_API_KEY)
+
+ # Build list models
+ embed_list = get_model_list(directory_embeds)
+ model_list = get_model_list(directory_models)
+ model_list = load_diffusers_format_model + model_list
+ ## BEGIN MOD
+ lora_model_list = get_lora_model_list()
+ vae_model_list = get_model_list(directory_vaes)
+ vae_model_list.insert(0, "None")
+
+ #download_private_repo(HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO, directory_embeds_sdxl, False)
+ #download_private_repo(HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO, directory_embeds_positive_sdxl, False)
+ embed_sdxl_list = get_model_list(directory_embeds_sdxl) + get_model_list(directory_embeds_positive_sdxl)
+
+ def get_embed_list(pipeline_name):
+     return get_tupled_embed_list(embed_sdxl_list if pipeline_name == "StableDiffusionXLPipeline" else embed_list)
+
+
+ ## END MOD
+
+ print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
+
+ upscaler_dict_gui = {
+     None: None,
+     "Lanczos": "Lanczos",
+     "Nearest": "Nearest",
+     "RealESRGAN_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
+     "RealESRNet_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth",
+     "RealESRGAN_x4plus_anime_6B": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
+     "RealESRGAN_x2plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
+     "realesr-animevideov3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
+     "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
+     "realesr-general-wdn-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
+     "4x-UltraSharp": "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
+     "4x_foolhardy_Remacri": "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
+     "Remacri4xExtraSmoother": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
+     "AnimeSharp4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
+     "lollypop": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/lollypop.pth",
+     "RealisticRescaler4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/RealisticRescaler%204x.pth",
+     "NickelbackFS4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/NickelbackFS%204x.pth"
+ }
+
+
+ def extract_parameters(input_string):
+     parameters = {}
+     input_string = input_string.replace("\n", "")
+
+     if "Negative prompt:" not in input_string:
+         print("Negative prompt not detected")
+         parameters["prompt"] = input_string
+         return parameters
+
+     parm = input_string.split("Negative prompt:")
+     parameters["prompt"] = parm[0]
+     if "Steps:" not in parm[1]:
+         print("Steps not detected")
+         parameters["neg_prompt"] = parm[1]
+         return parameters
+     parm = parm[1].split("Steps:")
+     parameters["neg_prompt"] = parm[0]
+     input_string = "Steps:" + parm[1]
+
+     # Extracting Steps
+     steps_match = re.search(r'Steps: (\d+)', input_string)
+     if steps_match:
+         parameters['Steps'] = int(steps_match.group(1))
+
+     # Extracting Size
+     size_match = re.search(r'Size: (\d+x\d+)', input_string)
+     if size_match:
+         parameters['Size'] = size_match.group(1)
+         width, height = map(int, parameters['Size'].split('x'))
+         parameters['width'] = width
+         parameters['height'] = height
+
+     # Extracting other parameters
+     other_parameters = re.findall(r'(\w+): (.*?)(?=, \w+|$)', input_string)
+     for param in other_parameters:
+         parameters[param[0]] = param[1].strip('"')
+
+     return parameters
+
+
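
Editor's note: a worked example of `extract_parameters` on an A1111-style metadata string (illustrative input, traced through the regexes above). Note that `CFG scale` comes out under the key `scale`, because the generic `(\w+):` pattern only captures the last word before the colon.

    meta = ("masterpiece, 1girl\n"
            "Negative prompt: lowres\n"
            "Steps: 28, Sampler: Euler a, CFG scale: 7, Seed: 42, Size: 1024x1216")
    print(extract_parameters(meta))
    # {'prompt': 'masterpiece, 1girl', 'neg_prompt': ' lowres', 'Steps': '28',
    #  'Size': '1024x1216', 'width': 1024, 'height': 1216,
    #  'Sampler': 'Euler a', 'scale': '7', 'Seed': '42'}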
+ ## BEGIN MOD
+ class GuiSD:
+     def __init__(self):
+         self.model = None
+
+         print("Loading model...")
+         self.model = Model_Diffusers(
+             base_model_id="cagliostrolab/animagine-xl-3.1",
+             task_name="txt2img",
+             vae_model=None,
+             type_model_precision=torch.float16,
+             retain_task_model_in_cache=False,
+         )
+
+     def infer_short(self, model, pipe_params, progress=gr.Progress(track_tqdm=True)):
+         progress(0, desc="Start inference...")
+         images, image_list = model(**pipe_params)
+         progress(1, desc="Inference completed.")
+         if not isinstance(images, list): images = [images]
+         img = []
+         for image in images:
+             img.append((image, None))
+         return img
+
+     def load_new_model(self, model_name, vae_model, task, progress=gr.Progress(track_tqdm=True)):
+
+         yield f"Loading model: {model_name}"
+
+         vae_model = vae_model if vae_model != "None" else None
+
+         if model_name in model_list:
+             model_is_xl = "xl" in model_name.lower()
+             sdxl_in_vae = vae_model and "sdxl" in vae_model.lower()
+             model_type = "SDXL" if model_is_xl else "SD 1.5"
+             incompatible_vae = (model_is_xl and vae_model and not sdxl_in_vae) or (not model_is_xl and sdxl_in_vae)
+
+             if incompatible_vae:
+                 vae_model = None
+
+
+         self.model.load_pipe(
+             model_name,
+             task_name=task_stablepy[task],
+             vae_model=vae_model if vae_model != "None" else None,
+             type_model_precision=torch.float16,
+             retain_task_model_in_cache=False,
+         )
+         yield f"Model loaded: {model_name}"
+
+     @spaces.GPU
+     def generate_pipeline(
+         self,
+         prompt,
+         neg_prompt,
+         num_images,
+         steps,
+         cfg,
+         clip_skip,
+         seed,
+         lora1,
+         lora_scale1,
+         lora2,
+         lora_scale2,
+         lora3,
+         lora_scale3,
+         lora4,
+         lora_scale4,
+         lora5,
+         lora_scale5,
+         sampler,
+         img_height,
+         img_width,
+         model_name,
+         vae_model,
+         task,
+         image_control,
+         preprocessor_name,
+         preprocess_resolution,
+         image_resolution,
+         style_prompt,  # list []
+         style_json_file,
+         image_mask,
+         strength,
+         low_threshold,
+         high_threshold,
+         value_threshold,
+         distance_threshold,
+         controlnet_output_scaling_in_unet,
+         controlnet_start_threshold,
+         controlnet_stop_threshold,
+         textual_inversion,
+         syntax_weights,
+         upscaler_model_path,
+         upscaler_increases_size,
+         esrgan_tile,
+         esrgan_tile_overlap,
+         hires_steps,
+         hires_denoising_strength,
+         hires_sampler,
+         hires_prompt,
+         hires_negative_prompt,
+         hires_before_adetailer,
+         hires_after_adetailer,
+         loop_generation,
+         leave_progress_bar,
+         disable_progress_bar,
+         image_previews,
+         display_images,
+         save_generated_images,
+         image_storage_location,
+         retain_compel_previous_load,
+         retain_detailfix_model_previous_load,
+         retain_hires_model_previous_load,
+         t2i_adapter_preprocessor,
+         t2i_adapter_conditioning_scale,
+         t2i_adapter_conditioning_factor,
+         xformers_memory_efficient_attention,
+         freeu,
+         generator_in_cpu,
+         adetailer_inpaint_only,
+         adetailer_verbose,
+         adetailer_sampler,
+         adetailer_active_a,
+         prompt_ad_a,
+         negative_prompt_ad_a,
+         strength_ad_a,
+         face_detector_ad_a,
+         person_detector_ad_a,
+         hand_detector_ad_a,
+         mask_dilation_a,
+         mask_blur_a,
+         mask_padding_a,
+         adetailer_active_b,
+         prompt_ad_b,
+         negative_prompt_ad_b,
+         strength_ad_b,
+         face_detector_ad_b,
+         person_detector_ad_b,
+         hand_detector_ad_b,
+         mask_dilation_b,
+         mask_blur_b,
+         mask_padding_b,
+         retain_task_cache_gui,
+         image_ip1,
+         mask_ip1,
+         model_ip1,
+         mode_ip1,
+         scale_ip1,
+         image_ip2,
+         mask_ip2,
+         model_ip2,
+         mode_ip2,
+         scale_ip2,
+         progress=gr.Progress(track_tqdm=True),
+     ):
+         progress(0, desc="Preparing inference...")
+
+         vae_model = vae_model if vae_model != "None" else None
+         loras_list = [lora1, lora2, lora3, lora4, lora5]
+         vae_msg = f"VAE: {vae_model}" if vae_model else ""
+         msg_lora = []
+
+         ## BEGIN MOD
+         prompt, neg_prompt = insert_model_recom_prompt(prompt, neg_prompt, model_name)
+         global lora_model_list
+         lora_model_list = get_lora_model_list()
+         ## END MOD
+
+         if model_name in model_list:
+             model_is_xl = "xl" in model_name.lower()
+             sdxl_in_vae = vae_model and "sdxl" in vae_model.lower()
+             model_type = "SDXL" if model_is_xl else "SD 1.5"
+             incompatible_vae = (model_is_xl and vae_model and not sdxl_in_vae) or (not model_is_xl and sdxl_in_vae)
+
+             if incompatible_vae:
+                 msg_inc_vae = (
+                     f"The selected VAE is for a {'SD 1.5' if model_is_xl else 'SDXL'} model, but you"
+                     f" are using a {model_type} model. The default VAE "
+                     "will be used."
+                 )
+                 gr.Info(msg_inc_vae)
+                 vae_msg = msg_inc_vae
+                 vae_model = None
+
+         for la in loras_list:
+             if la is not None and la != "None" and la in lora_model_list:
+                 print(la)
+                 lora_type = ("animetarot" in la.lower() or "Hyper-SD15-8steps".lower() in la.lower())
+                 if (model_is_xl and lora_type) or (not model_is_xl and not lora_type):
+                     msg_inc_lora = f"The LoRA {la} is for {'SD 1.5' if model_is_xl else 'SDXL'}, but you are using {model_type}."
+                     gr.Info(msg_inc_lora)
+                     msg_lora.append(msg_inc_lora)
+
+         task = task_stablepy[task]
+
+         params_ip_img = []
+         params_ip_msk = []
+         params_ip_model = []
+         params_ip_mode = []
+         params_ip_scale = []
+
+         all_adapters = [
+             (image_ip1, mask_ip1, model_ip1, mode_ip1, scale_ip1),
+             (image_ip2, mask_ip2, model_ip2, mode_ip2, scale_ip2),
+         ]
+
+         for imgip, mskip, modelip, modeip, scaleip in all_adapters:
+             if imgip:
+                 params_ip_img.append(imgip)
+                 if mskip:
+                     params_ip_msk.append(mskip)
+                 params_ip_model.append(modelip)
+                 params_ip_mode.append(modeip)
+                 params_ip_scale.append(scaleip)
+
+         # First load
+         model_precision = torch.float16
+         if not self.model:
+             from stablepy import Model_Diffusers
+
+             print("Loading model...")
+             self.model = Model_Diffusers(
+                 base_model_id=model_name,
+                 task_name=task,
+                 vae_model=vae_model if vae_model != "None" else None,
+                 type_model_precision=model_precision,
+                 retain_task_model_in_cache=retain_task_cache_gui,
+             )
+
+         if task != "txt2img" and not image_control:
+             raise ValueError("No control image found: To use this function, you have to upload an image in 'Image ControlNet/Inpaint/Img2img'")
+
+         if task == "inpaint" and not image_mask:
+             raise ValueError("No mask image found: Specify one in 'Image Mask'")
+
+         if upscaler_model_path in [None, "Lanczos", "Nearest"]:
+             upscaler_model = upscaler_model_path
+         else:
+             directory_upscalers = 'upscalers'
+             os.makedirs(directory_upscalers, exist_ok=True)
+
+             url_upscaler = upscaler_dict_gui[upscaler_model_path]
+
+             if not os.path.exists(f"./upscalers/{url_upscaler.split('/')[-1]}"):
+                 download_things(directory_upscalers, url_upscaler, hf_token)
+
+             upscaler_model = f"./upscalers/{url_upscaler.split('/')[-1]}"
+
+         logging.getLogger("ultralytics").setLevel(logging.INFO if adetailer_verbose else logging.ERROR)
+
+         print("Config model:", model_name, vae_model, loras_list)
+
+         self.model.load_pipe(
+             model_name,
+             task_name=task,
+             vae_model=vae_model if vae_model != "None" else None,
+             type_model_precision=model_precision,
+             retain_task_model_in_cache=retain_task_cache_gui,
+         )
+
+         ## BEGIN MOD
+         # if textual_inversion and self.model.class_name == "StableDiffusionXLPipeline":
+         #     print("No Textual inversion for SDXL")
+         ## END MOD
+
+         adetailer_params_A = {
+             "face_detector_ad": face_detector_ad_a,
+             "person_detector_ad": person_detector_ad_a,
+             "hand_detector_ad": hand_detector_ad_a,
+             "prompt": prompt_ad_a,
+             "negative_prompt": negative_prompt_ad_a,
+             "strength": strength_ad_a,
+             # "image_list_task": None,
+             "mask_dilation": mask_dilation_a,
+             "mask_blur": mask_blur_a,
+             "mask_padding": mask_padding_a,
+             "inpaint_only": adetailer_inpaint_only,
+             "sampler": adetailer_sampler,
+         }
+
+         adetailer_params_B = {
+             "face_detector_ad": face_detector_ad_b,
+             "person_detector_ad": person_detector_ad_b,
+             "hand_detector_ad": hand_detector_ad_b,
+             "prompt": prompt_ad_b,
+             "negative_prompt": negative_prompt_ad_b,
+             "strength": strength_ad_b,
+             # "image_list_task": None,
+             "mask_dilation": mask_dilation_b,
+             "mask_blur": mask_blur_b,
+             "mask_padding": mask_padding_b,
+         }
+         pipe_params = {
+             "prompt": prompt,
+             "negative_prompt": neg_prompt,
+             "img_height": img_height,
+             "img_width": img_width,
+             "num_images": num_images,
+             "num_steps": steps,
+             "guidance_scale": cfg,
+             "clip_skip": clip_skip,
+             "seed": seed,
+             "image": image_control,
+             "preprocessor_name": preprocessor_name,
+             "preprocess_resolution": preprocess_resolution,
+             "image_resolution": image_resolution,
+             "style_prompt": style_prompt if style_prompt else "",
+             "style_json_file": "",
+             "image_mask": image_mask,  # only for Inpaint
+             "strength": strength,  # only for Inpaint or ...
+             "low_threshold": low_threshold,
+             "high_threshold": high_threshold,
+             "value_threshold": value_threshold,
+             "distance_threshold": distance_threshold,
+             "lora_A": lora1 if lora1 != "None" else None,
+             "lora_scale_A": lora_scale1,
+             "lora_B": lora2 if lora2 != "None" else None,
+             "lora_scale_B": lora_scale2,
+             "lora_C": lora3 if lora3 != "None" else None,
+             "lora_scale_C": lora_scale3,
+             "lora_D": lora4 if lora4 != "None" else None,
+             "lora_scale_D": lora_scale4,
+             "lora_E": lora5 if lora5 != "None" else None,
+             "lora_scale_E": lora_scale5,
+             ## BEGIN MOD
+             "textual_inversion": get_embed_list(self.model.class_name) if textual_inversion else [],
+             ## END MOD
+             "syntax_weights": syntax_weights,  # "Classic"
+             "sampler": sampler,
+             "xformers_memory_efficient_attention": xformers_memory_efficient_attention,
+             "gui_active": True,
+             "loop_generation": loop_generation,
+             "controlnet_conditioning_scale": float(controlnet_output_scaling_in_unet),
+             "control_guidance_start": float(controlnet_start_threshold),
+             "control_guidance_end": float(controlnet_stop_threshold),
+             "generator_in_cpu": generator_in_cpu,
+             "FreeU": freeu,
+             "adetailer_A": adetailer_active_a,
+             "adetailer_A_params": adetailer_params_A,
+             "adetailer_B": adetailer_active_b,
+             "adetailer_B_params": adetailer_params_B,
+             "leave_progress_bar": leave_progress_bar,
+             "disable_progress_bar": disable_progress_bar,
+             "image_previews": image_previews,
+             "display_images": display_images,
+             "save_generated_images": save_generated_images,
+             "image_storage_location": image_storage_location,
+             "retain_compel_previous_load": retain_compel_previous_load,
+             "retain_detailfix_model_previous_load": retain_detailfix_model_previous_load,
+             "retain_hires_model_previous_load": retain_hires_model_previous_load,
+             "t2i_adapter_preprocessor": t2i_adapter_preprocessor,
+             "t2i_adapter_conditioning_scale": float(t2i_adapter_conditioning_scale),
+             "t2i_adapter_conditioning_factor": float(t2i_adapter_conditioning_factor),
+             "upscaler_model_path": upscaler_model,
+             "upscaler_increases_size": upscaler_increases_size,
+             "esrgan_tile": esrgan_tile,
+             "esrgan_tile_overlap": esrgan_tile_overlap,
+             "hires_steps": hires_steps,
+             "hires_denoising_strength": hires_denoising_strength,
+             "hires_prompt": hires_prompt,
+             "hires_negative_prompt": hires_negative_prompt,
+             "hires_sampler": hires_sampler,
+             "hires_before_adetailer": hires_before_adetailer,
+             "hires_after_adetailer": hires_after_adetailer,
+             "ip_adapter_image": params_ip_img,
+             "ip_adapter_mask": params_ip_msk,
+             "ip_adapter_model": params_ip_model,
+             "ip_adapter_mode": params_ip_mode,
+             "ip_adapter_scale": params_ip_scale,
+         }
+
+         # Maybe fix lora issue: 'Cannot copy out of meta tensor; no data!'
+         self.model.pipe.to("cuda:0" if torch.cuda.is_available() else "cpu")
+
+         progress(1, desc="Inference preparation completed. Starting inference...")
+
+         info_state = f"COMPLETED. Seeds: {str(seed)}"
+         if vae_msg:
+             info_state = info_state + "<br>" + vae_msg
+         if msg_lora:
+             info_state = info_state + "<br>" + "<br>".join(msg_lora)
+         return self.infer_short(self.model, pipe_params), info_state
+ ## END MOD
711
+
712
+
713
+ from pathlib import Path
714
+ from modutils import (
715
+ safe_float,
716
+ escape_lora_basename,
717
+ to_lora_key,
718
+ to_lora_path,
719
+ get_local_model_list,
720
+ get_private_lora_model_lists,
721
+ get_valid_lora_name,
722
+ get_valid_lora_path,
723
+ get_valid_lora_wt,
724
+ get_lora_info,
725
+ normalize_prompt_list,
726
+ get_civitai_info,
727
+ search_lora_on_civitai,
728
+ )
729
+
730
+ sd_gen = GuiSD()
731
+ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
732
+ model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
733
+ lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
734
+ sampler = "Euler a", vae = None, progress=gr.Progress(track_tqdm=True)):
735
+ import PIL
736
+ import numpy as np
737
+ MAX_SEED = np.iinfo(np.int32).max
738
+
739
+ images: list[tuple[PIL.Image.Image, str | None]] = []
740
+ info: str = ""
741
+ progress(0, desc="Preparing...")
742
+
743
+ if randomize_seed:
744
+ seed = random.randint(0, MAX_SEED)
745
+
746
+ generator = torch.Generator().manual_seed(seed).seed()
747
+
748
+ prompt, negative_prompt = insert_model_recom_prompt(prompt, negative_prompt, model_name)
749
+ progress(0.5, desc="Preparing...")
750
+ lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt = \
751
+ set_prompt_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt)
752
+ lora1 = get_valid_lora_path(lora1)
753
+ lora2 = get_valid_lora_path(lora2)
754
+ lora3 = get_valid_lora_path(lora3)
755
+ lora4 = get_valid_lora_path(lora4)
756
+ lora5 = get_valid_lora_path(lora5)
757
+ progress(1, desc="Preparation completed. Starting inference preparation...")
758
+
759
+ sd_gen.load_new_model(model_name, vae, task_model_list[0])
760
+ images, info = sd_gen.generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
761
+ guidance_scale, True, generator, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
762
+ lora4, lora4_wt, lora5, lora5_wt, sampler,
763
+ height, width, model_name, vae, task_model_list[0], None, "Canny", 512, 1024,
764
+ None, None, None, 0.35, 100, 200, 0.1, 0.1, 1.0, 0., 1., False, "Classic", None,
765
+ 1.0, 100, 10, 30, 0.55, "Use same sampler", "", "",
766
+ False, True, 1, True, False, False, False, False, "./images", False, False, False, True, 1, 0.55,
767
+ False, False, False, True, False, "Use same sampler", False, "", "", 0.35, True, True, False, 4, 4, 32,
768
+ False, "", "", 0.35, True, True, False, 4, 4, 32,
769
+ True, None, None, "plus_face", "original", 0.7, None, None, "base", "style", 0.7
770
+ )
771
+
772
+ progress(1, desc="Inference completed.")
773
+ output_image = images[0][0] if images else None
774
+
775
+ return output_image
776
+
777
+
778
+ def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
779
+ model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
780
+ lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
781
+ sampler = "Euler a", vae = None, progress=gr.Progress(track_tqdm=True)):
782
+ return gr.update(visible=True)
783
+
784
+
785
+ def pass_result(result):
786
+ return result
787
+
788
+
789
+ def get_samplers():
790
+ return scheduler_names
791
+
792
+
793
+ def get_vaes():
794
+ return vae_model_list
795
+
796
+
797
+ show_diffusers_model_list_detail = False
798
+ cached_diffusers_model_tupled_list = get_tupled_model_list(load_diffusers_format_model)
799
+ def get_diffusers_model_list():
800
+ if show_diffusers_model_list_detail:
801
+ return cached_diffusers_model_tupled_list
802
+ else:
803
+ return load_diffusers_format_model
804
+
805
+
806
+ def enable_diffusers_model_detail(is_enable: bool = False, model_name: str = ""):
807
+ global show_diffusers_model_list_detail
808
+ show_diffusers_model_list_detail = is_enable
809
+ new_value = model_name
810
+ index = 0
811
+ if model_name in set(load_diffusers_format_model):
812
+ index = load_diffusers_format_model.index(model_name)
813
+ if is_enable:
814
+ new_value = cached_diffusers_model_tupled_list[index][1]
815
+ else:
816
+ new_value = load_diffusers_format_model[index]
817
+ return gr.update(value=is_enable), gr.update(value=new_value, choices=get_diffusers_model_list())
818
+
819
+
820
+ def get_t2i_model_info(repo_id: str):
821
+ from huggingface_hub import HfApi
822
+ api = HfApi()
823
+ try:
824
+ if " " in repo_id or not api.repo_exists(repo_id): return ""
825
+ model = api.model_info(repo_id=repo_id)
826
+ except Exception as e:
827
+ print(f"Error: Failed to get {repo_id}'s info. ")
828
+ return ""
829
+ if model.private or model.gated: return ""
830
+ tags = model.tags
831
+ info = []
832
+ url = f"https://huggingface.co/{repo_id}/"
833
+ if 'diffusers' not in tags: return ""
834
+ if 'diffusers:StableDiffusionXLPipeline' in tags:
835
+ info.append("SDXL")
836
+ elif 'diffusers:StableDiffusionPipeline' in tags:
837
+ info.append("SD1.5")
838
+ if model.card_data and model.card_data.tags:
839
+ info.extend(list_sub(model.card_data.tags, ['text-to-image', 'stable-diffusion', 'stable-diffusion-api', 'safetensors', 'stable-diffusion-xl']))
840
+ info.append(f"DLs: {model.downloads}")
841
+ info.append(f"likes: {model.likes}")
842
+ info.append(model.last_modified.strftime("lastmod: %Y-%m-%d"))
843
+ md = f"Model Info: {', '.join(info)}, [Model Repo]({url})"
844
+ return gr.update(value=md)
845
+
846
+
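For reference, the pipeline detection above keys off repo tags; a standalone check might look like this (requires network access; the repo id is just one example from this Space's model list):

from huggingface_hub import HfApi

tags = HfApi().model_info(repo_id="cagliostrolab/animagine-xl-3.1").tags
is_sdxl = "diffusers:StableDiffusionXLPipeline" in tags  # True for SDXL checkpoints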
847
+ def load_model_prompt_dict():
848
+ import json
849
+ d = {}  # avoid shadowing the built-in 'dict'
850
+ try:
851
+ with open('model_dict.json', encoding='utf-8') as f:
852
+ d = json.load(f)
853
+ except Exception:
854
+ pass
855
+ return d
856
+
857
+
858
+ model_prompt_dict = load_model_prompt_dict()
859
+
860
+
861
+ model_recom_prompt_enabled = True
862
+ animagine_ps = to_list("masterpiece, best quality, very aesthetic, absurdres")
863
+ animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
864
+ pony_ps = to_list("score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
865
+ pony_nps = to_list("source_pony, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
866
+ other_ps = to_list("anime artwork, anime style, studio anime, highly detailed, cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed")
867
+ other_nps = to_list("photo, deformed, black and white, realism, disfigured, low contrast, drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly")
868
+ default_ps = to_list("highly detailed, masterpiece, best quality, very aesthetic, absurdres")
869
+ default_nps = to_list("score_6, score_5, score_4, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
870
+ def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None"):
871
+ if not model_recom_prompt_enabled or not model_name: return prompt, neg_prompt
872
+ prompts = to_list(prompt)
873
+ neg_prompts = to_list(neg_prompt)
874
+ prompts = list_sub(prompts, animagine_ps + pony_ps + other_ps)
875
+ neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps + other_nps)
876
+ last_empty_p = [""] if not prompts and type != "None" else []
877
+ last_empty_np = [""] if not neg_prompts and type != "None" else []
878
+ ps = []
879
+ nps = []
880
+ if model_name in model_prompt_dict.keys():
881
+ ps = to_list(model_prompt_dict[model_name]["prompt"])
882
+ nps = to_list(model_prompt_dict[model_name]["negative_prompt"])
883
+ else:
884
+ ps = default_ps
885
+ nps = default_nps
886
+ prompts = prompts + ps
887
+ neg_prompts = neg_prompts + nps
888
+ prompt = ", ".join(list_uniq(prompts) + last_empty_p)
889
+ neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
890
+ return prompt, neg_prompt
891
+
892
+
893
+ def enable_model_recom_prompt(is_enable: bool = True):
894
+ global model_recom_prompt_enabled
895
+ model_recom_prompt_enabled = is_enable
896
+ return is_enable
897
+
898
+
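Roughly, `insert_model_recom_prompt` strips any preset quality tags already present, then appends the per-model (or default) recommended tags once; a hedged sketch of the effect (exact tags depend on model_dict.json):

# Hypothetical input/output for a model that has no entry in model_dict.json.
p, n = insert_model_recom_prompt("1girl, masterpiece", "lowres", "some/model")
# p ends with the default positive tags ("highly detailed, masterpiece, ..."),
# deduplicated and order-preserving via list_uniq().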
899
+ private_lora_dict = {}
900
+ try:
901
+ with open('lora_dict.json', encoding='utf-8') as f:
902
+ d = json.load(f)
903
+ for k, v in d.items():
904
+ private_lora_dict[escape_lora_basename(k)] = v
905
+ except Exception:
906
+ pass
907
+
908
+
909
+ private_lora_model_list = get_private_lora_model_lists()
910
+ loras_dict = {"None": ["", "", "", "", ""], "": ["", "", "", "", ""]} | private_lora_dict.copy()
911
+ loras_url_to_path_dict = {} # {"URL to download": "local filepath", ...}
912
+ civitai_lora_last_results = {} # {"URL to download": {search results}, ...}
913
+ all_lora_list = []
914
+
915
+
916
+ def get_all_lora_list():
917
+ global all_lora_list
918
+ loras = get_lora_model_list()
919
+ all_lora_list = loras.copy()
920
+ return loras
921
+
922
+
923
+ def get_all_lora_tupled_list():
924
+ global loras_dict
925
+ models = get_all_lora_list()
926
+ if not models: return []
927
+ tupled_list = []
928
+ for model in models:
929
+ #if not model: continue # to avoid GUI-related bug
930
+ basename = Path(model).stem
931
+ key = to_lora_key(model)
932
+ items = None
933
+ if key in loras_dict.keys():
934
+ items = loras_dict.get(key, None)
935
+ else:
936
+ items = get_civitai_info(model)
937
+ if items is not None:
938
+ loras_dict[key] = items
939
+ name = basename
940
+ value = model
941
+ if items and items[2] != "":
942
+ if items[1] == "Pony":
943
+ name = f"{basename} (for {items[1]}🐴, {items[2]})"
944
+ else:
945
+ name = f"{basename} (for {items[1]}, {items[2]})"
946
+ tupled_list.append((name, value))
947
+ return tupled_list
948
+
949
+
950
+ def update_lora_dict(path: str):
951
+ global loras_dict
952
+ key = to_lora_key(path)
953
+ if key in loras_dict.keys(): return
954
+ items = get_civitai_info(path)
955
+ if items is None: return
956
+ loras_dict[key] = items
957
+
958
+
959
+ def download_lora(dl_urls: str):
960
+ global loras_url_to_path_dict
961
+ dl_path = ""
962
+ before = get_local_model_list(directory_loras)
963
+ urls = []
964
+ for url in [url.strip() for url in dl_urls.split(',')]:
965
+ local_path = f"{directory_loras}/{url.split('/')[-1]}"
966
+ if not Path(local_path).exists():
967
+ download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)
968
+ urls.append(url)
969
+ after = get_local_model_list(directory_loras)
970
+ new_files = list_sub(after, before)
971
+ i = 0
972
+ for file in new_files:
973
+ path = Path(file)
974
+ if path.exists():
975
+ new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
976
+ path.resolve().rename(new_path.resolve())
977
+ loras_url_to_path_dict[urls[i]] = str(new_path)
978
+ update_lora_dict(str(new_path))
979
+ dl_path = str(new_path)
980
+ i += 1
981
+ return dl_path
982
+
983
+
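A usage sketch for `download_lora` (hypothetical URL; `hf_token` and `CIVITAI_API_KEY` come from env.py, and downloaded files are renamed with `escape_lora_basename` as above):

# Accepts a comma-separated list of URLs; returns the local path of the
# last file that was actually downloaded in this call.
local_path = download_lora("https://civitai.com/api/download/models/12345")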
984
+ def copy_lora(path: str, new_path: str):
985
+ import shutil
986
+ if path == new_path: return new_path
987
+ cpath = Path(path)
988
+ npath = Path(new_path)
989
+ if cpath.exists():
990
+ try:
991
+ shutil.copy(str(cpath.resolve()), str(npath.resolve()))
992
+ except Exception:
993
+ return None
994
+ update_lora_dict(str(npath))
995
+ return new_path
996
+ else:
997
+ return None
998
+
999
+
1000
+ def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: str, lora5: str):
1001
+ path = download_lora(dl_urls)
1002
+ if path:
1003
+ if not lora1 or lora1 == "None":
1004
+ lora1 = path
1005
+ elif not lora2 or lora2 == "None":
1006
+ lora2 = path
1007
+ elif not lora3 or lora3 == "None":
1008
+ lora3 = path
1009
+ elif not lora4 or lora4 == "None":
1010
+ lora4 = path
1011
+ elif not lora5 or lora5 == "None":
1012
+ lora5 = path
1013
+ choices = get_all_lora_tupled_list()
1014
+ return gr.update(value=lora1, choices=choices), gr.update(value=lora2, choices=choices), gr.update(value=lora3, choices=choices),\
1015
+ gr.update(value=lora4, choices=choices), gr.update(value=lora5, choices=choices)
1016
+
1017
+
1018
+ def set_prompt_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
1019
+ import re
1020
+ lora1 = get_valid_lora_name(lora1)
1021
+ lora2 = get_valid_lora_name(lora2)
1022
+ lora3 = get_valid_lora_name(lora3)
1023
+ lora4 = get_valid_lora_name(lora4)
1024
+ lora5 = get_valid_lora_name(lora5)
1025
+ if not "<lora" in prompt: return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
1026
+ lora1_wt = get_valid_lora_wt(prompt, lora1, lora1_wt)
1027
+ lora2_wt = get_valid_lora_wt(prompt, lora2, lora2_wt)
1028
+ lora3_wt = get_valid_lora_wt(prompt, lora3, lora3_wt)
1029
+ lora4_wt = get_valid_lora_wt(prompt, lora4, lora4_wt)
1030
+ lora5_wt = get_valid_lora_wt(prompt, lora5, lora5_wt)
1031
+ on1, label1, tag1, md1 = get_lora_info(lora1)
1032
+ on2, label2, tag2, md2 = get_lora_info(lora2)
1033
+ on3, label3, tag3, md3 = get_lora_info(lora3)
1034
+ on4, label4, tag4, md4 = get_lora_info(lora4)
1035
+ on5, label5, tag5, md5 = get_lora_info(lora5)
1036
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
1037
+ prompts = prompt.split(",") if prompt else []
1038
+ for p in prompts:
1039
+ p = str(p).strip()
1040
+ if "<lora" in p:
1041
+ result = re.findall(r'<lora:(.+?):(.+?)>', p)
1042
+ if not result: continue
1043
+ key = result[0][0]
1044
+ wt = result[0][1]
1045
+ path = to_lora_path(key)
1046
+ if key not in loras_dict.keys() or not path:
1047
+ path = get_valid_lora_name(path)
1048
+ if not path or path == "None": continue
1049
+ if path in lora_paths:
1050
+ continue
1051
+ elif not on1:
1052
+ lora1 = path
1053
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
1054
+ lora1_wt = safe_float(wt)
1055
+ on1 = True
1056
+ elif not on2:
1057
+ lora2 = path
1058
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
1059
+ lora2_wt = safe_float(wt)
1060
+ on2 = True
1061
+ elif not on3:
1062
+ lora3 = path
1063
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
1064
+ lora3_wt = safe_float(wt)
1065
+ on3 = True
1066
+ elif not on4:
1067
+ lora4 = path
1068
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
1069
+ lora4_wt = safe_float(wt)
1070
+ on4 = True  # matches the other branches; label4/tag4/md4 are not used after this point
1071
+ elif not on5:
1072
+ lora5 = path
1073
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
1074
+ lora5_wt = safe_float(wt)
1075
+ on5 = True
1076
+ return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
1077
+
1078
+
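The `<lora:key:weight>` syntax parsed above can be checked in isolation; this snippet mirrors the regex used by `set_prompt_loras` and `update_loras`:

import re

# Non-greedy capture of name and weight from an inline LoRA tag.
found = re.findall(r'<lora:(.+?):(.+?)>', "1girl, <lora:detail_tweaker:0.8>, smile")
print(found)  # [('detail_tweaker', '0.8')]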
1079
+ def apply_lora_prompt(prompt: str, lora_info: str):
1080
+ if lora_info == "None": return gr.update(value=prompt)
1081
+ tags = prompt.split(",") if prompt else []
1082
+ prompts = normalize_prompt_list(tags)
1083
+ lora_tag = lora_info.replace("/",",")
1084
+ lora_tags = lora_tag.split(",") if str(lora_info) != "None" else []
1085
+ lora_prompts = normalize_prompt_list(lora_tags)
1086
+ empty = [""]
1087
+ prompt = ", ".join(list_uniq(prompts + lora_prompts) + empty)
1088
+ return gr.update(value=prompt)
1089
+
1090
+
1091
+ def update_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
1092
+ import re
1093
+ on1, label1, tag1, md1 = get_lora_info(lora1)
1094
+ on2, label2, tag2, md2 = get_lora_info(lora2)
1095
+ on3, label3, tag3, md3 = get_lora_info(lora3)
1096
+ on4, label4, tag4, md4 = get_lora_info(lora4)
1097
+ on5, label5, tag5, md5 = get_lora_info(lora5)
1098
+ lora_paths = [lora1, lora2, lora3, lora4, lora5]
1099
+ prompts = prompt.split(",") if prompt else []
1100
+ output_prompts = []
1101
+ for p in prompts:
1102
+ p = str(p).strip()
1103
+ if "<lora" in p:
1104
+ result = re.findall(r'<lora:(.+?):(.+?)>', p)
1105
+ if not result: continue
1106
+ key = result[0][0]
1107
+ wt = result[0][1]
1108
+ path = to_lora_path(key)
1109
+ if key not in loras_dict.keys() or not path: continue
1110
+ if path in lora_paths:
1111
+ output_prompts.append(f"<lora:{to_lora_key(path)}:{safe_float(wt):.2f}>")
1112
+ elif p:
1113
+ output_prompts.append(p)
1114
+ lora_prompts = []
1115
+ if on1: lora_prompts.append(f"<lora:{to_lora_key(lora1)}:{lora1_wt:.2f}>")
1116
+ if on2: lora_prompts.append(f"<lora:{to_lora_key(lora2)}:{lora2_wt:.2f}>")
1117
+ if on3: lora_prompts.append(f"<lora:{to_lora_key(lora3)}:{lora3_wt:.2f}>")
1118
+ if on4: lora_prompts.append(f"<lora:{to_lora_key(lora4)}:{lora4_wt:.2f}>")
1119
+ if on5: lora_prompts.append(f"<lora:{to_lora_key(lora5)}:{lora5_wt:.2f}>")
1120
+ output_prompt = ", ".join(list_uniq(output_prompts + lora_prompts + [""]))
1121
+ choices = get_all_lora_tupled_list()
1122
+ return gr.update(value=output_prompt), gr.update(value=lora1, choices=choices), gr.update(value=lora1_wt),\
1123
+ gr.update(value=tag1, label=label1, visible=on1), gr.update(visible=on1), gr.update(value=md1, visible=on1),\
1124
+ gr.update(value=lora2, choices=choices), gr.update(value=lora2_wt),\
1125
+ gr.update(value=tag2, label=label2, visible=on2), gr.update(visible=on2), gr.update(value=md2, visible=on2),\
1126
+ gr.update(value=lora3, choices=choices), gr.update(value=lora3_wt),\
1127
+ gr.update(value=tag3, label=label3, visible=on3), gr.update(visible=on3), gr.update(value=md3, visible=on3),\
1128
+ gr.update(value=lora4, choices=choices), gr.update(value=lora4_wt),\
1129
+ gr.update(value=tag4, label=label4, visible=on4), gr.update(visible=on4), gr.update(value=md4, visible=on4),\
1130
+ gr.update(value=lora5, choices=choices), gr.update(value=lora5_wt),\
1131
+ gr.update(value=tag5, label=label5, visible=on5), gr.update(visible=on5), gr.update(value=md5, visible=on5)
1132
+
1133
+
1134
+ def search_civitai_lora(query, base_model):
1135
+ global civitai_lora_last_results
1136
+ items = search_lora_on_civitai(query, base_model)
1137
+ if not items: return gr.update(choices=[("", "")], value="", visible=False),\
1138
+ gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True)
1139
+ civitai_lora_last_results = {}
1140
+ choices = []
1141
+ for item in items:
1142
+ base_model_name = "Pony🐴" if item['base_model'] == "Pony" else item['base_model']
1143
+ name = f"{item['name']} (for {base_model_name} / By: {item['creator']} / Tags: {', '.join(item['tags'])})"
1144
+ value = item['dl_url']
1145
+ choices.append((name, value))
1146
+ civitai_lora_last_results[value] = item
1147
+ if not choices: return gr.update(choices=[("", "")], value="", visible=False),\
1148
+ gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True)
1149
+ result = civitai_lora_last_results.get(choices[0][1], None)  # default None so the 'if result' check below can fail cleanly
1150
+ md = result['md'] if result else ""
1151
+ return gr.update(choices=choices, value=choices[0][1], visible=True), gr.update(value=md, visible=True),\
1152
+ gr.update(visible=True), gr.update(visible=True)
1153
+
1154
+
1155
+ def select_civitai_lora(search_result):
1156
+ if not "http" in search_result: return gr.update(value=""), gr.update(value="None", visible=True)
1157
+ result = civitai_lora_last_results.get(search_result, None)
1158
+ md = result['md'] if result else ""
1159
+ return gr.update(value=search_result), gr.update(value=md, visible=True)
1160
+
1161
+
1162
+ def search_civitai_lora_json(query, base_model):
1163
+ results = {}
1164
+ items = search_lora_on_civitai(query, base_model)
1165
+ if not items: return gr.update(value=results)
1166
+ for item in items:
1167
+ results[item['dl_url']] = item
1168
+ return gr.update(value=results)
1169
+
1170
+
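The Civitai search helpers return `(label, value)` tuples so Gradio dropdowns can show a descriptive label while keeping the download URL as the value; a minimal illustration with hypothetical data:

import gradio as gr

choices = [("My LoRA (for Pony🐴 / By: someone / Tags: style)",
            "https://civitai.com/api/download/models/12345")]
dd = gr.Dropdown(choices=choices, value=choices[0][1])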
1171
+ quality_prompt_list = [
1172
+ {
1173
+ "name": "None",
1174
+ "prompt": "",
1175
+ "negative_prompt": "lowres",
1176
+ },
1177
+ {
1178
+ "name": "Animagine Common",
1179
+ "prompt": "anime artwork, anime style, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres",
1180
+ "negative_prompt": "lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
1181
+ },
1182
+ {
1183
+ "name": "Pony Anime Common",
1184
+ "prompt": "source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres",
1185
+ "negative_prompt": "source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends",
1186
+ },
1187
+ {
1188
+ "name": "Pony Common",
1189
+ "prompt": "source_anime, score_9, score_8_up, score_7_up",
1190
+ "negative_prompt": "source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends",
1191
+ },
1192
+ {
1193
+ "name": "Animagine Standard v3.0",
1194
+ "prompt": "masterpiece, best quality",
1195
+ "negative_prompt": "lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name",
1196
+ },
1197
+ {
1198
+ "name": "Animagine Standard v3.1",
1199
+ "prompt": "masterpiece, best quality, very aesthetic, absurdres",
1200
+ "negative_prompt": "lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
1201
+ },
1202
+ {
1203
+ "name": "Animagine Light v3.1",
1204
+ "prompt": "(masterpiece), best quality, very aesthetic, perfect face",
1205
+ "negative_prompt": "(low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn",
1206
+ },
1207
+ {
1208
+ "name": "Animagine Heavy v3.1",
1209
+ "prompt": "(masterpiece), (best quality), (ultra-detailed), very aesthetic, illustration, disheveled hair, perfect composition, moist skin, intricate details",
1210
+ "negative_prompt": "longbody, lowres, bad anatomy, bad hands, missing fingers, pubic hair, extra digit, fewer digits, cropped, worst quality, low quality, very displeasing",
1211
+ },
1212
+ ]
1213
+
1214
+
1215
+ style_list = [
1216
+ {
1217
+ "name": "None",
1218
+ "prompt": "",
1219
+ "negative_prompt": "",
1220
+ },
1221
+ {
1222
+ "name": "Cinematic",
1223
+ "prompt": "cinematic still, emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
1224
+ "negative_prompt": "cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
1225
+ },
1226
+ {
1227
+ "name": "Photographic",
1228
+ "prompt": "cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed",
1229
+ "negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
1230
+ },
1231
+ {
1232
+ "name": "Anime",
1233
+ "prompt": "anime artwork, anime style, vibrant, studio anime, highly detailed",
1234
+ "negative_prompt": "photo, deformed, black and white, realism, disfigured, low contrast",
1235
+ },
1236
+ {
1237
+ "name": "Manga",
1238
+ "prompt": "manga style, vibrant, high-energy, detailed, iconic, Japanese comic style",
1239
+ "negative_prompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, Western comic style",
1240
+ },
1241
+ {
1242
+ "name": "Digital Art",
1243
+ "prompt": "concept art, digital artwork, illustrative, painterly, matte painting, highly detailed",
1244
+ "negative_prompt": "photo, photorealistic, realism, ugly",
1245
+ },
1246
+ {
1247
+ "name": "Pixel art",
1248
+ "prompt": "pixel-art, low-res, blocky, pixel art style, 8-bit graphics",
1249
+ "negative_prompt": "sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic",
1250
+ },
1251
+ {
1252
+ "name": "Fantasy art",
1253
+ "prompt": "ethereal fantasy concept art, magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
1254
+ "negative_prompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white",
1255
+ },
1256
+ {
1257
+ "name": "Neonpunk",
1258
+ "prompt": "neonpunk style, cyberpunk, vaporwave, neon, vibes, vibrant, stunningly beautiful, crisp, detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic, ultra detailed, intricate, professional",
1259
+ "negative_prompt": "painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured",
1260
+ },
1261
+ {
1262
+ "name": "3D Model",
1263
+ "prompt": "professional 3d model, octane render, highly detailed, volumetric, dramatic lighting",
1264
+ "negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting",
1265
+ },
1266
+ ]
1267
+
1268
+
1269
+ preset_styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
1270
+ preset_quality = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in quality_prompt_list}
1271
+
1272
+
1273
+ def process_style_prompt(prompt: str, neg_prompt: str, styles_key: str = "None", quality_key: str = "None"):
1274
+ def to_list(s):
1275
+ return [x.strip() for x in s.split(",") if x.strip() != ""]  # filter each item, not the whole string
1276
+
1277
+ def list_sub(a, b):
1278
+ return [e for e in a if e not in b]
1279
+
1280
+ def list_uniq(l):
1281
+ return sorted(set(l), key=l.index)
1282
+
1283
+ animagine_ps = to_list("anime artwork, anime style, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres")
1284
+ animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
1285
+ pony_ps = to_list("source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
1286
+ pony_nps = to_list("source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
1287
+ prompts = to_list(prompt)
1288
+ neg_prompts = to_list(neg_prompt)
1289
+
1290
+ all_styles_ps = []
1291
+ all_styles_nps = []
1292
+ for d in style_list:
1293
+ all_styles_ps.extend(to_list(str(d.get("prompt", ""))))
1294
+ all_styles_nps.extend(to_list(str(d.get("negative_prompt", ""))))
1295
+
1296
+ all_quality_ps = []
1297
+ all_quality_nps = []
1298
+ for d in quality_prompt_list:
1299
+ all_quality_ps.extend(to_list(str(d.get("prompt", ""))))
1300
+ all_quality_nps.extend(to_list(str(d.get("negative_prompt", ""))))
1301
+
1302
+ quality_ps = to_list(preset_quality[quality_key][0])
1303
+ quality_nps = to_list(preset_quality[quality_key][1])
1304
+ styles_ps = to_list(preset_styles[styles_key][0])
1305
+ styles_nps = to_list(preset_styles[styles_key][1])
1306
+
1307
+ prompts = list_sub(prompts, animagine_ps + pony_ps + all_styles_ps + all_quality_ps)
1308
+ neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps + all_styles_nps + all_quality_nps)
1309
+
1310
+ last_empty_p = [""] if not prompts and type != "None" and type != "Auto" and styles_key != "None" and quality_key != "None" else []
1311
+ last_empty_np = [""] if not neg_prompts and type != "None" and type != "Auto" and styles_key != "None" and quality_key != "None" else []
1312
+
1313
+ if type == "Animagine":
1314
+ prompts = prompts + animagine_ps
1315
+ neg_prompts = neg_prompts + animagine_nps
1316
+ elif type == "Pony":
1317
+ prompts = prompts + pony_ps
1318
+ neg_prompts = neg_prompts + pony_nps
1319
+
1320
+ prompts = prompts + styles_ps + quality_ps
1321
+ neg_prompts = neg_prompts + styles_nps + quality_nps
1322
+
1323
+ prompt = ", ".join(list_uniq(prompts) + last_empty_p)
1324
+ neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
1325
+
1326
+ return gr.update(value=prompt), gr.update(value=neg_prompt)
1327
+
1328
+
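A call sketch for `process_style_prompt` (style and quality names must match the preset lists above; the function returns `gr.update` objects for the two textboxes):

# Merges the "Anime" style and "Animagine Standard v3.1" quality preset
# into the user prompt, removing tags the presets already cover.
p_upd, n_upd = process_style_prompt(
    "1girl, anime style", "lowres", "Anime", "Animagine Standard v3.1"
)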
env.py ADDED
@@ -0,0 +1,129 @@
1
+ import os
2
+
3
+ CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
4
+ hf_token = os.environ.get("HF_TOKEN")
5
+ hf_read_token = os.environ.get('HF_READ_TOKEN') # only use for private repo
6
+
7
+ # - **List Models**
8
+ load_diffusers_format_model = [
9
+ 'votepurchase/animagine-xl-3.1',
10
+ 'votepurchase/NSFW-GEN-ANIME-v2',
11
+ 'votepurchase/kivotos-xl-2.0',
12
+ 'votepurchase/holodayo-xl-2.1',
13
+ 'votepurchase/ponyDiffusionV6XL',
14
+ 'votepurchase/AnythingXL_xl',
15
+ 'votepurchase/7thAnimeXLPonyA_v10',
16
+ 'votepurchase/ChilloutMix',
17
+ 'votepurchase/NovelAIRemix',
18
+ 'votepurchase/NSFW-gen-v2',
19
+ 'votepurchase/PerfectDeliberate-Anime_v2',
20
+ 'votepurchase/realpony-xl',
21
+ 'votepurchase/artiwaifu-diffusion-1.0',
22
+ 'votepurchase/Starry-XL-v5.2',
23
+ 'votepurchase/Yaki-Dofu-Mix',
24
+ 'votepurchase/ebara-pony-v1-sdxl',
25
+ 'votepurchase/waiANIMIXPONYXL_v10',
26
+ 'votepurchase/counterfeitV30_v30',
27
+ 'votepurchase/ebara-pony',
28
+ 'votepurchase/Realistic_Vision_V1.4',
29
+ 'votepurchase/pony',
30
+ 'votepurchase/ponymatureSDXL_ponyeclipse10',
31
+ 'votepurchase/waiREALMIX_v70',
32
+ 'votepurchase/waiREALCN_v10',
33
+ 'votepurchase/PVCStyleModelMovable_pony151',
34
+ 'votepurchase/PVCStyleModelMovable_beta27Realistic',
35
+ 'votepurchase/PVCStyleModelFantasy_beta12',
36
+ 'votepurchase/pvcxl-v1-lora',
37
+ 'votepurchase/Realistic_Vision_V2.0',
38
+ 'votepurchase/RealVisXL_V4.0',
39
+ 'votepurchase/juggernautXL_hyper_8step_sfw',
40
+ 'votepurchase/ponyRealism_v21MainVAE',
41
+ 'stabilityai/stable-diffusion-xl-base-1.0',
42
+ 'cagliostrolab/animagine-xl-3.1',
43
+ 'misri/epicrealismXL_v7FinalDestination',
44
+ 'misri/juggernautXL_juggernautX',
45
+ 'misri/zavychromaxl_v80',
46
+ 'SG161222/RealVisXL_V4.0',
47
+ 'misri/newrealityxlAllInOne_Newreality40',
48
+ 'eienmojiki/Anything-XL',
49
+ 'eienmojiki/Starry-XL-v5.2',
50
+ 'gsdf/CounterfeitXL',
51
+ 'kitty7779/ponyDiffusionV6XL',
52
+ 'yodayo-ai/clandestine-xl-1.0',
53
+ 'yodayo-ai/kivotos-xl-2.0',
54
+ 'yodayo-ai/holodayo-xl-2.1',
55
+ 'digiplay/majicMIX_sombre_v2',
56
+ 'digiplay/majicMIX_realistic_v6',
57
+ 'digiplay/majicMIX_realistic_v7',
58
+ 'digiplay/DreamShaper_8',
59
+ 'digiplay/BeautifulArt_v1',
60
+ 'digiplay/DarkSushi2.5D_v1',
61
+ 'digiplay/darkphoenix3D_v1.1',
62
+ 'digiplay/BeenYouLiteL11_diffusers',
63
+ 'rubbrband/revAnimated_v2Rebirth',
64
+ 'youknownothing/cyberrealistic_v50',
66
+ 'Meina/MeinaMix_V11',
67
+ 'Meina/MeinaUnreal_V5',
68
+ 'Meina/MeinaPastel_V7',
69
+ 'rubbrband/realcartoon3d_v16',
70
+ 'rubbrband/realcartoonRealistic_v14',
71
+ 'KBlueLeaf/Kohaku-XL-Epsilon-rev2',
72
+ 'KBlueLeaf/Kohaku-XL-Epsilon-rev3',
73
+ 'KBlueLeaf/Kohaku-XL-Zeta',
74
+ 'kayfahaarukku/UrangDiffusion-1.2',
75
+ 'Eugeoter/artiwaifu-diffusion-1.0',
76
+ 'Raelina/Rae-Diffusion-XL-V2',
77
+ 'Raelina/Raemu-XL-V4',
78
+ ]
79
+
80
+ # List all Models for specified user
81
+ HF_MODEL_USER_LIKES = ["votepurchase"] # sorted by number of likes
82
+ HF_MODEL_USER_EX = ["John6666"] # sorted by a special rule
83
+
84
+
85
+ # - **Download Models**
86
+ download_model_list = [
87
+ ]
88
+
89
+ # - **Download VAEs**
90
+ download_vae_list = [
91
+ 'https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/resolve/main/sdxl.vae.safetensors?download=true',
92
+ 'https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/resolve/main/sdxl_vae-fp16fix-c-1.1-b-0.5.safetensors?download=true',
93
+ "https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/blob/main/sdxl_vae-fp16fix-blessed.safetensors",
94
+ "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.ckpt",
95
+ "https://huggingface.co/stabilityai/sd-vae-ft-ema-original/resolve/main/vae-ft-ema-560000-ema-pruned.ckpt",
96
+ ]
97
+
98
+ # - **Download LoRAs**
99
+ download_lora_list = [
100
+ ]
101
+
102
+ # Download Embeddings
103
+ download_embeds = [
104
+ 'https://huggingface.co/datasets/Nerfgun3/bad_prompt/blob/main/bad_prompt_version2.pt',
105
+ 'https://huggingface.co/embed/negative/resolve/main/EasyNegativeV2.safetensors',
106
+ 'https://huggingface.co/embed/negative/resolve/main/bad-hands-5.pt',
107
+ ]
108
+
109
+ directory_models = 'models'
110
+ os.makedirs(directory_models, exist_ok=True)
111
+ directory_loras = 'loras'
112
+ os.makedirs(directory_loras, exist_ok=True)
113
+ directory_vaes = 'vaes'
114
+ os.makedirs(directory_vaes, exist_ok=True)
115
+ directory_embeds = 'embedings'
116
+ os.makedirs(directory_embeds, exist_ok=True)
117
+
118
+ directory_embeds_sdxl = 'embedings_xl'
119
+ os.makedirs(directory_embeds_sdxl, exist_ok=True)
120
+ directory_embeds_positive_sdxl = 'embedings_xl/positive'
121
+ os.makedirs(directory_embeds_positive_sdxl, exist_ok=True)
122
+
123
+ HF_LORA_PRIVATE_REPOS1 = ['John6666/loratest1', 'John6666/loratest3', 'John6666/loratest4', 'John6666/loratest6']
124
+ HF_LORA_PRIVATE_REPOS2 = ['John6666/loratest10', 'John6666/loratest11','John6666/loratest'] # to be sorted as 1 repo
125
+ HF_LORA_PRIVATE_REPOS = HF_LORA_PRIVATE_REPOS1 + HF_LORA_PRIVATE_REPOS2
126
+ HF_LORA_ESSENTIAL_PRIVATE_REPO = 'John6666/loratest1' # to be downloaded on run app
127
+ HF_VAE_PRIVATE_REPO = 'John6666/vaetest'
128
+ HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO = 'John6666/embeddingstest'
129
+ HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO = 'John6666/embeddingspositivetest'
ja_to_danbooru/character_series_dict.json ADDED
The diff for this file is too large to render. See raw diff
 
ja_to_danbooru/danbooru_tagtype_dict.json ADDED
The diff for this file is too large to render. See raw diff
 
ja_to_danbooru/ja_danbooru_dict.json ADDED
The diff for this file is too large to render. See raw diff
 
ja_to_danbooru/ja_to_danbooru.py ADDED
@@ -0,0 +1,87 @@
1
+ import argparse
2
+ import re
3
+ from pathlib import Path
4
+
5
+
6
+ def load_json_dict(path: str):
7
+ import json
8
+ from pathlib import Path
9
+ d = {}  # avoid shadowing the built-in 'dict'
10
+ if not Path(path).exists(): return d
11
+ try:
12
+ with open(path, encoding='utf-8') as f:
13
+ d = json.load(f)
14
+ except Exception:
15
+ print(f"Failed to open dictionary file: {path}")
16
+ return d
17
+ return d
18
+
19
+
20
+ ja_danbooru_dict = load_json_dict('ja_danbooru_dict.json')
21
+ char_series_dict = load_json_dict('character_series_dict.json')
22
+ tagtype_dict = load_json_dict('danbooru_tagtype_dict.json')
23
+
24
+
25
+ def jatags_to_danbooru_tags(jatags: list[str]):
26
+ from rapidfuzz.process import extractOne
27
+ from rapidfuzz.utils import default_process
28
+ keys = list(ja_danbooru_dict.keys())
29
+ ckeys = list(char_series_dict.keys())
30
+ tags = []
31
+ for jatag in jatags:
32
+ jatag = str(jatag).strip()
33
+ s = default_process(str(jatag))
34
+ e1 = extractOne(s, keys, processor=default_process, score_cutoff=90.0)
35
+ if e1:
36
+ tag = str(ja_danbooru_dict[e1[0]])
37
+ tags.append(tag)
38
+ if tag in tagtype_dict.keys() and tagtype_dict[tag] == "character":
39
+ cs = default_process(tag)
40
+ ce1 = extractOne(cs, ckeys, processor=default_process, score_cutoff=95.0)
41
+ if ce1:
42
+ series = str(char_series_dict[ce1[0]])
43
+ tags.append(series)
44
+ return tags
45
+
46
+
47
+ def jatags_to_danbooru(input_tag, input_file, output_file, is_append):
48
+ if input_file and Path(input_file).exists():
49
+ try:
50
+ with open(input_file, 'r', encoding='utf-8') as f:
51
+ input_tag = f.read()
52
+ except Exception:
53
+ print(f"Failed to open input file: {input_file}")
54
+ ja_tags = [tag.strip() for tag in input_tag.split(",")] if input_tag else []
55
+ tags = jatags_to_danbooru_tags(ja_tags)
56
+ output_tags = ja_tags + tags if is_append else tags
57
+ output_tag = ", ".join(output_tags)
58
+ if output_file:
59
+ try:
60
+ with open(output_file, mode='w', encoding="utf-8") as f:
61
+ f.write(output_tag)
62
+ except Exception:
63
+ print(f"Failed to write output file: {output_file}")
64
+ else:
65
+ print(output_tag)
66
+ return output_tag
67
+
68
+
69
+ if __name__ == "__main__":
70
+ parser = argparse.ArgumentParser()
71
+ parser.add_argument("--tags", default=None, type=str, required=False, help="Input tags.")
72
+ parser.add_argument("--file", default=None, type=str, required=False, help="Input tags from a text file.")
73
+ parser.add_argument("--out", default=None, type=str, help="Output to text file.")
74
+ parser.add_argument("--append", default=False, type=bool, help="Whether the output contains the input tags or not.")
75
+
76
+ args = parser.parse_args()
77
+ assert (args.tags, args.file) != (None, None), "Must provide --tags or --file!"
78
+
79
+ jatags_to_danbooru(args.tags, args.file, args.out, args.append)
80
+
81
+
82
+ # Usage:
83
+ # python ja_to_danbooru.py --tags "女の子, 大室櫻子"
84
+ # python ja_to_danbooru.py --file inputtag.txt
85
+ # python ja_to_danbooru.py --file inputtag.txt --append
86
+ # Datasets: https://huggingface.co/datasets/p1atdev/danbooru-ja-tag-pair-20240715
87
+ # Datasets: https://github.com/ponapon280/danbooru-e621-converter
llmdolphin.py ADDED
@@ -0,0 +1,884 @@
1
+ import spaces
2
+ import gradio as gr
3
+ from llama_cpp import Llama
4
+ from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
5
+ from llama_cpp_agent.providers import LlamaCppPythonProvider
6
+ from llama_cpp_agent.chat_history import BasicChatHistory
7
+ from llama_cpp_agent.chat_history.messages import Roles
8
+ from ja_to_danbooru.ja_to_danbooru import jatags_to_danbooru_tags
9
+ import wrapt_timeout_decorator
10
+
11
+
12
+ llm_models_dir = "./llm_models"
13
+ llm_models = {
14
+ #"": ["", MessagesFormatterType.LLAMA_3],
15
+ #"": ["", MessagesFormatterType.MISTRAL],
16
+ #"": ["", MessagesFormatterType.ALPACA],
17
+ #"": ["", MessagesFormatterType.OPEN_CHAT],
18
+ #"": ["", MessagesFormatterType.CHATML],
19
+ #"": ["", MessagesFormatterType.PHI_3],
20
+ "mn-12b-lyra-v2a1-q5_k_m.gguf": ["HalleyStarbun/MN-12B-Lyra-v2a1-Q5_K_M-GGUF", MessagesFormatterType.CHATML],
21
+ "L3-8B-Tamamo-v1.i1-Q5_K_M.gguf": ["mradermacher/L3-8B-Tamamo-v1-i1-GGUF", MessagesFormatterType.LLAMA_3],
22
+ "Llama-3.1-8B-EZO-1.1-it.Q5_K_M.gguf": ["mradermacher/Llama-3.1-8B-EZO-1.1-it-GGUF", MessagesFormatterType.MISTRAL],
23
+ "MN-12B-Starcannon-v1.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Starcannon-v1-i1-GGUF", MessagesFormatterType.MISTRAL],
24
+ "MN-12B-Starcannon-v2.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Starcannon-v2-i1-GGUF", MessagesFormatterType.CHATML],
25
+ "MN-12B-Starcannon-v3.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Starcannon-v3-i1-GGUF", MessagesFormatterType.CHATML],
26
+ "MN-12B-Starcannon-v4-unofficial.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Starcannon-v4-unofficial-i1-GGUF", MessagesFormatterType.MISTRAL],
27
+ "MN-12B-Starsong-v1.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Starsong-v1-i1-GGUF", MessagesFormatterType.CHATML],
28
+ "Lumimaid-Magnum-12B.i1-Q4_K_M.gguf": ["mradermacher/Lumimaid-Magnum-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
29
+ "Nemo-12B-Marlin-v1.i1-Q4_K_M.gguf": ["mradermacher/Nemo-12B-Marlin-v1-i1-GGUF", MessagesFormatterType.MISTRAL],
30
+ "Nemo-12B-Marlin-v2.i1-Q4_K_M.gguf": ["mradermacher/Nemo-12B-Marlin-v2-i1-GGUF", MessagesFormatterType.MISTRAL],
31
+ "Nemo-12B-Marlin-v3.Q4_K_M.gguf": ["mradermacher/Nemo-12B-Marlin-v3-GGUF", MessagesFormatterType.MISTRAL],
32
+ "Nemo-12B-Marlin-v4.i1-Q4_K_M.gguf": ["mradermacher/Nemo-12B-Marlin-v4-i1-GGUF", MessagesFormatterType.MISTRAL],
33
+ "Nemo-12B-Marlin-v5-Q4_K_M.gguf": ["starble-dev/Nemo-12B-Marlin-v5-GGUF", MessagesFormatterType.CHATML],
34
+ "Nemo-12B-Marlin-v7.Q4_K_M.gguf": ["mradermacher/Nemo-12B-Marlin-v7-GGUF", MessagesFormatterType.MISTRAL],
35
+ "Nemo-12B-Marlin-v8.Q4_K_S.gguf": ["mradermacher/Nemo-12B-Marlin-v8-GGUF", MessagesFormatterType.MISTRAL],
36
+ "NemoDori-v0.2-Upscaled.1-14B.Q4_K_M.gguf": ["mradermacher/NemoDori-v0.2-Upscaled.1-14B-GGUF", MessagesFormatterType.MISTRAL],
37
+ "Fireball-12B-v1.0.i1-Q4_K_M.gguf": ["mradermacher/Fireball-12B-v1.0-i1-GGUF", MessagesFormatterType.MISTRAL],
38
+ "Fireball-Mistral-Nemo-Base-2407-sft-v2.2a.Q4_K_M.gguf": ["mradermacher/Fireball-Mistral-Nemo-Base-2407-sft-v2.2a-GGUF", MessagesFormatterType.MISTRAL],
39
+ "T-III-12B.Q4_K_M.gguf": ["mradermacher/T-III-12B-GGUF", MessagesFormatterType.CHATML],
40
+ "T-IIIa-12B.Q4_K_S.gguf": ["mradermacher/T-IIIa-12B-GGUF", MessagesFormatterType.MISTRAL],
41
+ "StorieCreative.i1-Q4_K_S.gguf": ["mradermacher/StorieCreative-i1-GGUF", MessagesFormatterType.MISTRAL],
42
+ "Deutscher-Pantheon-12B.Q4_K_M.gguf": ["mradermacher/Deutscher-Pantheon-12B-GGUF", MessagesFormatterType.MISTRAL],
43
+ "guns-and-roses-r1-Q4_K_L-imat.gguf": ["Reiterate3680/guns-and-roses-r1-GGUF", MessagesFormatterType.MISTRAL],
44
+ "Trinas_Nectar-8B-model_stock.i1-Q4_K_M.gguf": ["mradermacher/Trinas_Nectar-8B-model_stock-i1-GGUF", MessagesFormatterType.MISTRAL],
45
+ "nemo-12b-hiwaifu-Q4_K_L-imat.gguf": ["Reiterate3680/nemo-12b-hiwaifu-GGUF", MessagesFormatterType.MISTRAL],
46
+ "Soliloquy-7B-v3-Q4_K_L-imat.gguf": ["Reiterate3680/Soliloquy-7B-v3-GGUF", MessagesFormatterType.OPEN_CHAT],
47
+ "Lyra-Gutenberg-mistral-nemo-12B.Q4_K_M.gguf": ["mradermacher/Lyra-Gutenberg-mistral-nemo-12B-GGUF", MessagesFormatterType.MISTRAL],
48
+ "Gutensuppe-mistral-nemo-12B.Q4_K_M.gguf": ["mradermacher/Gutensuppe-mistral-nemo-12B-GGUF", MessagesFormatterType.MISTRAL],
49
+ "IceTea21EnergyDrinkRPV13-dpo240-Q8_0.gguf": ["icefog72/IceTea21EnergyDrinkRPV13-dpo240-gguf", MessagesFormatterType.MISTRAL],
50
+ "Instant-RP-Noodles-12B.Q4_K_M.gguf": ["mradermacher/Instant-RP-Noodles-12B-GGUF", MessagesFormatterType.MISTRAL],
51
+ "Violet_Twilight-v0.1_q4_K_M.gguf": ["Epiculous/Violet_Twilight-v0.1-GGUF", MessagesFormatterType.MISTRAL],
52
+ "Llama3.1-vodka.Q4_K_S.gguf": ["mradermacher/Llama3.1-vodka-GGUF", MessagesFormatterType.MISTRAL],
53
+ "L3.1-Pyro-Mantus-v0.1c-8B.q5_k_m.gguf": ["kromquant/L3.1-Pyro-Mantus-v0.1c-8B-GGUFs", MessagesFormatterType.MISTRAL],
54
+ "Llama-3.1-8B-ArliAI-RPMax-v1.1-Q5_K_M.gguf": ["ArliAI/Llama-3.1-8B-ArliAI-RPMax-v1.1-GGUF", MessagesFormatterType.MISTRAL],
55
+ "l3-notcrazy-8b-q4_k_m.gguf": ["bunnycore/L3-NotCrazy-8B-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
56
+ "Maverick-8B.Q5_K_M.gguf": ["RichardErkhov/bunnycore_-_Maverick-8B-gguf", MessagesFormatterType.LLAMA_3],
57
+ "Fireball-12B-v1.01a.Q4_K_M.gguf": ["mradermacher/Fireball-12B-v1.01a-GGUF", MessagesFormatterType.CHATML],
58
+ "Loki-v5.2.Q5_K_M.gguf": ["mradermacher/Loki-v5.2-GGUF", MessagesFormatterType.MISTRAL],
59
+ "Loki-v5.1.Q5_K_M.gguf": ["mradermacher/Loki-v5.1-GGUF", MessagesFormatterType.MISTRAL],
60
+ "GracieRP-freefallenLora-Gemma2-Inst-9B.i1-Q4_K_M.gguf": ["mradermacher/GracieRP-freefallenLora-Gemma2-Inst-9B-i1-GGUF", MessagesFormatterType.ALPACA],
61
+ "mistral-nemo-gutenberg-12B-v4.Q4_K_M.gguf": ["mradermacher/mistral-nemo-gutenberg-12B-v4-GGUF", MessagesFormatterType.MISTRAL],
62
+ "FunkyMerge-12b-0.1.Q4_K_M.gguf": ["mradermacher/FunkyMerge-12b-0.1-GGUF", MessagesFormatterType.MISTRAL],
63
+ "NemoMix-Unleashed-12B-Q4_K_M.gguf": ["bartowski/NemoMix-Unleashed-12B-GGUF", MessagesFormatterType.MISTRAL],
64
+ "IceTea21EnergyDrinkRPV13.Q4_K_S.gguf": ["mradermacher/IceTea21EnergyDrinkRPV13-GGUF", MessagesFormatterType.MISTRAL],
65
+ "MegaBeam-Mistral-7B-512k-Q5_K_M.gguf": ["bartowski/MegaBeam-Mistral-7B-512k-GGUF", MessagesFormatterType.MISTRAL],
66
+ "azur-8b-model_stock-q4_k_m.gguf": ["DreadPoor/Azur-8B-model_stock-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
67
+ "Chronos-Gold-12B-1.0-Q4_K_M.gguf": ["bartowski/Chronos-Gold-12B-1.0-GGUF", MessagesFormatterType.MISTRAL],
68
+ "L3.1-Romes-Ninomos-Maxxing.Q5_K_M.gguf": ["mradermacher/L3.1-Romes-Ninomos-Maxxing-GGUF", MessagesFormatterType.LLAMA_3],
69
+ "mistral-nemo-minitron-8b-base-q4_k_m.gguf": ["Daemontatox/Mistral-NeMo-Minitron-8B-Base-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
70
+ "Nokstella_coder-8B-model_stock.i1-Q4_K_S.gguf": ["mradermacher/Nokstella_coder-8B-model_stock-i1-GGUF", MessagesFormatterType.LLAMA_3],
71
+ "vtion_model_v1.Q5_K_M.gguf": ["mradermacher/vtion_model_v1-GGUF", MessagesFormatterType.LLAMA_3],
72
+ "storiecreative-q5_k_m.gguf": ["ClaudioItaly/StorieCreative-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
73
+ "L3.1-gramamax.Q5_K_M.gguf": ["mradermacher/L3.1-gramamax-GGUF", MessagesFormatterType.MISTRAL],
74
+ "Evolutionstory128.Q5_K_M.gguf": ["mradermacher/Evolutionstory128-GGUF", MessagesFormatterType.CHATML],
75
+ "sellen-8b-model_stock-q4_k_m.gguf": ["DreadPoor/Sellen-8B-model_stock-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
76
+ "nokstella_coder-8b-model_stock-q4_k_m.gguf": ["DreadPoor/Nokstella_coder-8B-model_stock-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
77
+ "Ultra-Instruct-12B-Q4_K_M.gguf": ["bartowski/Ultra-Instruct-12B-GGUF", MessagesFormatterType.MISTRAL],
78
+ "L3.1-Sithamo-v0.4-8B.q5_k_m.gguf": ["kromquant/L3.1-Siithamo-v0.4-8B-GGUFs", MessagesFormatterType.MISTRAL],
79
+ "Berry-Spark-7B-Fix.Q5_K_M.gguf": ["mradermacher/Berry-Spark-7B-Fix-GGUF", MessagesFormatterType.OPEN_CHAT],
80
+ "llama3.1-gutenberg-8B.Q4_K_S.gguf": ["mradermacher/llama3.1-gutenberg-8B-GGUF", MessagesFormatterType.LLAMA_3],
81
+ "L3.1-Romes-Ninomos.Q4_K_S.gguf": ["mradermacher/L3.1-Romes-Ninomos-GGUF", MessagesFormatterType.LLAMA_3],
82
+ "nemo-12b-summarizer-de-v3.Q4_K_M.gguf": ["mradermacher/nemo-12b-summarizer-de-v3-GGUF", MessagesFormatterType.MISTRAL],
83
+ "suzume-llama-3-8B-multilingual-orpo-borda-top25.Q5_K_M.gguf": ["darkshapes/suzume-llama-3-8B-multilingual-orpo-borda-top25-gguf", MessagesFormatterType.LLAMA_3],
84
+ "Fireball-Mistral-Nemo-Base-2407-sft-v2.1.Q4_K_M.gguf": ["mradermacher/Fireball-Mistral-Nemo-Base-2407-sft-v2.1-GGUF", MessagesFormatterType.MISTRAL],
85
+ "gemma-2-9B-it-advanced-v2.1-Q5_K_M.gguf": ["jsgreenawalt/gemma-2-9B-it-advanced-v2.1-GGUF", MessagesFormatterType.ALPACA],
86
+ "mistral-12b-neptune-6k-instruct.Q4_K_M.gguf": ["mradermacher/mistral-12b-neptune-6k-instruct-GGUF", MessagesFormatterType.MISTRAL],
87
+ "evolutionstory-q5_k_m.gguf": ["ClaudioItaly/Evolutionstory-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
88
+ "AuraFinal12B-Q4_K_L-imat.gguf": ["Reiterate3680/AuraFinal12B-GGUF", MessagesFormatterType.MISTRAL],
89
+ "Hollow-Tail-V1-12B-Q5_K_M.gguf": ["starble-dev/Hollow-Tail-V1-12B-GGUF", MessagesFormatterType.MISTRAL],
90
+ "IceSakeRPTrainingTestV1-7b.Q5_K_M.gguf": ["mradermacher/IceSakeRPTrainingTestV1-7b-GGUF", MessagesFormatterType.MISTRAL],
91
+ "IceTea21EnergyDrinkRPV10.Q5_K_M.gguf": ["mradermacher/IceTea21EnergyDrinkRPV10-GGUF", MessagesFormatterType.MISTRAL],
92
+ "MN-LooseCannon-12B-v2-Q4_K_L-imat.gguf": ["Reiterate3680/MN-LooseCannon-12B-v2-GGUF", MessagesFormatterType.CHATML],
93
+ "MN-MT3-m4-12B-Q4_K_L-imat.gguf": ["Reiterate3680/MN-MT3-m4-12B-GGUF", MessagesFormatterType.CHATML],
94
+ "Mahou-Gutenberg-Nemo-12B.Q4_K_M.gguf": ["mradermacher/Mahou-Gutenberg-Nemo-12B-GGUF", MessagesFormatterType.MISTRAL],
95
+ "Mahou-1.3-llama3.1-8B.Q5_K_M.gguf": ["mradermacher/Mahou-1.3-llama3.1-8B-GGUF", MessagesFormatterType.CHATML],
96
+ "gemma-advanced-v1.Q4_K_M.gguf": ["QuantFactory/gemma-advanced-v1-GGUF", MessagesFormatterType.ALPACA],
97
+ "flammen21X-mistral-7B-Q5_K_M.gguf": ["duyntnet/flammen21X-mistral-7B-imatrix-GGUF", MessagesFormatterType.MISTRAL],
98
+ "Magnum-Instruct-DPO-12B.Q4_K_M.gguf": ["mradermacher/Magnum-Instruct-DPO-12B-GGUF", MessagesFormatterType.MISTRAL],
99
+ "Carasique-v0.3b.Q4_K_S.gguf": ["mradermacher/Carasique-v0.3b-GGUF", MessagesFormatterType.MISTRAL],
100
+ "MN-12b-Sunrose-Q4_K_L-imat.gguf": ["Reiterate3680/MN-12b-Sunrose-GGUF", MessagesFormatterType.MISTRAL],
101
+ "OpenChat-3.5-7B-SOLAR-v2.0.i1-Q4_K_M.gguf": ["mradermacher/OpenChat-3.5-7B-SOLAR-v2.0-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
102
+ "Carasique-v0.3.Q4_K_M.gguf": ["mradermacher/Carasique-v0.3-GGUF", MessagesFormatterType.MISTRAL],
103
+ "Crimson_Dawn-V0.1.Q4_K_M.gguf": ["mradermacher/Crimson_Dawn-V0.1-GGUF", MessagesFormatterType.MISTRAL],
104
+ "Samantha-hermes3-8b-model-fixed.i1-Q5_K_M.gguf": ["mradermacher/Samantha-hermes3-8b-model-fixed-i1-GGUF", MessagesFormatterType.MISTRAL],
105
+ "Hermes-3-Llama-3.1-8B-lorablated-Q5_K_M.gguf": ["bartowski/Hermes-3-Llama-3.1-8B-lorablated-GGUF", MessagesFormatterType.LLAMA_3],
106
+ "stratagem-instruct-12b.i1-Q4_K_M.gguf": ["mradermacher/stratagem-instruct-12b-i1-GGUF", MessagesFormatterType.MISTRAL],
107
+ "omed-llama3.1-8b.Q5_K_M.gguf": ["mradermacher/omed-llama3.1-8b-GGUF", MessagesFormatterType.LLAMA_3],
108
+ "omed-gemma2-9b.i1-Q4_K_M.gguf": ["mradermacher/omed-gemma2-9b-i1-GGUF", MessagesFormatterType.ALPACA],
109
+ "L3.1-Siithamo-v0.3-8B.q5_k_m.gguf": ["kromquant/L3.1-Siithamo-v0.3-8B-GGUFs", MessagesFormatterType.LLAMA_3],
110
+ "mistral-nemo-gutenberg-12B-v3.i1-Q4_K_M.gguf": ["mradermacher/mistral-nemo-gutenberg-12B-v3-i1-GGUF", MessagesFormatterType.MISTRAL],
111
+ "MN-12B-Tarsus-Q4_K_L-imat.gguf": ["Reiterate3680/MN-12B-Tarsus-GGUF", MessagesFormatterType.MISTRAL],
112
+ "Magnum-Instruct-12B.Q4_K_M.gguf": ["mradermacher/Magnum-Instruct-12B-GGUF", MessagesFormatterType.MISTRAL],
113
+ "Rocinante-12B-v1.i1-Q4_K_M.gguf": ["mradermacher/Rocinante-12B-v1-i1-GGUF", MessagesFormatterType.MISTRAL],
114
+ "Llama-3.1-Storm-8B-Q5_K_M.gguf": ["bartowski/Llama-3.1-Storm-8B-GGUF", MessagesFormatterType.MISTRAL],
115
+ "Tess-3-Mistral-Nemo-12B.i1-Q4_K_M.gguf": ["mradermacher/Tess-3-Mistral-Nemo-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
116
+ "Hermes-3-Llama-3.1-8B.Q5_K_M.gguf": ["mradermacher/Hermes-3-Llama-3.1-8B-GGUF", MessagesFormatterType.MISTRAL],
117
+ "Roleplay-Hermes-3-Llama-3.1-8B.i1-Q5_K_M.gguf": ["mradermacher/Roleplay-Hermes-3-Llama-3.1-8B-i1-GGUF", MessagesFormatterType.MISTRAL],
118
+ "Dusk_Rainbow_Ep03-Q5_K_M.gguf": ["SicariusSicariiStuff/Dusk_Rainbow_GGUFs", MessagesFormatterType.LLAMA_3],
119
+ "NemoReRemix-12B-Q4_K_M.gguf": ["bartowski/NemoReRemix-12B-GGUF", MessagesFormatterType.MISTRAL],
120
+ "Aura-NeMo-12B-Q4_K_L-imat.gguf": ["Reiterate3680/Aura-NeMo-12B-GGUF", MessagesFormatterType.MISTRAL],
121
+ "TypeII-12B.Q4_K_S.gguf": ["mradermacher/TypeII-12B-GGUF", MessagesFormatterType.MISTRAL],
122
+ "TypeII-A-12B.Q4_K_M.gguf": ["mradermacher/TypeII-A-12B-GGUF", MessagesFormatterType.CHATML],
123
+ "yuna-ai-v3-atomic-q_4_k_m.gguf": ["yukiarimo/yuna-ai-v3-atomic", MessagesFormatterType.CHATML],
124
+ "Peach-9B-8k-Roleplay-Q4_K_M.gguf": ["bartowski/Peach-9B-8k-Roleplay-GGUF", MessagesFormatterType.LLAMA_3],
125
+ "heartstolen_model-stock_8b-q4_k_m.gguf": ["DreadPoor/HeartStolen_model-stock_8B-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
126
+ "Llama-3.1-8B-ArliAI-Formax-v1.0-Q5_K_M.gguf": ["ArliAI/Llama-3.1-8B-ArliAI-Formax-v1.0-GGUF", MessagesFormatterType.MISTRAL],
127
+ "ArliAI-Llama-3-8B-Formax-v1.0-Q5_K_M.gguf": ["ArliAI/ArliAI-Llama-3-8B-Formax-v1.0-GGUF", MessagesFormatterType.LLAMA_3],
128
+ "Llama-3.1-8B-ArliAI-RPMax-v1.0-Q5_K_M.gguf": ["ArliAI/Llama-3.1-8B-ArliAI-RPMax-v1.0-GGUF", MessagesFormatterType.MISTRAL],
129
+ "badger-writer-llama-3-8b-q4_k_m.gguf": ["A2va/badger-writer-llama-3-8b-Q4_K_M-GGUF", MessagesFormatterType.LLAMA_3],
130
+ "magnum-12b-v2.5-kto-Q4_K_L-imat.gguf": ["Reiterate3680/magnum-12b-v2.5-kto-GGUF", MessagesFormatterType.CHATML],
131
+ "CeleMo-Instruct-128k.Q4_K_S.gguf": ["mradermacher/CeleMo-Instruct-128k-GGUF", MessagesFormatterType.CHATML],
132
+ "KukulStanta-7B-Seamaiiza-7B-v1-slerp-merge.q3_k_l.gguf": ["AlekseiPravdin/KukulStanta-7B-Seamaiiza-7B-v1-slerp-merge-gguf", MessagesFormatterType.MISTRAL],
133
+ "HolyNemo-12B.Q4_K_M.gguf": ["mradermacher/HolyNemo-12B-GGUF", MessagesFormatterType.MISTRAL],
134
+ "mistral-nemo-gutenberg-12B-v2.Q4_K_M.gguf": ["mradermacher/mistral-nemo-gutenberg-12B-v2-GGUF", MessagesFormatterType.MISTRAL],
135
+ "KukulStanta-InfinityRP-7B-slerp.Q5_K_M.gguf": ["mradermacher/KukulStanta-InfinityRP-7B-slerp-GGUF", MessagesFormatterType.MISTRAL],
136
+ "Rocinante-12B-v1a-Q4_K_M.gguf": ["BeaverAI/Rocinante-12B-v1a-GGUF", MessagesFormatterType.MISTRAL],
137
+ "gemma-2-9b-it-WPO-HB.Q4_K_M.gguf": ["mradermacher/gemma-2-9b-it-WPO-HB-GGUF", MessagesFormatterType.ALPACA],
138
+ "mistral-nemo-bophades-12B.Q4_K_M.gguf": ["mradermacher/mistral-nemo-bophades-12B-GGUF", MessagesFormatterType.MISTRAL],
139
+ "Stella-mistral-nemo-12B.Q4_K_S.gguf": ["mradermacher/Stella-mistral-nemo-12B-GGUF", MessagesFormatterType.MISTRAL],
140
+ "Gemma-2-Ataraxy-9B.Q4_K_M.gguf": ["mradermacher/Gemma-2-Ataraxy-9B-GGUF", MessagesFormatterType.ALPACA],
141
+ "NemoRemix-Magnum_V2_Base-12B.Q4_K_S.gguf": ["mradermacher/NemoRemix-Magnum_V2_Base-12B-GGUF", MessagesFormatterType.MISTRAL],
142
+ "Synatra-7B-v0.3-dpo.Q5_K_M.gguf": ["mradermacher/Synatra-7B-v0.3-dpo-GGUF", MessagesFormatterType.MISTRAL],
143
+ "OpenCrystal-12B-Instruct.Q4_K_M.gguf": ["mradermacher/OpenCrystal-12B-Instruct-GGUF", MessagesFormatterType.MISTRAL],
144
+ "dolphinmaid_l3-1_01sl-q5ks.gguf": ["Dunjeon/DolphinMaid_L3.1_8B-01_GGUF", MessagesFormatterType.LLAMA_3],
145
+ "TypeI-12B.Q4_K_S.gguf": ["mradermacher/TypeI-12B-GGUF", MessagesFormatterType.CHATML],
146
+ "lyralin-12b-v1-q5_k_m.gguf": ["NGalrion/Lyralin-12B-v1-Q5_K_M-GGUF", MessagesFormatterType.CHATML],
147
+ "margnum-12b-v1-q5_k_m.gguf": ["NGalrion/Margnum-12B-v1-Q5_K_M-GGUF", MessagesFormatterType.CHATML],
148
+ "L3-Boshima-a.Q5_K_M.gguf": ["mradermacher/L3-Boshima-a-GGUF", MessagesFormatterType.LLAMA_3],
149
+ "canidori-12b-v1-q5_k_m.gguf": ["NGalrion/Canidori-12B-v1-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
150
+ "MN-12B-Estrella-v1.Q4_K_S.gguf": ["mradermacher/MN-12B-Estrella-v1-GGUF", MessagesFormatterType.CHATML],
151
+ "gemmaomni2-2b-q5_k_m.gguf": ["bunnycore/GemmaOmni2-2B-Q5_K_M-GGUF", MessagesFormatterType.ALPACA],
152
+ "MN-LooseCannon-12B-v1.Q4_K_M.gguf": ["mradermacher/MN-LooseCannon-12B-v1-GGUF", MessagesFormatterType.CHATML],
153
+ "Pleiades-12B-v1.Q4_K_M.gguf": ["mradermacher/Pleiades-12B-v1-GGUF", MessagesFormatterType.CHATML],
154
+ "mistral-nemo-gutenberg-12B.Q4_K_S.gguf": ["mradermacher/mistral-nemo-gutenberg-12B-GGUF", MessagesFormatterType.MISTRAL],
155
+ "gemma2-gutenberg-9B.Q4_K_M.gguf": ["mradermacher/gemma2-gutenberg-9B-GGUF", MessagesFormatterType.ALPACA],
156
+ "NemoDori-v0.5-12B-MN-BT.i1-Q4_K_M.gguf": ["mradermacher/NemoDori-v0.5-12B-MN-BT-i1-GGUF", MessagesFormatterType.MISTRAL],
157
+ "NemoDori-v0.2.1-12B-MN-BT.Q4_K_M.gguf": ["mradermacher/NemoDori-v0.2.1-12B-MN-BT-GGUF", MessagesFormatterType.MISTRAL],
158
+ "NemoDori-v0.2.2-12B-MN-ties.Q4_K_M.gguf": ["mradermacher/NemoDori-v0.2.2-12B-MN-ties-GGUF", MessagesFormatterType.MISTRAL],
159
+ "Mini-Magnum-Unboxed-12B-Q4_K_M.gguf": ["concedo/Mini-Magnum-Unboxed-12B-GGUF", MessagesFormatterType.ALPACA],
160
+ "L3.1-Siithamo-v0.1-8B.i1-Q5_K_M.gguf": ["mradermacher/L3.1-Siithamo-v0.1-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
161
+ "L3.1-Siithamo-v0.2-8B.i1-Q5_K_M.gguf": ["mradermacher/L3.1-Siithamo-v0.2-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
162
+ "Kitsunebi-v1-Gemma2-8k-9B.Q5_K_M.gguf": ["grimjim/Kitsunebi-v1-Gemma2-8k-9B-GGUF", MessagesFormatterType.ALPACA],
163
+ "Llama-3-8B-Stroganoff-3.0.i1-Q4_K_M.gguf": ["mradermacher/Llama-3-8B-Stroganoff-3.0-i1-GGUF", MessagesFormatterType.LLAMA_3],
164
+ "NemoDori-v0.2-12B-MN-BT.i1-Q4_K_M.gguf": ["mradermacher/NemoDori-v0.2-12B-MN-BT-i1-GGUF", MessagesFormatterType.CHATML],
165
+ "NemoDori-v0.1-12B-MS.Q4_K_M.gguf": ["mradermacher/NemoDori-v0.1-12B-MS-GGUF", MessagesFormatterType.CHATML],
166
+ "magnum-12b-v2.i1-Q4_K_M.gguf": ["mradermacher/magnum-12b-v2-i1-GGUF", MessagesFormatterType.CHATML],
167
+ "Alpaca-Llama3.1-8B.Q5_K_M.gguf": ["mradermacher/Alpaca-Llama3.1-8B-GGUF", MessagesFormatterType.CHATML],
168
+ "Orthrus-12b-v0.8.Q4_K_M.gguf": ["mradermacher/Orthrus-12b-v0.8-GGUF", MessagesFormatterType.CHATML],
169
+ "LongWriter-llama3.1-8b-Q5_K_M.gguf": ["bartowski/LongWriter-llama3.1-8b-GGUF", MessagesFormatterType.MISTRAL],
170
+ "L3-bluuwhale-SAO-MIX-8B-V1_fp32-merge-calc.Q5_K_M.gguf": ["mradermacher/L3-bluuwhale-SAO-MIX-8B-V1_fp32-merge-calc-GGUF", MessagesFormatterType.LLAMA_3],
171
+ "YetAnotherMerge-v0.5.Q4_K_M.gguf": ["mradermacher/YetAnotherMerge-v0.5-GGUF", MessagesFormatterType.CHATML],
172
+ "open-hermes-sd-finetune-erot-story.Q5_K_M.gguf": ["mradermacher/open-hermes-sd-finetune-erot-story-GGUF", MessagesFormatterType.CHATML],
173
+ "OntologyHermes-2.5-Mistral-7B.Q6_K.gguf": ["mradermacher/OntologyHermes-2.5-Mistral-7B-GGUF", MessagesFormatterType.MISTRAL],
174
+ "cosmic-2.i1-Q5_K_M.gguf": ["mradermacher/cosmic-2-i1-GGUF", MessagesFormatterType.MISTRAL],
175
+ "L3-Horizon-Anteros-Ara-v0.1-9B.i1-Q4_K_M.gguf": ["mradermacher/L3-Horizon-Anteros-Ara-v0.1-9B-i1-GGUF", MessagesFormatterType.LLAMA_3],
176
+ "Mistral-Nemo-Instruct-2407.i1-Q4_K_M.gguf": ["mradermacher/Mistral-Nemo-Instruct-2407-i1-GGUF", MessagesFormatterType.MISTRAL],
177
+ "Ellaria-9B.i1-Q4_K_M.gguf": ["mradermacher/Ellaria-9B-i1-GGUF", MessagesFormatterType.ALPACA],
178
+ "Apollo-0.4-Llama-3.1-8B.i1-Q5_K_M.gguf": ["mradermacher/Apollo-0.4-Llama-3.1-8B-i1-GGUF", MessagesFormatterType.MISTRAL],
179
+ "NemoRemix-12B.Q4_K_M.gguf": ["mradermacher/NemoRemix-12B-GGUF", MessagesFormatterType.MISTRAL],
180
+ "32K_Selfbot.i1-Q5_K_M.gguf": ["mradermacher/32K_Selfbot-i1-GGUF", MessagesFormatterType.MISTRAL],
181
+ "Viviana_V3.i1-Q5_K_M.gguf": ["mradermacher/Viviana_V3-i1-GGUF", MessagesFormatterType.MISTRAL],
182
+ "dolphin-2.9.4-llama3.1-8b.i1-Q5_K_M.gguf": ["mradermacher/dolphin-2.9.4-llama3.1-8b-i1-GGUF", MessagesFormatterType.CHATML],
183
+ "L3-SAO-MIX-8B-V1.i1-Q5_K_M.gguf": ["mradermacher/L3-SAO-MIX-8B-V1-i1-GGUF", MessagesFormatterType.LLAMA_3],
184
+ "bestofllama3-8b-stock-q5_k_m.gguf": ["bunnycore/BestofLLama3-8B-stock-Q5_K_M-GGUF", MessagesFormatterType.LLAMA_3],
185
+ "L3-Umbral-Mind-RP-v3.0-8B-Q5_K_M.gguf": ["bartowski/L3-Umbral-Mind-RP-v3.0-8B-GGUF", MessagesFormatterType.LLAMA_3],
186
+ "Tess-3-Mistral-Nemo-Q4_K_M.gguf": ["bartowski/Tess-3-Mistral-Nemo-GGUF", MessagesFormatterType.MISTRAL],
187
+ "Llama-3-8B-Stroganoff-2.0.Q5_K_M.gguf": ["RichardErkhov/HiroseKoichi_-_Llama-3-8B-Stroganoff-2.0-gguf", MessagesFormatterType.LLAMA_3],
188
+ "L3-8B-Helium3.Q5_K_M.gguf": ["mradermacher/L3-8B-Helium3-GGUF", MessagesFormatterType.LLAMA_3],
189
+ "MN-12B-Lyra-v1.i1-Q4_K_M.gguf": ["mradermacher/MN-12B-Lyra-v1-i1-GGUF", MessagesFormatterType.CHATML],
190
+ "mahou-1.3-mistral-nemo-12b-q5_k_m.gguf": ["sh1njuku/Mahou-1.3-mistral-nemo-12B-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
191
+ "Humanish-Roleplay-Llama-3.1-8B.i1-Q5_K_M.gguf": ["mradermacher/Humanish-Roleplay-Llama-3.1-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
192
+ "Llama-3-Luminurse-v0.1-OAS-8B.Q5_K_M.gguf": ["grimjim/Llama-3-Luminurse-v0.1-OAS-8B-GGUF", MessagesFormatterType.LLAMA_3],
193
+ "L3.1-8B-Niitama-v1.1-Q5_K_M-imat.gguf": ["L3.1-8B-Niitama-v1.1-Q5_K_M-imat.gguf", MessagesFormatterType.MISTRAL],
194
+ "Evolved-Llama3-8B.i1-Q5_K_M.gguf": ["mradermacher/Evolved-Llama3-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
195
+ "Pantheon-RP-1.5-12b-Nemo.i1-Q4_K_M.gguf": ["mradermacher/Pantheon-RP-1.5-12b-Nemo-i1-GGUF", MessagesFormatterType.CHATML],
196
+ "DarkIdol-Llama-3.1-8B-Instruct-1.2-Uncensored-Q5_K_M.gguf": ["bartowski/DarkIdol-Llama-3.1-8B-Instruct-1.2-Uncensored-GGUF", MessagesFormatterType.LLAMA_3],
197
+ "Llama-3-Swallow-8B-Instruct-v0.1.Q5_K_M.gguf": ["YukiTomita-CC/Llama-3-Swallow-8B-Instruct-v0.1-IMat-GGUF_dolly-15k-ja-prompt", MessagesFormatterType.ALPACA],
198
+ "natsumura-storytelling-rp-1.0-llama-3.1-8B.Q5_K_M.gguf": ["tohur/natsumura-storytelling-rp-1.0-llama-3.1-8b-GGUF", MessagesFormatterType.LLAMA_3],
199
+ "mini-magnum-12b-v1.1.i1-Q4_K_M.gguf": ["mradermacher/mini-magnum-12b-v1.1-i1-GGUF", MessagesFormatterType.MISTRAL],
200
+ "MN-12B-Celeste-V1.9-Q4_K_M.gguf": ["bartowski/MN-12B-Celeste-V1.9-GGUF", MessagesFormatterType.CHATML],
201
+ "Llama-3.1-Techne-RP-8b-v1.i1-Q5_K_M.gguf": ["mradermacher/Llama-3.1-Techne-RP-8b-v1-i1-GGUF", MessagesFormatterType.LLAMA_3],
202
+ "L3-Rhaenys-8B.i1-Q5_K_M.gguf": ["mradermacher/L3-Rhaenys-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
203
+ "Llama-3.1-8b-Uncensored-Dare.i1-Q4_K_M.gguf": ["mradermacher/Llama-3.1-8b-Uncensored-Dare-i1-GGUF", MessagesFormatterType.LLAMA_3],
204
+ "Eros_Scribe-10.7b-v3.Q4_K_M.gguf": ["mradermacher/Eros_Scribe-10.7b-v3-GGUF", MessagesFormatterType.MISTRAL],
205
+ "Gemma2-Nephilim-v3-9B.i1-Q5_K_M.gguf": ["mradermacher/Gemma2-Nephilim-v3-9B-i1-GGUF", MessagesFormatterType.ALPACA],
206
+ "Nemomix-v4.0-12B-Q4_K_M.gguf": ["bartowski/Nemomix-v4.0-12B-GGUF", MessagesFormatterType.CHATML],
207
+ "Nemomix-v0.1-12B-Q4_K_M.gguf": ["bartowski/Nemomix-v0.1-12B-GGUF", MessagesFormatterType.CHATML],
208
+ "Loki-v2.1.i1-Q5_K_M.gguf": ["mradermacher/Loki-v2.1-i1-GGUF", MessagesFormatterType.LLAMA_3],
209
+ "llama3-8B-Special-Dark-RP2.i1-Q5_K_M.gguf": ["mradermacher/llama3-8B-Special-Dark-RP2-i1-GGUF", MessagesFormatterType.LLAMA_3],
210
+ "L3-8B-Celeste-v1-Q5_K_M.gguf": ["bartowski/L3-8B-Celeste-v1-GGUF", MessagesFormatterType.LLAMA_3],
211
+ "L3-8B-Celeste-V1.2-Q5_K_M.gguf": ["bartowski/L3-8B-Celeste-V1.2-GGUF", MessagesFormatterType.LLAMA_3],
212
+ "L3.1-8B-Celeste-V1.5.i1-Q5_K_M.gguf": ["mradermacher/L3.1-8B-Celeste-V1.5-i1-GGUF", MessagesFormatterType.MISTRAL],
213
+ "Celeste-12B-V1.6-Q4_K_M.gguf": ["bartowski/Celeste-12B-V1.6-GGUF", MessagesFormatterType.MISTRAL],
214
+ "L3-SthenoMaidBlackroot-8B-V1-exp5-11-Q4_K_M.gguf": ["DavidAU/L3-SthenoMaidBlackroot-8.9B-V1-BRAINSTORM-5x-GGUF", MessagesFormatterType.LLAMA_3],
215
+ "Llama-3.1-8B-Instruct-Fei-v1-Uncensored.i1-Q5_K_M.gguf": ["mradermacher/Llama-3.1-8B-Instruct-Fei-v1-Uncensored-i1-GGUF", MessagesFormatterType.MISTRAL],
216
+ "IceCoffeeRP-7b.i1-Q5_K_M.gguf": ["mradermacher/IceCoffeeRP-7b-i1-GGUF", MessagesFormatterType.ALPACA],
217
+ "lumi-nemo-e2.0.Q4_K_M.gguf": ["mradermacher/lumi-nemo-e2.0-GGUF", MessagesFormatterType.MISTRAL],
218
+ "Lumimaid-v0.2-8B.i1-Q5_K_M.gguf": ["mradermacher/Lumimaid-v0.2-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
219
+ "Lumimaid-v0.2-12B.i1-Q4_K_M.gguf": ["mradermacher/Lumimaid-v0.2-12B-i1-GGUF", MessagesFormatterType.LLAMA_3],
220
+ "Llama-3.1-8B-Instruct-abliterated_via_adapter.Q5_K_M.gguf": ["grimjim/Llama-3.1-8B-Instruct-abliterated_via_adapter-GGUF", MessagesFormatterType.LLAMA_3],
221
+ "Llama-Nephilim-Metamorphosis-v1-8B.Q5_K_M.gguf": ["grimjim/Llama-Nephilim-Metamorphosis-v1-8B-GGUF", MessagesFormatterType.LLAMA_3],
222
+ "Meta-Llama-3.1-8B-Instruct-abliterated.i1-Q5_K_M.gguf": ["mradermacher/Meta-Llama-3.1-8B-Instruct-abliterated-i1-GGUF", MessagesFormatterType.LLAMA_3],
223
+ "pstella-16b.Q5_K_M.gguf": ["mradermacher/pstella-16b-GGUF", MessagesFormatterType.LLAMA_3],
224
+ "DarkIdol-Llama-3.1-8B-Instruct-1.1-Uncensored.i1-Q5_K_M.gguf": ["mradermacher/DarkIdol-Llama-3.1-8B-Instruct-1.1-Uncensored-i1-GGUF", MessagesFormatterType.LLAMA_3],
225
+ "Mistral-Nemo-Instruct-2407-Q4_K_M.gguf": ["bartowski/Mistral-Nemo-Instruct-2407-GGUF", MessagesFormatterType.MISTRAL],
226
+ "ghost-8b-beta.q5_k.gguf": ["ZeroWw/ghost-8b-beta-GGUF", MessagesFormatterType.MISTRAL],
227
+ "Honey-Yuzu-13B.Q4_K_M.gguf": ["backyardai/Honey-Yuzu-13B-GGUF", MessagesFormatterType.MISTRAL],
228
+ "llama3-8B-DarkIdol-2.3-Uncensored-32K.i1-Q5_K_M.gguf": ["mradermacher/llama3-8B-DarkIdol-2.3-Uncensored-32K-i1-GGUF", MessagesFormatterType.LLAMA_3],
229
+ "LLaMa-3-Instruct-SmallPrefMix-ORPO-8B.i1-Q5_K_M.gguf": ["mradermacher/LLaMa-3-Instruct-SmallPrefMix-ORPO-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
230
+ "NeuralLemon.Q5_K_M.gguf": ["backyardai/NeuralLemon-GGUF", MessagesFormatterType.MISTRAL],
231
+ "Llama-3-Intermix.i1-Q5_K_M.gguf": ["mradermacher/Llama-3-Intermix-i1-GGUF", MessagesFormatterType.LLAMA_3],
232
+ "C3TR-Adapter-Q4_k_m.gguf": ["webbigdata/C3TR-Adapter_gguf", MessagesFormatterType.ALPACA],
233
+ "Llama-3-8B-Magpie-Mix-RC-UltraDPO-08-3.Q5_K_M.gguf": ["mradermacher/Llama-3-8B-Magpie-Mix-RC-UltraDPO-08-3-GGUF", MessagesFormatterType.LLAMA_3],
234
+ "Tiger-Gemma-9B-v2.Q4_K_M.gguf": ["QuantFactory/Tiger-Gemma-9B-v2-GGUF", MessagesFormatterType.ALPACA],
235
+ "gemma-2-9b-it-SimPO.i1-Q4_K_M.gguf": ["mradermacher/gemma-2-9b-it-SimPO-i1-GGUF", MessagesFormatterType.ALPACA],
236
+ "Gemma-2-9B-It-SPPO-Iter3.Q4_K_M.iMatrix.gguf": ["MCZK/Gemma-2-9B-It-SPPO-Iter3-GGUF", MessagesFormatterType.ALPACA],
237
+ "Llama-3-NeuralPaca-8b.Q4_K_M.gguf": ["RichardErkhov/NeuralNovel_-_Llama-3-NeuralPaca-8b-gguf", MessagesFormatterType.ALPACA],
238
+ "SaoRPM-2x8B.i1-Q4_K_M.gguf": ["mradermacher/SaoRPM-2x8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
239
+ "L3-Hecate-8B-v1.2.Q4_K_M.gguf": ["mradermacher/L3-Hecate-8B-v1.2-GGUF", MessagesFormatterType.LLAMA_3],
240
+ "Mahou-1.3b-llama3-8B.i1-Q4_K_M.gguf": ["mradermacher/Mahou-1.3b-llama3-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
241
+ "SwallowMaid-8B-L3-SPPO-abliterated.i1-Q5_K_M.gguf": ["mradermacher/SwallowMaid-8B-L3-SPPO-abliterated-i1-GGUF", MessagesFormatterType.LLAMA_3],
242
+ "L3-8B-Lunar-Stheno.i1-Q5_K_M.gguf": ["mradermacher/L3-8B-Lunar-Stheno-i1-GGUF", MessagesFormatterType.LLAMA_3],
243
+ "llama3_Loradent.Q4_K_M.gguf": ["mradermacher/llama3_Loradent-GGUF", MessagesFormatterType.LLAMA_3],
244
+ "Llama-3-8B-Stroganoff.i1-Q4_K_M.gguf": ["mradermacher/Llama-3-8B-Stroganoff-i1-GGUF", MessagesFormatterType.LLAMA_3],
245
+ "L3-8B-EnchantedForest-v0.5.i1-Q4_K_M.gguf": ["mradermacher/L3-8B-EnchantedForest-v0.5-i1-GGUF", MessagesFormatterType.LLAMA_3],
246
+ "gemma-radiation-rp-9b-q5_k_m.gguf": ["pegasus912/Gemma-Radiation-RP-9B-Q5_K_M-GGUF", MessagesFormatterType.MISTRAL],
247
+ "Magic-Dolphin-7b.Q4_K_M.gguf": ["mradermacher/Magic-Dolphin-7b-GGUF", MessagesFormatterType.MISTRAL],
248
+ "mathstral-7B-v0.1-Q5_K_M.gguf": ["bartowski/mathstral-7B-v0.1-GGUF", MessagesFormatterType.MISTRAL],
249
+ "Gemma2-9B-it-Boku-v1.Q5_K_M.gguf": ["mradermacher/Gemma2-9B-it-Boku-v1-GGUF", MessagesFormatterType.MISTRAL],
250
+ "Gemma-2-9B-It-SPPO-Iter3-Q5_K_M.gguf": ["grapevine-AI/Gemma-2-9B-It-SPPO-Iter3-GGUF", MessagesFormatterType.MISTRAL],
251
+ "L3-8B-Niitama-v1.i1-Q4_K_M.gguf": ["mradermacher/L3-8B-Niitama-v1-i1-GGUF", MessagesFormatterType.LLAMA_3],
252
+ "Maidphin-Kunoichi-7B.Q5_K_M.gguf": ["RichardErkhov/nbeerbower_-_Maidphin-Kunoichi-7B-gguf", MessagesFormatterType.MISTRAL],
253
+ "L3-15B-EtherealMaid-t0.0001.i1-Q4_K_M.gguf": ["mradermacher/L3-15B-EtherealMaid-t0.0001-i1-GGUF", MessagesFormatterType.LLAMA_3],
254
+ "L3-15B-MythicalMaid-t0.0001.i1-Q4_K_M.gguf": ["mradermacher/L3-15B-MythicalMaid-t0.0001-i1-GGUF", MessagesFormatterType.LLAMA_3],
255
+ "llama-3-Nephilim-v3-8B.Q5_K_M.gguf": ["grimjim/llama-3-Nephilim-v3-8B-GGUF", MessagesFormatterType.LLAMA_3],
256
+ "NarutoDolphin-10B.Q5_K_M.gguf": ["RichardErkhov/FelixChao_-_NarutoDolphin-10B-gguf", MessagesFormatterType.MISTRAL],
257
+ "l3-8b-tamamo-v1-q8_0.gguf": ["Ransss/L3-8B-Tamamo-v1-Q8_0-GGUF", MessagesFormatterType.LLAMA_3],
258
+ "Tiger-Gemma-9B-v1-Q4_K_M.gguf": ["bartowski/Tiger-Gemma-9B-v1-GGUF", MessagesFormatterType.LLAMA_3],
259
+ "TooManyMixRolePlay-7B-Story_V3.5.Q4_K_M.gguf": ["mradermacher/TooManyMixRolePlay-7B-Story_V3.5-GGUF", MessagesFormatterType.LLAMA_3],
260
+ "natsumura-llama3-v1.1-8b.Q4_K_M.gguf": ["mradermacher/natsumura-llama3-v1.1-8b-GGUF", MessagesFormatterType.LLAMA_3],
261
+ "natsumura-llama3-v1-8b.i1-Q4_K_M.gguf": ["mradermacher/natsumura-llama3-v1-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
262
+ "nephra_v1.0.Q5_K_M.gguf": ["PrunaAI/yodayo-ai-nephra_v1.0-GGUF-smashed", MessagesFormatterType.LLAMA_3],
263
+ "DPO-ONLY-Zephyr-7B.Q6_K.gguf": ["mradermacher/DPO-ONLY-Zephyr-7B-GGUF", MessagesFormatterType.LLAMA_3],
264
+ "L3-Deluxe-Scrambled-Eggs-On-Toast-8B.Q8_0.gguf": ["mradermacher/L3-Deluxe-Scrambled-Eggs-On-Toast-8B-GGUF", MessagesFormatterType.LLAMA_3],
265
+ "L3-Scrambled-Eggs-On-Toast-8B.i1-Q6_K.gguf": ["mradermacher/L3-Scrambled-Eggs-On-Toast-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
266
+ "Llama-3-uncensored-Dare-1.Q4_K_M.gguf": ["mradermacher/Llama-3-uncensored-Dare-1-GGUF", MessagesFormatterType.LLAMA_3],
267
+ "llama3-8B-DarkIdol-2.2-Uncensored-1048K.i1-Q6_K.gguf": ["mradermacher/llama3-8B-DarkIdol-2.2-Uncensored-1048K-i1-GGUF", MessagesFormatterType.LLAMA_3],
268
+ "dolphin-2.9.3-mistral-7b-32k-q4_k_m.gguf": ["huggingkot/dolphin-2.9.3-mistral-7B-32k-Q4_K_M-GGUF", MessagesFormatterType.MISTRAL],
269
+ "dolphin-2.9.3-mistral-7B-32k-Q5_K_M.gguf": ["bartowski/dolphin-2.9.3-mistral-7B-32k-GGUF", MessagesFormatterType.MISTRAL],
270
+ "Lexi-Llama-3-8B-Uncensored_Q5_K_M.gguf": ["Orenguteng/Llama-3-8B-Lexi-Uncensored-GGUF", MessagesFormatterType.LLAMA_3],
271
+ "Llama3-Sophie.Q8_0.gguf": ["mradermacher/Llama3-Sophie-GGUF", MessagesFormatterType.LLAMA_3],
272
+ "Aura-Uncensored-OAS-8B-L3.i1-Q4_K_M.gguf": ["mradermacher/Aura-Uncensored-OAS-8B-L3-i1-GGUF", MessagesFormatterType.LLAMA_3],
273
+ "L3-Uncen-Merger-Omelette-RP-v0.2-8B-Q5_K_S-imat.gguf": ["LWDCLS/L3-Uncen-Merger-Omelette-RP-v0.2-8B-GGUF-IQ-Imatrix-Request", MessagesFormatterType.LLAMA_3],
274
+ "qwen2-diffusion-prompter-v01-q6_k.gguf": ["trollek/Qwen2-0.5B-DiffusionPrompter-v0.1-GGUF", MessagesFormatterType.LLAMA_3],
275
+ "Smegmma-Deluxe-9B-v1-Q6_K.gguf": ["bartowski/Smegmma-Deluxe-9B-v1-GGUF", MessagesFormatterType.MISTRAL],
276
+ "Mahou-1.3c-mistral-7B.i1-Q6_K.gguf": ["mradermacher/Mahou-1.3c-mistral-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
277
+ "Silicon-Maid-7B-Q8_0_X.gguf": ["duyntnet/Silicon-Maid-7B-imatrix-GGUF", MessagesFormatterType.ALPACA],
278
+ "l3-umbral-mind-rp-v3.0-8b-q5_k_m-imat.gguf": ["Casual-Autopsy/L3-Umbral-Mind-RP-v3.0-8B-Q5_K_M-GGUF", MessagesFormatterType.LLAMA_3],
279
+ "Meta-Llama-3.1-8B-Claude-iMat-Q5_K_M.gguf": ["InferenceIllusionist/Meta-Llama-3.1-8B-Claude-iMat-GGUF", MessagesFormatterType.LLAMA_3],
280
+ "Phi-3.1-mini-128k-instruct-Q6_K_L.gguf": ["bartowski/Phi-3.1-mini-128k-instruct-GGUF", MessagesFormatterType.PHI_3],
281
+ "tifa-7b-qwen2-v0.1.q4_k_m.gguf": ["Tifa-RP/Tifa-7B-Qwen2-v0.1-GGUF", MessagesFormatterType.OPEN_CHAT],
282
+ "Oumuamua-7b-RP_Q5_K_M.gguf": ["Aratako/Oumuamua-7b-RP-GGUF", MessagesFormatterType.MISTRAL],
283
+ "Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW_iMat_Ch200_IQ4_XS.gguf": ["dddump/Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW-gguf", MessagesFormatterType.VICUNA],
284
+ "ChatWaifu_v1.2.1.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.2.1-GGUF", MessagesFormatterType.MISTRAL],
285
+ "ChatWaifu_v1.1.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.1-GGUF", MessagesFormatterType.MISTRAL],
286
+ "Ninja-V2-7B_Q4_K_M.gguf": ["Local-Novel-LLM-project/Ninja-V2-7B-GGUF", MessagesFormatterType.VICUNA],
287
+ "Yamase-12B.Q4_K_M.gguf": ["mradermacher/Yamase-12B-GGUF", MessagesFormatterType.MISTRAL],
288
+ "borea-phi-3.5-mini-instruct-common.Q5_K_M.gguf": ["keitokei1994/Borea-Phi-3.5-mini-Instruct-Common-GGUF", MessagesFormatterType.PHI_3],
289
+ "Llama-3-Nymeria-ELYZA-8B.i1-Q4_K_M.gguf": ["mradermacher/Llama-3-Nymeria-ELYZA-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
290
+ "suzume-llama-3-8B-japanese.Q4_K_M.gguf": ["PrunaAI/lightblue-suzume-llama-3-8B-japanese-GGUF-smashed", MessagesFormatterType.LLAMA_3],
291
+ "suzume-llama-3-8B-multilingual-orpo-borda-top25.Q4_K_M.gguf": ["RichardErkhov/lightblue_-_suzume-llama-3-8B-multilingual-orpo-borda-top25-gguf", MessagesFormatterType.LLAMA_3],
292
+ "Bungo-L3-8B.Q5_K_M.gguf": ["backyardai/Bungo-L3-8B-GGUF", MessagesFormatterType.LLAMA_3],
293
+ "ezo-common-t2-2b-gemma-2-it.Q6_K.gguf": ["keitokei1994/EZO-Common-T2-2B-gemma-2-it-GGUF", MessagesFormatterType.ALPACA],
294
+ "Llama-3-EZO-8b-Common-it.Q5_K_M.iMatrix.gguf": ["MCZK/Llama-3-EZO-8b-Common-it-GGUF", MessagesFormatterType.MISTRAL],
295
+ "EZO-Common-9B-gemma-2-it.i1-Q4_K_M.gguf": ["mradermacher/EZO-Common-9B-gemma-2-it-i1-GGUF", MessagesFormatterType.MISTRAL],
296
+ }
297
+ llm_formats = {
298
+ "MISTRAL": MessagesFormatterType.MISTRAL,
299
+ "CHATML": MessagesFormatterType.CHATML,
300
+ "VICUNA": MessagesFormatterType.VICUNA,
301
+ "LLAMA 2": MessagesFormatterType.LLAMA_2,
302
+ "SYNTHIA": MessagesFormatterType.SYNTHIA,
303
+ "NEURAL CHAT": MessagesFormatterType.NEURAL_CHAT,
304
+ "SOLAR": MessagesFormatterType.SOLAR,
305
+ "OPEN CHAT": MessagesFormatterType.OPEN_CHAT,
306
+ "ALPACA": MessagesFormatterType.ALPACA,
307
+ "CODE DS": MessagesFormatterType.CODE_DS,
308
+ "B22": MessagesFormatterType.B22,
309
+ "LLAMA 3": MessagesFormatterType.LLAMA_3,
310
+ "PHI 3": MessagesFormatterType.PHI_3,
311
+ "Autocoder": MessagesFormatterType.AUTOCODER,
312
+ "DeepSeek Coder v2": MessagesFormatterType.DEEP_SEEK_CODER_2,
313
+ "Gemma 2": MessagesFormatterType.ALPACA,
314
+ "Qwen2": MessagesFormatterType.OPEN_CHAT,
315
+ }
316
+ # https://github.com/Maximilian-Winter/llama-cpp-agent
317
+ llm_languages = ["English", "Japanese", "Chinese"]
318
+ llm_models_tupled_list = []
319
+ default_llm_model_filename = list(llm_models.keys())[0]
320
+ override_llm_format = None
321
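Each `llm_models` entry pairs a GGUF filename with its source repo and a `MessagesFormatterType`, and `llm_formats` exposes those enums under display names. A minimal sketch of how one entry resolves to a local file and chat template (illustration only; it assumes the `llm_models_dir` download directory defined earlier in this file):

```python
# Illustration, not part of the diff: resolve one llm_models entry.
from huggingface_hub import hf_hub_download

filename = "Mistral-Nemo-Instruct-2407-Q4_K_M.gguf"    # a key from llm_models above
repo_id, chat_template = llm_models[filename]          # ("bartowski/...-GGUF", MessagesFormatterType.MISTRAL)
local_path = hf_hub_download(repo_id=repo_id, filename=filename, local_dir=llm_models_dir)
print(local_path, chat_template)
```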
+
322
+
323
+ def to_list(s):
324
+ return [x.strip() for x in s.split(",") if x.strip() != ""]
325
+
326
+
327
+ def list_uniq(l):
328
+ return sorted(set(l), key=l.index)
329
+
330
+
331
+ @wrapt_timeout_decorator.timeout(dec_timeout=3.5)
332
+ def to_list_ja(s):
333
+ import re
334
+ s = re.sub(r'[、。]', ',', s)
335
+ return [x.strip() for x in s.split(",") if x.strip() != ""]
336
+
337
+
338
+ def is_japanese(s):
339
+ import unicodedata
340
+ for ch in s:
341
+ name = unicodedata.name(ch, "")
342
+ if "CJK UNIFIED" in name or "HIRAGANA" in name or "KATAKANA" in name:
343
+ return True
344
+ return False
345
+
346
+
347
+ def update_llm_model_tupled_list():
348
+ from pathlib import Path
349
+ global llm_models_tupled_list
350
+ llm_models_tupled_list = []
351
+ for k, v in llm_models.items():
352
+ name = k
353
+ value = k
354
+ llm_models_tupled_list.append((name, value))
355
+ model_files = Path(llm_models_dir).glob('*.gguf')
356
+ for path in model_files:
357
+ name = path.name
358
+ value = path.name
359
+ llm_models_tupled_list.append((name, value))
360
+ llm_models_tupled_list = list_uniq(llm_models_tupled_list)
361
+ return llm_models_tupled_list
362
+
363
+
364
+ def download_llm_models():
365
+ from huggingface_hub import hf_hub_download
366
+ global llm_models_tupled_list
367
+ llm_models_tupled_list = []
368
+ for k, v in llm_models.items():
369
+ try:
370
+ hf_hub_download(repo_id = v[0], filename = k, local_dir = llm_models_dir)
371
+ except Exception:
372
+ continue
373
+ name = k
374
+ value = k
375
+ llm_models_tupled_list.append((name, value))
376
+
377
+
378
+ def download_llm_model(filename):
379
+ from huggingface_hub import hf_hub_download
380
+ if not filename in llm_models.keys(): return default_llm_model_filename
381
+ try:
382
+ hf_hub_download(repo_id = llm_models[filename][0], filename = filename, local_dir = llm_models_dir)
383
+ except Exception:
384
+ return default_llm_model_filename
385
+ update_llm_model_tupled_list()
386
+ return filename
387
+
388
+
389
+ def get_dolphin_model_info(filename):
390
+ md = "None"
391
+ items = llm_models.get(filename, None)
392
+ if items:
393
+ md = f'Repo: [{items[0]}](https://huggingface.co/{items[0]})'
394
+ return md
395
+
396
+
397
+ def select_dolphin_model(filename, progress=gr.Progress(track_tqdm=True)):
398
+ global override_llm_format
399
+ override_llm_format = None
400
+ progress(0, desc="Loading model...")
401
+ value = download_llm_model(filename)
402
+ progress(1, desc="Model loaded.")
403
+ md = get_dolphin_model_info(filename)
404
+ return gr.update(value=value, choices=get_dolphin_models()), gr.update(value=get_dolphin_model_format(value)), gr.update(value=md)
405
+
406
+
407
+ def select_dolphin_format(format_name):
408
+ global override_llm_format
409
+ override_llm_format = llm_formats[format_name]
410
+ return gr.update(value=format_name)
411
+
412
+
413
+ #download_llm_models()
414
+ download_llm_model(default_llm_model_filename)
415
+
416
+
417
+ def get_dolphin_models():
418
+ return update_llm_model_tupled_list()
419
+
420
+
421
+ def get_llm_formats():
422
+ return list(llm_formats.keys())
423
+
424
+
425
+ def get_key_from_value(d, val):
426
+ keys = [k for k, v in d.items() if v == val]
427
+ if keys:
428
+ return keys[0]
429
+ return None
430
+
431
+
432
+ def get_dolphin_model_format(filename):
433
+ if not filename in llm_models.keys(): filename = default_llm_model_filename
434
+ format = llm_models[filename][1]
435
+ format_name = get_key_from_value(llm_formats, format)
436
+ return format_name
437
+
438
+
439
+ def add_dolphin_models(query, format_name):
440
+ import re
441
+ from huggingface_hub import HfApi
442
+ global llm_models
443
+ api = HfApi()
444
+ add_models = {}
445
+ format = llm_formats[format_name]
446
+ filename = ""
447
+ repo = ""
448
+ try:
449
+ s = list(re.findall(r'^(?:https?://huggingface.co/)?(.+?/.+?)(?:/.*/(.+?\.gguf).*?)?$', query)[0])
450
+ if s and "" in s: s.remove("")
451
+ if len(s) == 1:
452
+ repo = s[0]
453
+ if not api.repo_exists(repo_id = repo): return gr.update(visible=True)
454
+ files = api.list_repo_files(repo_id = repo)
455
+ for file in files:
456
+ if str(file).endswith(".gguf"): add_models[str(file)] = [repo, format]
457
+ elif len(s) >= 2:
458
+ repo = s[0]
459
+ filename = s[1]
460
+ if not api.repo_exists(repo_id = repo) or not api.file_exists(repo_id = repo, filename = filename): return gr.update(visible=True)
461
+ add_models[filename] = [repo, format]
462
+ else: return gr.update(visible=True)
463
+ except Exception:
464
+ return gr.update(visible=True)
465
+ print(add_models)
466
+ llm_models = (llm_models | add_models).copy()
467
+ return gr.update(choices=get_dolphin_models())
468
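For reference, the query regex above accepts either a bare repo id or a full file URL; a small illustration (hypothetical inputs) of what it extracts:

```python
# Illustration only: inputs accepted by the add_dolphin_models() query regex.
import re

pattern = r'^(?:https?://huggingface.co/)?(.+?/.+?)(?:/.*/(.+?\.gguf).*?)?$'
for query in ("mradermacher/NemoRemix-12B-GGUF",
              "https://huggingface.co/bartowski/Tess-3-Mistral-Nemo-GGUF/blob/main/Tess-3-Mistral-Nemo-Q4_K_M.gguf"):
    s = [g for g in re.findall(pattern, query)[0] if g]
    print(s)  # [repo] -> scan every .gguf in the repo; [repo, filename] -> add that one file
```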
+
469
+
470
+ dolphin_output_language = "English"
471
+ dolphin_sysprompt_mode = "Default"
472
+ dolphin_system_prompt = {"Default": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
473
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
474
+ ```
475
+ [Tags]
476
+ - Words to describe full names of characters and names of series in which they appear.
477
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
478
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail, such as long hair.
479
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
480
+ - Words to describe their stance from head to toe in detail.
481
+ - Words to describe their acting, especially with sexual activity in detail.
482
+ - Words to describe their surroundings in detail.
483
+ - Words to describe background details, such as inside room, forest, starry sky.
484
+ [Rules]
485
+ - Any output should be plain text in English and don't use line breaks.
486
+ - Output only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
487
+ - Output should be in the format: "//GENBEGIN//://1girl, Tag, Tag, ..., Tag//://GENEND//".
488
+ - Preferably refer to and describe the information obtained from Danbooru. If not, describe it in your own way.
489
+ - It's preferable that each Tag is a plain phrase, word, caption, Danbooru tag, or E621 tag.
490
+ - Convert any nicknames to full names first.
491
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
492
+ - Assemble a short story internally which is developed from the themes provided, then describe a scene in detailed English sentences based on the central character internally.
493
+ - Split sentences into short phrases or words, and then convert them to Tags.
494
+ - Use associated Danbooru tags, E621 tags.
495
+ - Same Tags should be used only once per output.
496
+ - Anyway, keep processing until you've finished outputting a message.
497
+ ```
498
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes:''',
499
+ "Strictly on themes": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
500
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
501
+ ```
502
+ [Tags]
503
+ - Words to describe full names of characters and names of series in which they appear.
504
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
505
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail, such as long hair.
506
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
507
+ - Words to describe their stance from head to toe in detail.
508
+ - Words to describe their acting, especially with sexual activity in detail.
509
+ - Words to describe their surroundings in detail.
510
+ - Words to describe background details, such as inside room, forest, starry sky.
511
+ [Rules]
512
+ - Any output should be plain text in English and don't use line breaks.
513
+ - Output only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
514
+ - Output should be in the format: "//GENBEGIN//://1girl, Tag, Tag, ..., Tag//://GENEND//".
515
+ - Preferably refer to and describe the information obtained from Danbooru. If not, describe it in your own way.
516
+ - It's preferable that each Tag is a plain phrase, word, caption, Danbooru tag, or E621 tag.
517
+ - Convert any nicknames to full names first.
518
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
519
+ - Rewrite the given themes in plain English without changing the main idea.
520
+ - Split sentences into short phrases or words, and then convert them to Tags.
521
+ - Use associated Danbooru tags, E621 tags.
522
+ - Same Tags should be used only once per output.
523
+ - Anyway, keep processing until you've finished outputting a message.
524
+ ```
525
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes:''',
526
+ "With description": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
527
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
528
+ ```
529
+ [Tags]
530
+ - Words to describe full names of characters and names of series in which they appear.
531
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
532
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail, such as long hair.
533
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
534
+ - Words to describe their stance from head to toe in detail.
535
+ - Words to describe their acting, especially with sexual activity in detail.
536
+ - Words to describe their surroundings in detail.
537
+ - Words to describe background details, such as inside room, forest, starry sky.
538
+ [Rules]
539
+ - Any Tags should be plain text in English and don't use line breaks.
540
+ - Message is only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
541
+ - Message should be in the format: "//GENBEGIN//://1girl, Tag, Tag, ..., Tag//://GENEND//".
542
+ - Preferably refer to and describe the information obtained from Danbooru. If not, describe it in your own way.
543
+ - It's preferable that each Tag is a plain phrase, word, caption, Danbooru tag, or E621 tag.
544
+ - Convert any nicknames to full names first.
545
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
546
+ - Assemble a short story internally which is developed from the themes provided, then describe a scene in detailed English sentences based on the central character internally.
547
+ - Split sentences into short phrases or words, and then convert them to Tags.
548
+ - Use associated Danbooru tags, E621 tags.
549
+ - Same Tags should be used only once per output.
550
+ - Anyway, keep processing until you've finished outputting a message.
551
+ ```
552
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes,
553
+ then briefly describe the message you've generated, in <LANGUAGE>.:
554
+ "With dialogue and description": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
555
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
556
+ ```
557
+ [Tags]
558
+ - Words to describe full names of characters and names of series in which they appear.
559
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
560
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail, such as long hair.
561
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
562
+ - Words to describe their stance from head to toe in detail.
563
+ - Words to describe their acting, especially with sexual activity in detail.
564
+ - Words to describe their surroundings in detail.
565
+ - Words to describe background details, such as inside room, forest, starry sky.
566
+ [Rules]
567
+ - Any Tags should be plain text in English and don't use line breaks.
568
+ - Message is only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
569
+ - Message should be in the format: "//GENBEGIN//://1girl, Tag, Tag, ..., Tag//://GENEND//".
570
+ - Preferably refer to and describe the information obtained from Danbooru. If not, describe it in your own way.
571
+ - It's preferable that each Tag is a plain phrase, word, caption, Danbooru tag, or E621 tag.
572
+ - Convert any nicknames to full names first.
573
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
574
+ - Assemble a short story internally which is developed from the themes provided, then describe a scene in detailed English sentences based on the central character internally.
575
+ - Split sentences into short phrases or words, and then convert them to Tags.
576
+ - Use associated Danbooru tags, E621 tags.
577
+ - Same Tags should be used only once per output.
578
+ - Anyway, keep processing until you've finished outputting a message.
579
+ ```
580
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes,
581
+ then write the character's long spoken lines, composed of their voice, moaning, and inner thoughts, based on the story you have assembled, in <LANGUAGE>,
582
+ enclosed in //VOICEBEGIN//:// and //://VOICEEND//, then briefly describe the message you've generated, in <LANGUAGE>.:
583
+ "Longer prompt": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
584
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
585
+ ```
586
+ [Tags]
587
+ - Words to describe full names of characters and names of series in which they appear.
588
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
589
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail.
590
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
591
+ - Words to describe their stance from head to toe in detail.
592
+ - Words to describe their acting, especially with sexual activity in detail.
593
+ - Words to describe their surroundings in detail.
594
+ - Words to describe background details.
595
+ [Rules]
596
+ - Any Tags should be plain text in English and don't use line breaks.
597
+ - Message is only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
598
+ - Message should be enclosed in //GENBEGIN//:// and //://GENEND//.
599
+ - Convert any nicknames to full names first.
600
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
601
+ - Assemble a short story internally which is developed from the themes provided, then describe a scene in detailed English text based on the central character internally.
602
+ - Tags can be in the form of sentences.
603
+ - You can also use Danbooru tags, E621 tags as Tags.
604
+ - Anyway, keep processing until you've finished outputting a message.
605
+ ```
606
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes:''',
607
+ "Longer prompt strictly on themes": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
608
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
609
+ ```
610
+ [Tags]
611
+ - Words to describe full names of characters and names of series in which they appear.
612
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
613
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail.
614
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
615
+ - Words to describe their stance from head to toe in detail.
616
+ - Words to describe their acting, especially with sexual activity in detail.
617
+ - Words to describe their surroundings in detail.
618
+ - Words to describe background details.
619
+ [Rules]
620
+ - Any Tags should be plain text in English and don't use line breaks.
621
+ - Message is only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
622
+ - Message should be enclosed in //GENBEGIN//:// and //://GENEND//.
623
+ - Convert any nicknames to full names first.
624
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
625
+ - Rewrite the given themes in plain English without changing the main idea.
626
+ - Tags can be in the form of sentences.
627
+ - You can also use Danbooru tags, E621 tags as Tags.
628
+ - Anyway, keep processing until you've finished outputting a message.
629
+ ```
630
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes:''',
631
+ "Longer prompt with description": r'''You are a helpful AI assistant to generate messages for AI that outputs an image when I enter a message.
632
+ The message must have the following [Tags] generated in strict accordance with the following [Rules]:
633
+ ```
634
+ [Tags]
635
+ - Words to describe full names of characters and names of series in which they appear.
636
+ - Words to describe names of the people there and their numbers, such as 2girls, 1boy.
637
+ - Words to describe their hair color, hairstyle, hair length, hair accessory, eye color, eye shape, facial expression, breast size, and clothing of them in detail.
638
+ - Words to describe their external features, ornaments and belongings (also specify colors, patterns, shapes) in detail.
639
+ - Words to describe their stance from head to toe in detail.
640
+ - Words to describe their acting, especially with sexual activity in detail.
641
+ - Words to describe their surroundings in detail.
642
+ - Words to describe background details.
643
+ [Rules]
644
+ - Any Tags should be plain text in English and don't use line breaks.
645
+ - Message is only composed of Tags in 1 line, separated by commas with spaces between Tags, in lower case English.
646
+ - Message should be enclosed in //GENBEGIN//:// and //://GENEND//.
647
+ - Convert any nicknames to full names first.
648
+ - If a sexual theme is given, priority should be given to specific and rich descriptions of sexual activity, especially about genitals, fluids.
649
+ - Assemble a short story internally which is developed from the themes provided, then describe a scene in detailed English text based on the central character internally.
650
+ - Tags can be in the form of sentences.
651
+ - You can also use Danbooru tags, E621 tags as Tags.
652
+ - Anyway, keep processing until you've finished outputting a message.
653
+ ```
654
+ Based on these Rules, please tell me a message within 40 Tags that can generate an image for the following themes,
655
+ then briefly describe the message you've generated, in <LANGUAGE>.:
656
+ "Japanese to Danbooru Dictionary": r"""You are a helpful AI assistant.
657
+ Extract Japanese words from the following sentences and output them separated by commas. Convert words to their original forms.
658
+ Output should be enclosed in //GENBEGIN//:// and //://GENEND//. The text to be given is as follows:""",
659
+ "Chat with LLM": r"You are a helpful AI assistant. Respond in <LANGUAGE>."}
660
+
661
+
662
+ def get_dolphin_sysprompt():
663
+ import re
664
+ prompt = re.sub('<LANGUAGE>', dolphin_output_language, dolphin_system_prompt.get(dolphin_sysprompt_mode, ""))
665
+ return prompt
666
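Because `<LANGUAGE>` is substituted at lookup time, flipping the language or mode globals changes every subsequent prompt; an illustration (not part of the diff):

```python
# Illustration only: <LANGUAGE> resolves against the current globals.
dolphin_sysprompt_mode = "Chat with LLM"
dolphin_output_language = "Japanese"
print(get_dolphin_sysprompt())
# -> "You are a helpful AI assistant. Respond in Japanese."
```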
+
667
+
668
+ def get_dolphin_sysprompt_mode():
669
+ return list(dolphin_system_prompt.keys())
670
+
671
+
672
+ def select_dolphin_sysprompt(key: str):
673
+ global dolphin_sysprompt_mode
674
+ if not key in dolphin_system_prompt.keys():
675
+ dolphin_sysprompt_mode = "Default"
676
+ else:
677
+ dolphin_sysprompt_mode = key
678
+ return gr.update(value=get_dolphin_sysprompt())
679
+
680
+
681
+ def get_dolphin_languages():
682
+ return llm_languages
683
+
684
+
685
+ def select_dolphin_language(lang: str):
686
+ global dolphin_output_language
687
+ dolphin_output_language = lang
688
+ return gr.update(value=get_dolphin_sysprompt())
689
+
690
+
691
+ @wrapt_timeout_decorator.timeout(dec_timeout=5.0)
692
+ def get_raw_prompt(msg: str):
693
+ import re
694
+ m = re.findall(r'/GENBEGIN/(.+?)/GENEND/', msg, re.DOTALL)
695
+ return re.sub(r'[*/:_"#\n]', ' ', ", ".join(m)).lower() if m else ""
696
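A worked example of the extraction above (illustrative string; the surrounding slashes and colons become spaces, and `to_list()` later trims each piece):

```python
# Illustration only: pulling tags out of the model's framed output.
msg = "Sure! //GENBEGIN//://1girl, long hair, starry sky//://GENEND//"
print(get_raw_prompt(msg))
# -> '    1girl, long hair, starry sky   '  (approximately; the marker characters are replaced by spaces)
```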
+
697
+
698
+ @spaces.GPU(duration=60)
699
+ def dolphin_respond(
700
+ message: str,
701
+ history: list[tuple[str, str]],
702
+ model: str = default_llm_model_filename,
703
+ system_message: str = get_dolphin_sysprompt(),
704
+ max_tokens: int = 1024,
705
+ temperature: float = 0.7,
706
+ top_p: float = 0.95,
707
+ top_k: int = 40,
708
+ repeat_penalty: float = 1.1,
709
+ progress=gr.Progress(track_tqdm=True),
710
+ ):
711
+ from pathlib import Path
712
+ progress(0, desc="Processing...")
713
+
714
+ if override_llm_format:
715
+ chat_template = override_llm_format
716
+ else:
717
+ chat_template = llm_models[model][1]
718
+
719
+ llm = Llama(
720
+ model_path=str(Path(f"{llm_models_dir}/{model}")),
721
+ flash_attn=True,
722
+ n_gpu_layers=81, # 81
723
+ n_batch=1024,
724
+ n_ctx=8192, #8192
725
+ )
726
+ provider = LlamaCppPythonProvider(llm)
727
+
728
+ agent = LlamaCppAgent(
729
+ provider,
730
+ system_prompt=f"{system_message}",
731
+ predefined_messages_formatter_type=chat_template,
732
+ debug_output=False
733
+ )
734
+
735
+ settings = provider.get_provider_default_settings()
736
+ settings.temperature = temperature
737
+ settings.top_k = top_k
738
+ settings.top_p = top_p
739
+ settings.max_tokens = max_tokens
740
+ settings.repeat_penalty = repeat_penalty
741
+ settings.stream = True
742
+
743
+ messages = BasicChatHistory()
744
+
745
+ for msn in history:
746
+ user = {
747
+ 'role': Roles.user,
748
+ 'content': msn[0]
749
+ }
750
+ assistant = {
751
+ 'role': Roles.assistant,
752
+ 'content': msn[1]
753
+ }
754
+ messages.add_message(user)
755
+ messages.add_message(assistant)
756
+
757
+ stream = agent.get_chat_response(
758
+ message,
759
+ llm_sampling_settings=settings,
760
+ chat_history=messages,
761
+ returns_streaming_generator=True,
762
+ print_output=False
763
+ )
764
+
765
+ progress(0.5, desc="Processing...")
766
+
767
+ outputs = ""
768
+ for output in stream:
769
+ outputs += output
770
+ yield [(outputs, None)]
771
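Since `dolphin_respond` is a generator that yields a growing chatbot pair, it can be wired directly to a queued Gradio event; a minimal sketch with illustrative component names (the real wiring lives in app.py):

```python
# Minimal sketch; component names are illustrative, not taken from the diff.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    # Generator handlers stream partial results when the queue is enabled.
    msg.submit(dolphin_respond, [msg, chatbot], [chatbot])
demo.queue().launch()
```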
+
772
+
773
+ def dolphin_parse(
774
+ history: list[tuple[str, str]],
775
+ ):
776
+ if dolphin_sysprompt_mode == "Chat with LLM" or not history or len(history) < 1:
777
+ return "", gr.update(visible=True), gr.update(visible=True)
778
+ try:
779
+ msg = history[-1][0]
780
+ raw_prompt = get_raw_prompt(msg)
781
+ except Exception:
782
+ return "", gr.update(visible=True), gr.update(visible=True)
783
+ prompts = []
784
+ if dolphin_sysprompt_mode == "Japanese to Danbooru Dictionary" and is_japanese(raw_prompt):
785
+ prompts = list_uniq(jatags_to_danbooru_tags(to_list_ja(raw_prompt)) + ["nsfw", "explicit"])
786
+ else:
787
+ prompts = list_uniq(to_list(raw_prompt) + ["nsfw", "explicit"])
788
+ return ", ".join(prompts), gr.update(interactive=True), gr.update(interactive=True)
789
+
790
+
791
+ @spaces.GPU(duration=60)
792
+ def dolphin_respond_auto(
793
+ message: str,
794
+ history: list[tuple[str, str]],
795
+ model: str = default_llm_model_filename,
796
+ system_message: str = get_dolphin_sysprompt(),
797
+ max_tokens: int = 1024,
798
+ temperature: float = 0.7,
799
+ top_p: float = 0.95,
800
+ top_k: int = 40,
801
+ repeat_penalty: float = 1.1,
802
+ progress=gr.Progress(track_tqdm=True),
803
+ ):
804
+ #if not is_japanese(message): return [(None, None)]
805
+ from pathlib import Path
806
+ progress(0, desc="Processing...")
807
+
808
+ if override_llm_format:
809
+ chat_template = override_llm_format
810
+ else:
811
+ chat_template = llm_models[model][1]
812
+
813
+ llm = Llama(
814
+ model_path=str(Path(f"{llm_models_dir}/{model}")),
815
+ flash_attn=True,
816
+ n_gpu_layers=81, # 81
817
+ n_batch=1024,
818
+ n_ctx=8192, #8192
819
+ )
820
+ provider = LlamaCppPythonProvider(llm)
821
+
822
+ agent = LlamaCppAgent(
823
+ provider,
824
+ system_prompt=f"{system_message}",
825
+ predefined_messages_formatter_type=chat_template,
826
+ debug_output=False
827
+ )
828
+
829
+ settings = provider.get_provider_default_settings()
830
+ settings.temperature = temperature
831
+ settings.top_k = top_k
832
+ settings.top_p = top_p
833
+ settings.max_tokens = max_tokens
834
+ settings.repeat_penalty = repeat_penalty
835
+ settings.stream = True
836
+
837
+ messages = BasicChatHistory()
838
+
839
+ for msn in history:
840
+ user = {
841
+ 'role': Roles.user,
842
+ 'content': msn[0]
843
+ }
844
+ assistant = {
845
+ 'role': Roles.assistant,
846
+ 'content': msn[1]
847
+ }
848
+ messages.add_message(user)
849
+ messages.add_message(assistant)
850
+
851
+ progress(0, desc="Translating...")
852
+ stream = agent.get_chat_response(
853
+ message,
854
+ llm_sampling_settings=settings,
855
+ chat_history=messages,
856
+ returns_streaming_generator=True,
857
+ print_output=False
858
+ )
859
+
860
+ progress(0.5, desc="Processing...")
861
+
862
+ outputs = ""
863
+ for output in stream:
864
+ outputs += output
865
+ yield [(outputs, None)]
866
+
867
+
868
+ def dolphin_parse_simple(
869
+ message: str,
870
+ history: list[tuple[str, str]],
871
+ ):
872
+ #if not is_japanese(message): return message
873
+ if dolphin_sysprompt_mode == "Chat with LLM" or not history or len(history) < 1: return message
874
+ try:
875
+ msg = history[-1][0]
876
+ raw_prompt = get_raw_prompt(msg)
877
+ except Exception:
878
+ return ""
879
+ prompts = []
880
+ if dolphin_sysprompt_mode == "Japanese to Danbooru Dictionary" and is_japanese(raw_prompt):
881
+ prompts = list_uniq(jatags_to_danbooru_tags(to_list_ja(raw_prompt)) + ["nsfw", "explicit", "rating_explicit"])
882
+ else:
883
+ prompts = list_uniq(to_list(raw_prompt) + ["nsfw", "explicit", "rating_explicit"])
884
+ return ", ".join(prompts)
lora_dict.json ADDED
The diff for this file is too large to render. See raw diff
 
model_dict.json ADDED
The diff for this file is too large to render. See raw diff
 
modutils.py ADDED
@@ -0,0 +1,1225 @@
1
+ import spaces
2
+ import json
3
+ import gradio as gr
4
+ from huggingface_hub import HfApi
5
+ import os
6
+ from pathlib import Path
7
+
8
+ from env import (
9
+ HF_LORA_PRIVATE_REPOS1,
10
+ HF_LORA_PRIVATE_REPOS2,
11
+ HF_MODEL_USER_EX,
12
+ HF_MODEL_USER_LIKES,
13
+ directory_loras,
14
+ hf_read_token,
15
+ hf_token,
16
+ CIVITAI_API_KEY,
17
+ )
18
+
19
+
20
+ def get_user_agent():
21
+ return 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
22
+
23
+
24
+ def to_list(s):
25
+ return [x.strip() for x in s.split(",") if x.strip() != ""]
26
+
27
+
28
+ def list_uniq(l):
29
+ return sorted(set(l), key=l.index)
30
+
31
+
32
+ def list_sub(a, b):
33
+ return [e for e in a if e not in b]
34
+
35
+
36
+ def get_local_model_list(dir_path):
37
+ model_list = []
38
+ valid_extensions = ('.ckpt', '.pt', '.pth', '.safetensors', '.bin')
39
+ for file in Path(dir_path).glob("*"):
40
+ if file.suffix in valid_extensions:
41
+ file_path = str(Path(f"{dir_path}/{file.name}"))
42
+ model_list.append(file_path)
43
+ return model_list
44
+
45
+
46
+ def download_things(directory, url, hf_token="", civitai_api_key=""):
47
+ url = url.strip()
48
+
49
+ if "drive.google.com" in url:
50
+ original_dir = os.getcwd()
51
+ os.chdir(directory)
52
+ os.system(f"gdown --fuzzy {url}")
53
+ os.chdir(original_dir)
54
+ elif "huggingface.co" in url:
55
+ url = url.replace("?download=true", "")
56
+ # url = urllib.parse.quote(url, safe=':/') # fix encoding
57
+ if "/blob/" in url:
58
+ url = url.replace("/blob/", "/resolve/")
59
+ user_header = f'"Authorization: Bearer {hf_token}"'
60
+ if hf_token:
61
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
62
+ else:
63
+ os.system(f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
64
+ elif "civitai.com" in url:
65
+ if "?" in url:
66
+ url = url.split("?")[0]
67
+ if civitai_api_key:
68
+ url = url + f"?token={civitai_api_key}"
69
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
70
+ else:
71
+ print("\033[91mYou need an API key to download Civitai models.\033[0m")
72
+ else:
73
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
74
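The shell commands above interpolate URLs directly into `os.system`; a hedged alternative sketch that builds the same aria2c call as an argument list, avoiding shell quoting issues (flags copied from the code above):

```python
# Sketch, not part of the diff: argument-list form of the aria2c call.
import subprocess

def aria2c_download(directory: str, url: str, hf_token: str = ""):
    cmd = ["aria2c", "--console-log-level=error", "--summary-interval=10",
           "-c", "-x", "16", "-k", "1M", "-s", "16", "-d", directory, url]
    if hf_token:
        cmd.insert(1, f"--header=Authorization: Bearer {hf_token}")
    subprocess.run(cmd, check=False)  # like os.system here: exit code is not raised
```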
+
75
+
76
+ def escape_lora_basename(basename: str):
77
+ return basename.replace(".", "_").replace(" ", "_").replace(",", "")
78
+
79
+
80
+ def to_lora_key(path: str):
81
+ return escape_lora_basename(Path(path).stem)
82
+
83
+
84
+ def to_lora_path(key: str):
85
+ if Path(key).is_file(): return key
86
+ path = Path(f"{directory_loras}/{escape_lora_basename(key)}.safetensors")
87
+ return str(path)
88
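The escaping and key/path helpers round-trip like this (illustration; the actual prefix comes from `directory_loras` in env.py):

```python
# Illustration only: LoRA basename escaping and key <-> path round trip.
print(escape_lora_basename("my lora v1.0"))           # -> "my_lora_v1_0"
print(to_lora_key("loras/my lora v1.0.safetensors"))  # -> "my_lora_v1_0"
print(to_lora_path("my_lora_v1_0"))                   # -> f"{directory_loras}/my_lora_v1_0.safetensors"
```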
+
89
+
90
+ def safe_float(input):
91
+ output = 1.0
92
+ try:
93
+ output = float(input)
94
+ except Exception:
95
+ output = 1.0
96
+ return output
97
+
98
+
99
+ def save_gallery_images(images, progress=gr.Progress(track_tqdm=True)):
100
+ from datetime import datetime, timezone, timedelta
101
+ progress(0, desc="Updating gallery...")
102
+ dt_now = datetime.now(timezone(timedelta(hours=9)))
103
+ basename = dt_now.strftime('%Y%m%d_%H%M%S_')
104
+ i = 1
105
+ if not images: return images
106
+ output_images = []
107
+ output_paths = []
108
+ for image in images:
109
+ filename = basename + str(i) + ".png"
110
+ i += 1
111
+ oldpath = Path(image[0])
112
+ newpath = oldpath
113
+ try:
114
+ if oldpath.exists():
115
+ newpath = oldpath.resolve().rename(Path(filename).resolve())
116
+ except Exception:
117
+ pass
118
+ finally:
119
+ output_paths.append(str(newpath))
120
+ output_images.append((str(newpath), str(filename)))
121
+ progress(1, desc="Gallery updated.")
122
+ return gr.update(value=output_images), gr.update(value=output_paths), gr.update(visible=True)
123
+
124
+
125
+ def download_private_repo(repo_id, dir_path, is_replace):
126
+ from huggingface_hub import snapshot_download
127
+ if not hf_read_token: return
128
+ try:
129
+ snapshot_download(repo_id=repo_id, local_dir=dir_path, allow_patterns=['*.ckpt', '*.pt', '*.pth', '*.safetensors', '*.bin'], use_auth_token=hf_read_token)
130
+ except Exception as e:
131
+ print(f"Error: Failed to download {repo_id}. ")
132
+ return
133
+ if is_replace:
134
+ for file in Path(dir_path).glob("*"):
135
+ if file.exists() and ("." in file.stem or " " in file.stem) and file.suffix in ['.ckpt', '.pt', '.pth', '.safetensors', '.bin']:
136
+ newpath = Path(f'{file.parent.name}/{escape_lora_basename(file.stem)}{file.suffix}')
137
+ file.resolve().rename(newpath.resolve())
138
+
139
+
140
+ private_model_path_repo_dict = {} # {"local filepath": "huggingface repo_id", ...}
141
+
142
+
143
+ def get_private_model_list(repo_id, dir_path):
144
+ global private_model_path_repo_dict
145
+ api = HfApi()
146
+ if not hf_read_token: return []
147
+ try:
148
+ files = api.list_repo_files(repo_id, token=hf_read_token)
149
+ except Exception as e:
150
+ print(f"Error: Failed to list {repo_id}. ")
151
+ return []
152
+ model_list = []
153
+ for file in files:
154
+ path = Path(f"{dir_path}/{file}")
155
+ if path.suffix in ['.ckpt', '.pt', '.pth', '.safetensors', '.bin']:
156
+ model_list.append(str(path))
157
+ for model in model_list:
158
+ private_model_path_repo_dict[model] = repo_id
159
+ return model_list
160
+
161
+
162
+ def download_private_file(repo_id, path, is_replace):
163
+ from huggingface_hub import hf_hub_download
164
+ file = Path(path)
165
+ newpath = Path(f'{file.parent.name}/{escape_lora_basename(file.stem)}{file.suffix}') if is_replace else file
166
+ if not hf_read_token or newpath.exists(): return
167
+ filename = file.name
168
+ dirname = file.parent.name
169
+ try:
170
+ hf_hub_download(repo_id=repo_id, filename=filename, local_dir=dirname, use_auth_token=hf_read_token)
171
+ except Exception as e:
172
+ print(f"Error: Failed to download {filename}. ")
173
+ return
174
+ if is_replace:
175
+ file.resolve().rename(newpath.resolve())
176
+
177
+
178
+ def download_private_file_from_somewhere(path, is_replace):
179
+ if not path in private_model_path_repo_dict.keys(): return
180
+ repo_id = private_model_path_repo_dict.get(path, None)
181
+ download_private_file(repo_id, path, is_replace)
182
+
183
+
184
+ model_id_list = []
185
+ def get_model_id_list():
186
+ global model_id_list
187
+ if len(model_id_list) != 0: return model_id_list
188
+ api = HfApi()
189
+ model_ids = []
190
+ try:
191
+ models_likes = []
192
+ for author in HF_MODEL_USER_LIKES:
193
+ models_likes.extend(api.list_models(author=author, cardData=True, sort="likes"))
194
+ models_ex = []
195
+ for author in HF_MODEL_USER_EX:
196
+ models_ex.extend(api.list_models(author=author, cardData=True, sort="last_modified"))
197
+ except Exception as e:
198
+ print(f"Error: Failed to list {author}'s models. ")
199
+ return model_ids
200
+ for model in models_likes:
201
+ model_ids.append(model.id) if not model.private else ""
202
+ anime_models = []
203
+ real_models = []
204
+ for model in models_ex:
205
+ if not model.private:
206
+ anime_models.append(model.id) if 'anime' in model.tags else real_models.append(model.id)
207
+ model_ids.extend(anime_models)
208
+ model_ids.extend(real_models)
209
+ model_id_list = model_ids.copy()
210
+ return model_ids
211
+
212
+
213
+ model_id_list = get_model_id_list()
214
+
215
+
216
+ def get_t2i_model_info(repo_id: str):
217
+ api = HfApi()
218
+ try:
219
+ if " " in repo_id or not api.repo_exists(repo_id): return ""
220
+ model = api.model_info(repo_id=repo_id)
221
+ except Exception as e:
222
+ print(f"Error: Failed to get {repo_id}'s info. ")
223
+ return ""
224
+ if model.private or model.gated: return ""
225
+ tags = model.tags
226
+ info = []
227
+ url = f"https://huggingface.co/{repo_id}/"
228
+ if not 'diffusers' in tags: return ""
229
+ if 'diffusers:StableDiffusionXLPipeline' in tags:
230
+ info.append("SDXL")
231
+ elif 'diffusers:StableDiffusionPipeline' in tags:
232
+ info.append("SD1.5")
233
+ if model.card_data and model.card_data.tags:
234
+ info.extend(list_sub(model.card_data.tags, ['text-to-image', 'stable-diffusion', 'stable-diffusion-api', 'safetensors', 'stable-diffusion-xl']))
235
+ info.append(f"DLs: {model.downloads}")
236
+ info.append(f"likes: {model.likes}")
237
+ info.append(model.last_modified.strftime("lastmod: %Y-%m-%d"))
238
+ md = f"Model Info: {', '.join(info)}, [Model Repo]({url})"
239
+ return gr.update(value=md)
240
+
241
+
242
+ def get_tupled_model_list(model_list):
243
+ if not model_list: return []
244
+ tupled_list = []
245
+ for repo_id in model_list:
246
+ api = HfApi()
247
+ try:
248
+ if not api.repo_exists(repo_id): continue
249
+ model = api.model_info(repo_id=repo_id)
250
+ except Exception as e:
251
+ continue
252
+ if model.private or model.gated: continue
253
+ tags = model.tags
254
+ info = []
255
+ if not 'diffusers' in tags: continue
256
+ if 'diffusers:StableDiffusionXLPipeline' in tags:
257
+ info.append("SDXL")
258
+ elif 'diffusers:StableDiffusionPipeline' in tags:
259
+ info.append("SD1.5")
260
+ if model.card_data and model.card_data.tags:
261
+ info.extend(list_sub(model.card_data.tags, ['text-to-image', 'stable-diffusion', 'stable-diffusion-api', 'safetensors', 'stable-diffusion-xl']))
262
+ if "pony" in info:
263
+ info.remove("pony")
264
+ name = f"{repo_id} (Pony🐴, {', '.join(info)})"
265
+ else:
266
+ name = f"{repo_id} ({', '.join(info)})"
267
+ tupled_list.append((name, repo_id))
268
+ return tupled_list
269
+
270
+
271
+ private_lora_dict = {}
272
+ try:
273
+ with open('lora_dict.json', encoding='utf-8') as f:
274
+ d = json.load(f)
275
+ for k, v in d.items():
276
+ private_lora_dict[escape_lora_basename(k)] = v
277
+ except Exception:
278
+ pass
279
+ loras_dict = {"None": ["", "", "", "", ""], "": ["", "", "", "", ""]} | private_lora_dict.copy()
280
+ civitai_not_exists_list = []
281
+ loras_url_to_path_dict = {} # {"URL to download": "local filepath", ...}
282
+ civitai_lora_last_results = {} # {"URL to download": {search results}, ...}
283
+ all_lora_list = []
284
+
285
+
286
+ private_lora_model_list = []
287
+ def get_private_lora_model_lists():
288
+ global private_lora_model_list
289
+ if len(private_lora_model_list) != 0: return private_lora_model_list
290
+ models1 = []
291
+ models2 = []
292
+ for repo in HF_LORA_PRIVATE_REPOS1:
293
+ models1.extend(get_private_model_list(repo, directory_loras))
294
+ for repo in HF_LORA_PRIVATE_REPOS2:
295
+ models2.extend(get_private_model_list(repo, directory_loras))
296
+ models = list_uniq(models1 + sorted(models2))
297
+ private_lora_model_list = models.copy()
298
+ return models
299
+
300
+
301
+ private_lora_model_list = get_private_lora_model_lists()
302
+
303
+
304
+ def get_civitai_info(path):
305
+ global civitai_not_exists_list
306
+ import requests
307
+ from urllib3.util import Retry
308
+ from requests.adapters import HTTPAdapter
309
+ if path in set(civitai_not_exists_list): return ["", "", "", "", ""]
310
+ if not Path(path).exists(): return None
311
+ user_agent = get_user_agent()
312
+ headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
313
+ base_url = 'https://civitai.com/api/v1/model-versions/by-hash/'
314
+ params = {}
315
+ session = requests.Session()
316
+ retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
317
+ session.mount("https://", HTTPAdapter(max_retries=retries))
318
+ import hashlib
319
+ with open(path, 'rb') as file:
320
+ file_data = file.read()
321
+ hash_sha256 = hashlib.sha256(file_data).hexdigest()
322
+ url = base_url + hash_sha256
323
+ try:
324
+ r = session.get(url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
325
+ except Exception as e:
326
+ return ["", "", "", "", ""]
327
+ if not r.ok: return None
328
+ data = r.json()  # renamed from "json" to avoid shadowing the json module imported above
329
+ if not 'baseModel' in data:
330
+ civitai_not_exists_list.append(path)
331
+ return ["", "", "", "", ""]
332
+ items = []
333
+ items.append(" / ".join(data['trainedWords']))
334
+ items.append(data['baseModel'])
335
+ items.append(data['model']['name'])
336
+ items.append(f"https://civitai.com/models/{data['modelId']}")
337
+ items.append(data['images'][0]['url'])
338
+ return items
339
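+ # Return schema (hedged summary of the code above): a five-element list of
+ #   [trigger words, base model, model version name, model page URL, sample image URL],
+ # or None when the file is missing or the API call fails. Note that this
+ # function is redefined further down in this file; Python keeps the later definition.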
+
+
+ def get_lora_model_list():
+     loras = list_uniq(get_private_lora_model_lists() + get_local_model_list(directory_loras))
+     loras.insert(0, "None")
+     loras.insert(0, "")
+     return loras
+
+
+ def get_all_lora_list():
+     global all_lora_list
+     loras = get_lora_model_list()
+     all_lora_list = loras.copy()
+     return loras
+
+
+ def get_all_lora_tupled_list():
+     global loras_dict
+     models = get_all_lora_list()
+     if not models: return []
+     tupled_list = []
+     for model in models:
+         #if not model: continue # to avoid GUI-related bug
+         basename = Path(model).stem
+         key = to_lora_key(model)
+         items = None
+         if key in loras_dict.keys():
+             items = loras_dict.get(key, None)
+         else:
+             items = get_civitai_info(model)
+             if items is not None:
+                 loras_dict[key] = items
+         name = basename
+         value = model
+         if items and items[2] != "":
+             if items[1] == "Pony":
+                 name = f"{basename} (for {items[1]}🐴, {items[2]})"
+             else:
+                 name = f"{basename} (for {items[1]}, {items[2]})"
+         tupled_list.append((name, value))
+     return tupled_list
+
+
+ def update_lora_dict(path):
+     global loras_dict
+     key = escape_lora_basename(Path(path).stem)
+     if key in loras_dict.keys(): return
+     items = get_civitai_info(path)
+     if items is None: return
+     loras_dict[key] = items
+
+
+ def download_lora(dl_urls: str):
+     global loras_url_to_path_dict
+     dl_path = ""
+     before = get_local_model_list(directory_loras)
+     urls = []
+     for url in [url.strip() for url in dl_urls.split(',')]:
+         local_path = f"{directory_loras}/{url.split('/')[-1]}"
+         if not Path(local_path).exists():
+             download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)
+             urls.append(url)
+     after = get_local_model_list(directory_loras)
+     new_files = list_sub(after, before)
+     i = 0
+     for file in new_files:
+         path = Path(file)
+         if path.exists():
+             new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
+             path.resolve().rename(new_path.resolve())
+             loras_url_to_path_dict[urls[i]] = str(new_path)
+             update_lora_dict(str(new_path))
+             dl_path = str(new_path)
+         i += 1
+     return dl_path
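+ # Usage sketch (illustrative): accepts comma-separated URLs and returns the
+ # escaped local path of the last new download, e.g.
+ #   path = download_lora("https://example.com/my_lora.safetensors")  # hypothetical URL
+ #   if path: update_lora_dict(path)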
+
+
+ def copy_lora(path: str, new_path: str):
+     import shutil
+     if path == new_path: return new_path
+     cpath = Path(path)
+     npath = Path(new_path)
+     if cpath.exists():
+         try:
+             shutil.copy(str(cpath.resolve()), str(npath.resolve()))
+         except Exception:
+             return None
+         update_lora_dict(str(npath))
+         return new_path
+     else:
+         return None
+
+
+ def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: str, lora5: str):
+     path = download_lora(dl_urls)
+     if path:
+         if not lora1 or lora1 == "None":
+             lora1 = path
+         elif not lora2 or lora2 == "None":
+             lora2 = path
+         elif not lora3 or lora3 == "None":
+             lora3 = path
+         elif not lora4 or lora4 == "None":
+             lora4 = path
+         elif not lora5 or lora5 == "None":
+             lora5 = path
+     choices = get_all_lora_tupled_list()
+     return gr.update(value=lora1, choices=choices), gr.update(value=lora2, choices=choices), gr.update(value=lora3, choices=choices),\
+         gr.update(value=lora4, choices=choices), gr.update(value=lora5, choices=choices)
+
+
+ def get_valid_lora_name(query: str):
+     path = "None"
+     if not query or query == "None": return "None"
+     if to_lora_key(query) in loras_dict.keys(): return query
+     if query in loras_url_to_path_dict.keys():
+         path = loras_url_to_path_dict[query]
+     else:
+         path = to_lora_path(query.strip().split('/')[-1])
+     if Path(path).exists():
+         return path
+     elif "http" in query:
+         dl_file = download_lora(query)
+         if dl_file and Path(dl_file).exists(): return dl_file
+     else:
+         dl_file = find_similar_lora(query)
+         if dl_file and Path(dl_file).exists(): return dl_file
+     return "None"
+
+
+ def get_valid_lora_path(query: str):
+     if not query or query == "None": return None
+     if to_lora_key(query) in loras_dict.keys(): return query
+     # Fixed: the original checked Path(None).exists(), which raises TypeError;
+     # derive a candidate path from the query first, mirroring get_valid_lora_name.
+     path = to_lora_path(query.strip().split('/')[-1])
+     if Path(path).exists():
+         return path
+     else:
+         return None
+
+
+ def get_valid_lora_wt(prompt: str, lora_path: str, lora_wt: float):
+     import re
+     wt = lora_wt
+     result = re.findall(f'<lora:{to_lora_key(lora_path)}:(.+?)>', prompt)
+     if not result: return wt
+     # Fixed: with a single capture group, re.findall returns plain strings, so
+     # the original result[0][0] took only the first character of the weight.
+     wt = safe_float(result[0])
+     return wt
+
+
+ def set_prompt_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
+     import re
+     if "Classic" not in str(prompt_syntax): return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
+     lora1 = get_valid_lora_name(lora1)
+     lora2 = get_valid_lora_name(lora2)
+     lora3 = get_valid_lora_name(lora3)
+     lora4 = get_valid_lora_name(lora4)
+     lora5 = get_valid_lora_name(lora5)
+     if "<lora" not in prompt: return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
+     lora1_wt = get_valid_lora_wt(prompt, lora1, lora1_wt)
+     lora2_wt = get_valid_lora_wt(prompt, lora2, lora2_wt)
+     lora3_wt = get_valid_lora_wt(prompt, lora3, lora3_wt)
+     lora4_wt = get_valid_lora_wt(prompt, lora4, lora4_wt)
+     lora5_wt = get_valid_lora_wt(prompt, lora5, lora5_wt)
+     on1, label1, tag1, md1 = get_lora_info(lora1)
+     on2, label2, tag2, md2 = get_lora_info(lora2)
+     on3, label3, tag3, md3 = get_lora_info(lora3)
+     on4, label4, tag4, md4 = get_lora_info(lora4)
+     on5, label5, tag5, md5 = get_lora_info(lora5)
+     lora_paths = [lora1, lora2, lora3, lora4, lora5]
+     prompts = prompt.split(",") if prompt else []
+     for p in prompts:
+         p = str(p).strip()
+         if "<lora" in p:
+             result = re.findall(r'<lora:(.+?):(.+?)>', p)
+             if not result: continue
+             key = result[0][0]
+             wt = result[0][1]
+             path = to_lora_path(key)
+             if key not in loras_dict.keys() or not path:
+                 path = get_valid_lora_name(path)
+             if not path or path == "None": continue
+             if path in lora_paths:
+                 continue
+             elif not on1:
+                 lora1 = path
+                 lora_paths = [lora1, lora2, lora3, lora4, lora5]
+                 lora1_wt = safe_float(wt)
+                 on1 = True
+             elif not on2:
+                 lora2 = path
+                 lora_paths = [lora1, lora2, lora3, lora4, lora5]
+                 lora2_wt = safe_float(wt)
+                 on2 = True
+             elif not on3:
+                 lora3 = path
+                 lora_paths = [lora1, lora2, lora3, lora4, lora5]
+                 lora3_wt = safe_float(wt)
+                 on3 = True
+             elif not on4:
+                 lora4 = path
+                 lora_paths = [lora1, lora2, lora3, lora4, lora5]
+                 lora4_wt = safe_float(wt)
+                 on4 = True  # fixed: this branch inconsistently re-fetched via get_lora_info instead of setting the flag
+             elif not on5:
+                 lora5 = path
+                 lora_paths = [lora1, lora2, lora3, lora4, lora5]
+                 lora5_wt = safe_float(wt)
+                 on5 = True
+     return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
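+ # Behavior note (summary of the loop above): under the "Classic" syntax, each
+ # <lora:key:wt> tag in the prompt is resolved to a local file and placed into
+ # the first free LoRA slot with its weight; tags already occupying a slot are skipped.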
+
+
+ def get_lora_info(lora_path: str):
+     is_valid = False
+     tag = ""
+     label = ""
+     md = "None"
+     if not lora_path or lora_path == "None":
+         print("LoRA file not found.")
+         return is_valid, label, tag, md
+     path = Path(lora_path)
+     new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
+     if to_lora_key(str(new_path)) not in loras_dict.keys() and str(path) not in set(get_all_lora_list()):
+         print("LoRA file is not registered.")
+         return is_valid, label, tag, md  # fixed: originally returned `tag` in place of `is_valid`
+     if not new_path.exists():
+         download_private_file_from_somewhere(str(path), True)
+     basename = new_path.stem
+     label = f'Name: {basename}'
+     items = loras_dict.get(basename, None)
+     if items is None:
+         items = get_civitai_info(str(new_path))
+         if items is not None:
+             loras_dict[basename] = items
+     if items and items[2] != "":
+         tag = items[0]
+         label = f'Name: {basename}'
+         if items[1] == "Pony":
+             label = f'Name: {basename} (for Pony🐴)'
+         if items[4]:
+             md = f'<img src="{items[4]}" alt="thumbnail" width="150" height="240"><br>[LoRA Model URL]({items[3]})'
+         elif items[3]:
+             md = f'[LoRA Model URL]({items[3]})'
+     is_valid = True
+     return is_valid, label, tag, md
+
+
+ def normalize_prompt_list(tags: list[str]):
+     prompts = []
+     for tag in tags:
+         tag = str(tag).strip()
+         if tag:
+             prompts.append(tag)
+     return prompts
+
+
+ def apply_lora_prompt(prompt: str = "", lora_info: str = ""):
+     if lora_info == "None": return gr.update(value=prompt)
+     tags = prompt.split(",") if prompt else []
+     prompts = normalize_prompt_list(tags)
+
+     lora_tag = lora_info.replace("/", ",")
+     lora_tags = lora_tag.split(",") if str(lora_info) != "None" else []
+     lora_prompts = normalize_prompt_list(lora_tags)
+
+     empty = [""]
+     prompt = ", ".join(list_uniq(prompts + lora_prompts) + empty)
+     return gr.update(value=prompt)
+
+
+ def update_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
+     import re
+     on1, label1, tag1, md1 = get_lora_info(lora1)
+     on2, label2, tag2, md2 = get_lora_info(lora2)
+     on3, label3, tag3, md3 = get_lora_info(lora3)
+     on4, label4, tag4, md4 = get_lora_info(lora4)
+     on5, label5, tag5, md5 = get_lora_info(lora5)
+     lora_paths = [lora1, lora2, lora3, lora4, lora5]
+
+     output_prompt = prompt
+     if "Classic" in str(prompt_syntax):
+         prompts = prompt.split(",") if prompt else []
+         output_prompts = []
+         for p in prompts:
+             p = str(p).strip()
+             if "<lora" in p:
+                 result = re.findall(r'<lora:(.+?):(.+?)>', p)
+                 if not result: continue
+                 key = result[0][0]
+                 wt = result[0][1]
+                 path = to_lora_path(key)
+                 if key not in loras_dict.keys() or not path: continue
+                 if path in lora_paths:
+                     output_prompts.append(f"<lora:{to_lora_key(path)}:{safe_float(wt):.2f}>")
+             elif p:
+                 output_prompts.append(p)
+         lora_prompts = []
+         if on1: lora_prompts.append(f"<lora:{to_lora_key(lora1)}:{lora1_wt:.2f}>")
+         if on2: lora_prompts.append(f"<lora:{to_lora_key(lora2)}:{lora2_wt:.2f}>")
+         if on3: lora_prompts.append(f"<lora:{to_lora_key(lora3)}:{lora3_wt:.2f}>")
+         if on4: lora_prompts.append(f"<lora:{to_lora_key(lora4)}:{lora4_wt:.2f}>")
+         if on5: lora_prompts.append(f"<lora:{to_lora_key(lora5)}:{lora5_wt:.2f}>")
+         output_prompt = ", ".join(list_uniq(output_prompts + lora_prompts + [""]))
+     choices = get_all_lora_tupled_list()
+
+     return gr.update(value=output_prompt), gr.update(value=lora1, choices=choices), gr.update(value=lora1_wt),\
+         gr.update(value=tag1, label=label1, visible=on1), gr.update(visible=on1), gr.update(value=md1, visible=on1),\
+         gr.update(value=lora2, choices=choices), gr.update(value=lora2_wt),\
+         gr.update(value=tag2, label=label2, visible=on2), gr.update(visible=on2), gr.update(value=md2, visible=on2),\
+         gr.update(value=lora3, choices=choices), gr.update(value=lora3_wt),\
+         gr.update(value=tag3, label=label3, visible=on3), gr.update(visible=on3), gr.update(value=md3, visible=on3),\
+         gr.update(value=lora4, choices=choices), gr.update(value=lora4_wt),\
+         gr.update(value=tag4, label=label4, visible=on4), gr.update(visible=on4), gr.update(value=md4, visible=on4),\
+         gr.update(value=lora5, choices=choices), gr.update(value=lora5_wt),\
+         gr.update(value=tag5, label=label5, visible=on5), gr.update(visible=on5), gr.update(value=md5, visible=on5)
+
+
+ def get_my_lora(link_url):
+     from pathlib import Path
+     before = get_local_model_list(directory_loras)
+     for url in [url.strip() for url in link_url.split(',')]:
+         if not Path(f"{directory_loras}/{url.split('/')[-1]}").exists():
+             download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)
+     after = get_local_model_list(directory_loras)
+     new_files = list_sub(after, before)
+     for file in new_files:
+         path = Path(file)
+         if path.exists():
+             new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
+             path.resolve().rename(new_path.resolve())
+             update_lora_dict(str(new_path))
+     new_lora_model_list = get_lora_model_list()
+     new_lora_tupled_list = get_all_lora_tupled_list()
+
+     return gr.update(
+         choices=new_lora_tupled_list, value=new_lora_model_list[-1]
+     ), gr.update(
+         choices=new_lora_tupled_list
+     ), gr.update(
+         choices=new_lora_tupled_list
+     ), gr.update(
+         choices=new_lora_tupled_list
+     ), gr.update(
+         choices=new_lora_tupled_list
+     )
+
+
+ def upload_file_lora(files, progress=gr.Progress(track_tqdm=True)):
+     progress(0, desc="Uploading...")
+     file_paths = [file.name for file in files]
+     progress(1, desc="Uploaded.")
+     return gr.update(value=file_paths, visible=True), gr.update(visible=True)
+
+
+ def move_file_lora(filepaths):
+     import shutil
+     for file in filepaths:
+         path = Path(shutil.move(Path(file).resolve(), Path(f"./{directory_loras}").resolve()))
+         newpath = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
+         path.resolve().rename(newpath.resolve())
+         update_lora_dict(str(newpath))
+
+     new_lora_model_list = get_lora_model_list()
+     new_lora_tupled_list = get_all_lora_tupled_list()
+
+     return gr.update(
+         choices=new_lora_tupled_list, value=new_lora_model_list[-1]
+     ), gr.update(
+         choices=new_lora_tupled_list
+     ), gr.update(
+         choices=new_lora_tupled_list
+     ), gr.update(
+         choices=new_lora_tupled_list
+     ), gr.update(
+         choices=new_lora_tupled_list
+     )
+
+
+ def get_civitai_info(path):
+     # Note: this definition supersedes the earlier get_civitai_info above; it
+     # additionally records the model's download URL.
+     global civitai_not_exists_list
+     global loras_url_to_path_dict
+     import requests
+     from requests.adapters import HTTPAdapter
+     from urllib3.util import Retry
+     default = ["", "", "", "", ""]
+     if path in set(civitai_not_exists_list): return default
+     if not Path(path).exists(): return None
+     user_agent = get_user_agent()
+     headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
+     base_url = 'https://civitai.com/api/v1/model-versions/by-hash/'
+     params = {}
+     session = requests.Session()
+     retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
+     session.mount("https://", HTTPAdapter(max_retries=retries))
+     import hashlib
+     with open(path, 'rb') as file:
+         file_data = file.read()
+     hash_sha256 = hashlib.sha256(file_data).hexdigest()
+     url = base_url + hash_sha256
+     try:
+         r = session.get(url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
+     except Exception as e:
+         print(e)
+         return default
+     else:
+         if not r.ok: return None
+         json = r.json()
+         if 'baseModel' not in json:
+             civitai_not_exists_list.append(path)
+             return default
+         items = []
+         items.append(" / ".join(json['trainedWords']))  # The words (prompts) used to trigger the model
+         items.append(json['baseModel'])  # Base model (SDXL1.0, Pony, ...)
+         items.append(json['model']['name'])  # The name of the model version
+         items.append(f"https://civitai.com/models/{json['modelId']}")  # The repo url for the model
+         items.append(json['images'][0]['url'])  # The url for a sample image
+         # Fixed: keep the documented {download URL: local filepath} direction,
+         # which is what get_valid_lora_name expects when it looks up a URL.
+         loras_url_to_path_dict[json['downloadUrl']] = path
+         return items
+
+
+ def search_lora_on_civitai(query: str, allow_model: list[str] = ["Pony", "SDXL 1.0"], limit: int = 100):
+     import requests
+     from requests.adapters import HTTPAdapter
+     from urllib3.util import Retry
+     if not query: return None
+     user_agent = get_user_agent()
+     headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
+     base_url = 'https://civitai.com/api/v1/models'
+     params = {'query': query, 'types': ['LORA'], 'sort': 'Highest Rated', 'period': 'AllTime',
+               'nsfw': 'true', 'supportsGeneration': 'true', 'limit': limit}  # fixed: key had a stray trailing space
+     session = requests.Session()
+     retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
+     session.mount("https://", HTTPAdapter(max_retries=retries))
+     try:
+         r = session.get(base_url, params=params, headers=headers, stream=True, timeout=(3.0, 30))
+     except Exception as e:
+         print(e)
+         return None
+     else:
+         if not r.ok: return None
+         json = r.json()
+         if 'items' not in json: return None
+         items = []
+         for j in json['items']:
+             for model in j['modelVersions']:
+                 item = {}
+                 if model['baseModel'] not in set(allow_model): continue
+                 item['name'] = j['name']
+                 item['creator'] = j['creator']['username']
+                 item['tags'] = j['tags']
+                 item['model_name'] = model['name']
+                 item['base_model'] = model['baseModel']
+                 item['dl_url'] = model['downloadUrl']
+                 item['md'] = f'<img src="{model["images"][0]["url"]}" alt="thumbnail" width="150" height="240"><br>[LoRA Model URL](https://civitai.com/models/{j["id"]})'
+                 items.append(item)
+         return items
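+ # Usage sketch (illustrative): each returned item bundles name/creator/tags
+ # with the version's base model, download URL, and a Markdown thumbnail, e.g.
+ #   items = search_lora_on_civitai("style lora", ["Pony", "SDXL 1.0"], limit=5)
+ #   if items: print(items[0]['name'], items[0]['dl_url'])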
+
+
+ def search_civitai_lora(query, base_model):
+     global civitai_lora_last_results
+     items = search_lora_on_civitai(query, base_model)
+     if not items: return gr.update(choices=[("", "")], value="", visible=False),\
+         gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True)
+     civitai_lora_last_results = {}
+     choices = []
+     for item in items:
+         base_model_name = "Pony🐴" if item['base_model'] == "Pony" else item['base_model']
+         name = f"{item['name']} (for {base_model_name} / By: {item['creator']} / Tags: {', '.join(item['tags'])})"
+         value = item['dl_url']
+         choices.append((name, value))
+         civitai_lora_last_results[value] = item
+     if not choices: return gr.update(choices=[("", "")], value="", visible=False),\
+         gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True)
+     result = civitai_lora_last_results.get(choices[0][1], None)  # fixed: a "None" string default would crash on result['md']
+     md = result['md'] if result else ""
+     return gr.update(choices=choices, value=choices[0][1], visible=True), gr.update(value=md, visible=True),\
+         gr.update(visible=True), gr.update(visible=True)
+
+
+ def select_civitai_lora(search_result):
+     if "http" not in search_result: return gr.update(value=""), gr.update(value="None", visible=True)
+     result = civitai_lora_last_results.get(search_result, None)  # fixed: same "None" string default issue
+     md = result['md'] if result else ""
+     return gr.update(value=search_result), gr.update(value=md, visible=True)
+
+
+ def find_similar_lora(q: str):
+     from rapidfuzz.process import extractOne
+     from rapidfuzz.utils import default_process
+     query = to_lora_key(q)
+     print(f"Finding <lora:{query}:...>...")
+     keys = list(private_lora_dict.keys())
+     values = [x[2] for x in list(private_lora_dict.values())]
+     s = default_process(query)
+     e1 = extractOne(s, keys + values, processor=default_process, score_cutoff=80.0)
+     key = ""
+     if e1:
+         e = e1[0]
+         if e in set(keys): key = e
+         elif e in set(values): key = keys[values.index(e)]
+     if key:
+         path = to_lora_path(key)
+         new_path = to_lora_path(query)
+         if not Path(path).exists():
+             if not Path(new_path).exists(): download_private_file_from_somewhere(path, True)
+         if Path(path).exists() and copy_lora(path, new_path): return new_path
+     print(f"Finding <lora:{query}:...> on Civitai...")
+     civitai_query = Path(query).stem if Path(query).is_file() else query
+     civitai_query = civitai_query.replace("_", " ").replace("-", " ")
+     base_model = ["Pony", "SDXL 1.0"]
+     items = search_lora_on_civitai(civitai_query, base_model, 1)
+     if items:
+         item = items[0]
+         path = download_lora(item['dl_url'])
+         new_path = query if Path(query).is_file() else to_lora_path(query)
+         if path and copy_lora(path, new_path): return new_path
+     return None
+
+
+ def change_interface_mode(mode: str):
+     if mode == "Fast":
+         return gr.update(open=False), gr.update(visible=True), gr.update(open=False), gr.update(open=False),\
+             gr.update(visible=True), gr.update(open=False), gr.update(visible=True), gr.update(open=False),\
+             gr.update(visible=True), gr.update(value="Fast")
+     elif mode == "Simple":  # t2i mode
+         return gr.update(open=True), gr.update(visible=True), gr.update(open=False), gr.update(open=False),\
+             gr.update(visible=True), gr.update(open=False), gr.update(visible=False), gr.update(open=True),\
+             gr.update(visible=False), gr.update(value="Standard")
+     elif mode == "LoRA":  # t2i LoRA mode
+         return gr.update(open=True), gr.update(visible=True), gr.update(open=True), gr.update(open=False),\
+             gr.update(visible=True), gr.update(open=True), gr.update(visible=True), gr.update(open=False),\
+             gr.update(visible=False), gr.update(value="Standard")
+     else:  # Standard
+         return gr.update(open=False), gr.update(visible=True), gr.update(open=False), gr.update(open=False),\
+             gr.update(visible=True), gr.update(open=False), gr.update(visible=True), gr.update(open=False),\
+             gr.update(visible=True), gr.update(value="Standard")
+
+
+ quality_prompt_list = [
+     {
+         "name": "None",
+         "prompt": "",
+         "negative_prompt": "lowres",
+     },
+     {
+         "name": "Animagine Common",
+         "prompt": "anime artwork, anime style, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres",
+         "negative_prompt": "lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
+     },
+     {
+         "name": "Pony Anime Common",
+         "prompt": "source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres",
+         "negative_prompt": "source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends",
+     },
+     {
+         "name": "Pony Common",
+         "prompt": "source_anime, score_9, score_8_up, score_7_up",
+         "negative_prompt": "source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends",
+     },
+     {
+         "name": "Animagine Standard v3.0",
+         "prompt": "masterpiece, best quality",
+         "negative_prompt": "lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name",
+     },
+     {
+         "name": "Animagine Standard v3.1",
+         "prompt": "masterpiece, best quality, very aesthetic, absurdres",
+         "negative_prompt": "lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
+     },
+     {
+         "name": "Animagine Light v3.1",
+         "prompt": "(masterpiece), best quality, very aesthetic, perfect face",
+         "negative_prompt": "(low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn",
+     },
+     {
+         "name": "Animagine Heavy v3.1",
+         "prompt": "(masterpiece), (best quality), (ultra-detailed), very aesthetic, illustration, disheveled hair, perfect composition, moist skin, intricate details",
+         "negative_prompt": "longbody, lowres, bad anatomy, bad hands, missing fingers, pubic hair, extra digit, fewer digits, cropped, worst quality, low quality, very displeasing",
+     },
+ ]
+
+
+ style_list = [
+     {
+         "name": "None",
+         "prompt": "",
+         "negative_prompt": "",
+     },
+     {
+         "name": "Cinematic",
+         "prompt": "cinematic still, emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
+         "negative_prompt": "cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured",
+     },
+     {
+         "name": "Photographic",
+         "prompt": "cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed",
+         "negative_prompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly",
+     },
+     {
+         "name": "Anime",
+         "prompt": "anime artwork, anime style, vibrant, studio anime, highly detailed",
+         "negative_prompt": "photo, deformed, black and white, realism, disfigured, low contrast",
+     },
+     {
+         "name": "Manga",
+         "prompt": "manga style, vibrant, high-energy, detailed, iconic, Japanese comic style",
+         "negative_prompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, Western comic style",
+     },
+     {
+         "name": "Digital Art",
+         "prompt": "concept art, digital artwork, illustrative, painterly, matte painting, highly detailed",
+         "negative_prompt": "photo, photorealistic, realism, ugly",
+     },
+     {
+         "name": "Pixel art",
+         "prompt": "pixel-art, low-res, blocky, pixel art style, 8-bit graphics",
+         "negative_prompt": "sloppy, messy, blurry, noisy, highly detailed, ultra textured, photo, realistic",
+     },
+     {
+         "name": "Fantasy art",
+         "prompt": "ethereal fantasy concept art, magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
+         "negative_prompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white",
+     },
+     {
+         "name": "Neonpunk",
+         "prompt": "neonpunk style, cyberpunk, vaporwave, neon, vibes, vibrant, stunningly beautiful, crisp, detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic, ultra detailed, intricate, professional",
+         "negative_prompt": "painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured",
+     },
+     {
+         "name": "3D Model",
+         "prompt": "professional 3d model, octane render, highly detailed, volumetric, dramatic lighting",
+         "negative_prompt": "ugly, deformed, noisy, low poly, blurry, painting",
+     },
+ ]
+
+
+ optimization_list = {
+     "None": [28, 7., 'Euler a', False, 'None', 1.],
+     "Default": [28, 7., 'Euler a', False, 'None', 1.],
+     "SPO": [28, 7., 'Euler a', True, 'loras/spo_sdxl_10ep_4k-data_lora_diffusers.safetensors', 1.],
+     "DPO": [28, 7., 'Euler a', True, 'loras/sdxl-DPO-LoRA.safetensors', 1.],
+     "DPO Turbo": [8, 2.5, 'LCM', True, 'loras/sd_xl_dpo_turbo_lora_v1-128dim.safetensors', 1.],
+     "SDXL Turbo": [8, 2.5, 'LCM', True, 'loras/sd_xl_turbo_lora_v1.safetensors', 1.],
+     "Hyper-SDXL 12step": [12, 5., 'TCD', True, 'loras/Hyper-SDXL-12steps-CFG-lora.safetensors', 1.],
+     "Hyper-SDXL 8step": [8, 5., 'TCD', True, 'loras/Hyper-SDXL-8steps-CFG-lora.safetensors', 1.],
+     "Hyper-SDXL 4step": [4, 0, 'TCD', True, 'loras/Hyper-SDXL-4steps-lora.safetensors', 1.],
+     "Hyper-SDXL 2step": [2, 0, 'TCD', True, 'loras/Hyper-SDXL-2steps-lora.safetensors', 1.],
+     "Hyper-SDXL 1step": [1, 0, 'TCD', True, 'loras/Hyper-SDXL-1steps-lora.safetensors', 1.],
+     "PCM 16step": [16, 4., 'Euler a trailing', True, 'loras/pcm_sdxl_normalcfg_16step_converted.safetensors', 1.],
+     "PCM 8step": [8, 4., 'Euler a trailing', True, 'loras/pcm_sdxl_normalcfg_8step_converted.safetensors', 1.],
+     "PCM 4step": [4, 2., 'Euler a trailing', True, 'loras/pcm_sdxl_smallcfg_4step_converted.safetensors', 1.],
+     "PCM 2step": [2, 1., 'Euler a trailing', True, 'loras/pcm_sdxl_smallcfg_2step_converted.safetensors', 1.],
+ }
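+ # Schema note (summarizing the table above): each entry is
+ # [steps, CFG scale, sampler, clip_skip, LoRA path, LoRA scale],
+ # unpacked positionally by set_optimization below.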
+
+
+ def set_optimization(opt, steps_gui, cfg_gui, sampler_gui, clip_skip_gui, lora_gui, lora_scale_gui):
+     if opt not in list(optimization_list.keys()): opt = "None"
+     def_steps_gui = 28
+     def_cfg_gui = 7.
+     # Fixed: the original used the string "None" as the .get() fallback and then
+     # indexed into it; fall back to the "None" entry of the table instead.
+     steps, cfg, sampler, clip_skip, lora, lora_scale = optimization_list.get(opt, optimization_list["None"])
+     if opt == "None":
+         steps = max(steps_gui, def_steps_gui)
+         cfg = max(cfg_gui, def_cfg_gui)
+         clip_skip = clip_skip_gui
+     elif opt == "SPO" or opt == "DPO":
+         steps = max(steps_gui, def_steps_gui)
+         cfg = max(cfg_gui, def_cfg_gui)
+
+     return gr.update(value=steps), gr.update(value=cfg), gr.update(value=sampler),\
+         gr.update(value=clip_skip), gr.update(value=lora), gr.update(value=lora_scale),
+
+
+ # [sampler_gui, steps_gui, cfg_gui, clip_skip_gui, img_width_gui, img_height_gui, optimization_gui]
+ preset_sampler_setting = {
+     "None": ["Euler a", 28, 7., True, 1024, 1024, "None"],
+     "Anime 3:4 Fast": ["LCM", 8, 2.5, True, 896, 1152, "DPO Turbo"],
+     "Anime 3:4 Standard": ["Euler a", 28, 7., True, 896, 1152, "None"],
+     "Anime 3:4 Heavy": ["Euler a", 40, 7., True, 896, 1152, "None"],
+     "Anime 1:1 Fast": ["LCM", 8, 2.5, True, 1024, 1024, "DPO Turbo"],
+     "Anime 1:1 Standard": ["Euler a", 28, 7., True, 1024, 1024, "None"],
+     "Anime 1:1 Heavy": ["Euler a", 40, 7., True, 1024, 1024, "None"],
+     "Photo 3:4 Fast": ["LCM", 8, 2.5, False, 896, 1152, "DPO Turbo"],
+     "Photo 3:4 Standard": ["DPM++ 2M Karras", 28, 7., False, 896, 1152, "None"],
+     "Photo 3:4 Heavy": ["DPM++ 2M Karras", 40, 7., False, 896, 1152, "None"],
+     "Photo 1:1 Fast": ["LCM", 8, 2.5, False, 1024, 1024, "DPO Turbo"],
+     "Photo 1:1 Standard": ["DPM++ 2M Karras", 28, 7., False, 1024, 1024, "None"],
+     "Photo 1:1 Heavy": ["DPM++ 2M Karras", 40, 7., False, 1024, 1024, "None"],
+ }
+
+
+ def set_sampler_settings(sampler_setting):
+     if sampler_setting not in list(preset_sampler_setting.keys()) or sampler_setting == "None":
+         return gr.update(value="Euler a"), gr.update(value=28), gr.update(value=7.), gr.update(value=True),\
+             gr.update(value=1024), gr.update(value=1024), gr.update(value="None")
+     v = preset_sampler_setting.get(sampler_setting, ["Euler a", 28, 7., True, 1024, 1024, "None"])  # fixed: default now carries all 7 fields
+     # sampler, steps, cfg, clip_skip, width, height, optimization
+     return gr.update(value=v[0]), gr.update(value=v[1]), gr.update(value=v[2]), gr.update(value=v[3]),\
+         gr.update(value=v[4]), gr.update(value=v[5]), gr.update(value=v[6])
+
+
+ preset_styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
+ preset_quality = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in quality_prompt_list}
+
+
+ def process_style_prompt(prompt: str, neg_prompt: str, styles_key: str = "None", quality_key: str = "None", type: str = "Auto"):
+     def to_list(s):
+         return [x.strip() for x in s.split(",") if not s == ""]
+
+     def list_sub(a, b):
+         return [e for e in a if e not in b]
+
+     def list_uniq(l):
+         return sorted(set(l), key=l.index)
+
+     animagine_ps = to_list("anime artwork, anime style, vibrant, studio anime, highly detailed, masterpiece, best quality, very aesthetic, absurdres")
+     animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
+     pony_ps = to_list("source_anime, score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
+     pony_nps = to_list("source_pony, source_furry, source_cartoon, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
+     prompts = to_list(prompt)
+     neg_prompts = to_list(neg_prompt)
+
+     all_styles_ps = []
+     all_styles_nps = []
+     for d in style_list:
+         all_styles_ps.extend(to_list(str(d.get("prompt", ""))))
+         all_styles_nps.extend(to_list(str(d.get("negative_prompt", ""))))
+
+     all_quality_ps = []
+     all_quality_nps = []
+     for d in quality_prompt_list:
+         all_quality_ps.extend(to_list(str(d.get("prompt", ""))))
+         all_quality_nps.extend(to_list(str(d.get("negative_prompt", ""))))
+
+     quality_ps = to_list(preset_quality[quality_key][0])
+     quality_nps = to_list(preset_quality[quality_key][1])
+     styles_ps = to_list(preset_styles[styles_key][0])
+     styles_nps = to_list(preset_styles[styles_key][1])
+
+     prompts = list_sub(prompts, animagine_ps + pony_ps + all_styles_ps + all_quality_ps)
+     neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps + all_styles_nps + all_quality_nps)
+
+     last_empty_p = [""] if not prompts and type != "None" and type != "Auto" and styles_key != "None" and quality_key != "None" else []
+     last_empty_np = [""] if not neg_prompts and type != "None" and type != "Auto" and styles_key != "None" and quality_key != "None" else []
+
+     if type == "Animagine":
+         prompts = prompts + animagine_ps
+         neg_prompts = neg_prompts + animagine_nps
+     elif type == "Pony":
+         prompts = prompts + pony_ps
+         neg_prompts = neg_prompts + pony_nps
+
+     prompts = prompts + styles_ps + quality_ps
+     neg_prompts = neg_prompts + styles_nps + quality_nps
+
+     prompt = ", ".join(list_uniq(prompts) + last_empty_p)
+     neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
+
+     return gr.update(value=prompt), gr.update(value=neg_prompt), gr.update(value=type)
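+ # Worked example (illustrative): with styles_key="Anime" and
+ # quality_key="Animagine Standard v3.1", a prompt like "1girl" first has any
+ # known style/quality tags stripped, then the selected preset tags appended,
+ # giving roughly "1girl, anime artwork, anime style, ..., masterpiece, best quality, ...".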
+
+
+ def set_quick_presets(genre: str = "None", type: str = "Auto", speed: str = "None", aspect: str = "None"):
+     quality = "None"
+     style = "None"
+     sampler = "None"
+     opt = "None"
+
+     if genre == "Anime":
+         if type != "None" and type != "Auto": style = "Anime"
+         if aspect == "1:1":
+             if speed == "Heavy":
+                 sampler = "Anime 1:1 Heavy"
+             elif speed == "Fast":
+                 sampler = "Anime 1:1 Fast"
+             else:
+                 sampler = "Anime 1:1 Standard"
+         elif aspect == "3:4":
+             if speed == "Heavy":
+                 sampler = "Anime 3:4 Heavy"
+             elif speed == "Fast":
+                 sampler = "Anime 3:4 Fast"
+             else:
+                 sampler = "Anime 3:4 Standard"
+         if type == "Pony":
+             quality = "Pony Anime Common"
+         elif type == "Animagine":
+             quality = "Animagine Common"
+         else:
+             quality = "None"
+     elif genre == "Photo":
+         if type != "None" and type != "Auto": style = "Photographic"
+         if aspect == "1:1":
+             if speed == "Heavy":
+                 sampler = "Photo 1:1 Heavy"
+             elif speed == "Fast":
+                 sampler = "Photo 1:1 Fast"
+             else:
+                 sampler = "Photo 1:1 Standard"
+         elif aspect == "3:4":
+             if speed == "Heavy":
+                 sampler = "Photo 3:4 Heavy"
+             elif speed == "Fast":
+                 sampler = "Photo 3:4 Fast"
+             else:
+                 sampler = "Photo 3:4 Standard"
+         if type == "Pony":
+             quality = "Pony Common"
+         else:
+             quality = "None"
+
+     if speed == "Fast":
+         opt = "DPO Turbo"
+         if genre == "Anime" and type != "Pony" and type != "Auto": quality = "Animagine Light v3.1"
+
+     return gr.update(value=quality), gr.update(value=style), gr.update(value=sampler), gr.update(value=opt), gr.update(value=type)
+
+
+ textual_inversion_dict = {}
+ try:
+     with open('textual_inversion_dict.json', encoding='utf-8') as f:
+         textual_inversion_dict = json.load(f)
+ except Exception:
+     pass
+ textual_inversion_file_token_list = []
+
+
+ def get_tupled_embed_list(embed_list):
+     global textual_inversion_file_token_list  # fixed: the global statement named a nonexistent variable
+     tupled_list = []
+     for file in embed_list:
+         token = textual_inversion_dict.get(Path(file).name, [Path(file).stem.replace(",", ""), False])[0]
+         tupled_list.append((token, file))
+         textual_inversion_file_token_list.append(token)
+     return tupled_list
+
+
+ def set_textual_inversion_prompt(textual_inversion_gui, prompt_gui, neg_prompt_gui, prompt_syntax_gui):
+     # Fixed: dict values are [token, is_positive] pairs, so compare prompt tags
+     # against the tokens rather than against the whole pairs.
+     ti_tags = [v[0] for v in textual_inversion_dict.values()] + textual_inversion_file_token_list
+     tags = prompt_gui.split(",") if prompt_gui else []
+     prompts = []
+     for tag in tags:
+         tag = str(tag).strip()
+         if tag and tag not in ti_tags:
+             prompts.append(tag)
+     ntags = neg_prompt_gui.split(",") if neg_prompt_gui else []
+     neg_prompts = []
+     for tag in ntags:
+         tag = str(tag).strip()
+         if tag and tag not in ti_tags:
+             neg_prompts.append(tag)
+     ti_prompts = []
+     ti_neg_prompts = []
+     for ti in textual_inversion_gui:
+         tokens = textual_inversion_dict.get(Path(ti).name, [Path(ti).stem.replace(",", ""), False])
+         is_positive = tokens[1] == True or "positive" in Path(ti).parent.name
+         if is_positive:  # positive prompt
+             ti_prompts.append(tokens[0])
+         else:  # negative prompt (default)
+             ti_neg_prompts.append(tokens[0])
+     empty = [""]
+     prompt = ", ".join(prompts + ti_prompts + empty)
+     neg_prompt = ", ".join(neg_prompts + ti_neg_prompts + empty)
+     return gr.update(value=prompt), gr.update(value=neg_prompt),
+
+
+ def get_model_pipeline(repo_id: str):
+     from huggingface_hub import HfApi
+     api = HfApi()
+     default = "StableDiffusionPipeline"
+     try:
+         if " " in repo_id or not api.repo_exists(repo_id): return default
+         model = api.model_info(repo_id=repo_id)
+     except Exception:
+         return default
+     if model.private or model.gated: return default
+     tags = model.tags
+     if 'diffusers' not in tags: return default
+     if 'diffusers:StableDiffusionXLPipeline' in tags:
+         return "StableDiffusionXLPipeline"
+     elif 'diffusers:StableDiffusionPipeline' in tags:
+         return "StableDiffusionPipeline"
+     else:
+         return default
+
packages.txt ADDED
@@ -0,0 +1 @@
+ git-lfs aria2 -y ffmpeg
pre-requirements.txt ADDED
@@ -0,0 +1 @@
+ pip>=23.0.0
requirements.txt ADDED
@@ -0,0 +1,24 @@
+ spaces
+ accelerate
+ diffusers
+ invisible_watermark
+ transformers
+ xformers
+ git+https://github.com/R3gm/stablepy.git@dev2
+ torch==2.2.0
+ gdown
+ opencv-python
+ huggingface_hub
+ scikit-build-core
+ https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.90-cu124/llama_cpp_python-0.2.90-cp310-cp310-linux_x86_64.whl
+ git+https://github.com/Maximilian-Winter/llama-cpp-agent
+ pybind11>=2.12
+ rapidfuzz
+ torchvision
+ optimum[onnxruntime]
+ dartrs
+ httpx==0.13.3
+ httpcore
+ googletrans==4.0.0rc1
+ timm
+ wrapt-timeout-decorator
tagger/character_series_dict.csv ADDED
The diff for this file is too large to render. See raw diff
 
tagger/danbooru_e621.csv ADDED
The diff for this file is too large to render. See raw diff
 
tagger/fl2sd3longcap.py ADDED
@@ -0,0 +1,78 @@
+ from transformers import AutoProcessor, AutoModelForCausalLM
+ import spaces
+ import re
+ from PIL import Image
+ import torch
+
+ import subprocess
+ subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ fl_model = AutoModelForCausalLM.from_pretrained('gokaygokay/Florence-2-SD3-Captioner', trust_remote_code=True).to("cpu").eval()
+ fl_processor = AutoProcessor.from_pretrained('gokaygokay/Florence-2-SD3-Captioner', trust_remote_code=True)
+
+
+ def fl_modify_caption(caption: str) -> str:
+     """
+     Removes specific prefixes from captions if present, otherwise returns the original caption.
+     Args:
+         caption (str): A string containing a caption.
+     Returns:
+         str: The caption with the prefix removed if it was present, or the original caption.
+     """
+     # Define the prefixes to remove
+     prefix_substrings = [
+         ('captured from ', ''),
+         ('captured at ', '')
+     ]
+
+     # Create a regex pattern to match any of the prefixes
+     pattern = '|'.join([re.escape(opening) for opening, _ in prefix_substrings])
+     replacers = {opening.lower(): replacer for opening, replacer in prefix_substrings}
+
+     # Function to replace matched prefix with its corresponding replacement
+     def replace_fn(match):
+         return replacers[match.group(0).lower()]
+
+     # Apply the regex to the caption
+     modified_caption = re.sub(pattern, replace_fn, caption, count=1, flags=re.IGNORECASE)
+
+     # If the caption was modified, return the modified version; otherwise, return the original
+     return modified_caption if modified_caption != caption else caption
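+ # Worked example (illustrative):
+ #   fl_modify_caption("Captured from a low angle, a cat sits on a wall.")
+ # strips the first matching prefix once, case-insensitively, returning
+ # "a low angle, a cat sits on a wall."; captions without these prefixes pass through unchanged.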
+
+
+ @spaces.GPU(duration=30)
+ def fl_run_example(image):
+     task_prompt = "<DESCRIPTION>"
+     prompt = task_prompt + "Describe this image in great detail."
+
+     # Ensure the image is in RGB mode
+     if image.mode != "RGB":
+         image = image.convert("RGB")
+
+     fl_model.to(device)
+     inputs = fl_processor(text=prompt, images=image, return_tensors="pt").to(device)
+     generated_ids = fl_model.generate(
+         input_ids=inputs["input_ids"],
+         pixel_values=inputs["pixel_values"],
+         max_new_tokens=1024,
+         num_beams=3
+     )
+     fl_model.to("cpu")
+     generated_text = fl_processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
+     parsed_answer = fl_processor.post_process_generation(generated_text, task=task_prompt, image_size=(image.width, image.height))
+     return fl_modify_caption(parsed_answer["<DESCRIPTION>"])
+
+
+ def predict_tags_fl2_sd3(image: Image.Image, input_tags: str, algo: list[str]):
+     def to_list(s):
+         return [x.strip() for x in s.split(",") if not s == ""]
+
+     def list_uniq(l):
+         return sorted(set(l), key=l.index)
+
+     if "Use Florence-2-SD3-Long-Captioner" not in algo:
+         return input_tags
+     tag_list = list_uniq(to_list(input_tags) + to_list(fl_run_example(image) + ", "))
+     if "" in tag_list: tag_list.remove("")  # fixed: bare remove("") raised ValueError when no empty entry was present
+     return ", ".join(tag_list)
tagger/output.py ADDED
@@ -0,0 +1,16 @@
+ from dataclasses import dataclass
+
+
+ @dataclass
+ class UpsamplingOutput:
+     upsampled_tags: str
+
+     copyright_tags: str
+     character_tags: str
+     general_tags: str
+     rating_tag: str
+     aspect_ratio_tag: str
+     length_tag: str
+     identity_tag: str
+
+     elapsed_time: float = 0.0
tagger/tag_group.csv ADDED
The diff for this file is too large to render. See raw diff
 
tagger/tagger.py ADDED
@@ -0,0 +1,556 @@
+ from PIL import Image
+ import torch
+ import gradio as gr
+ import spaces
+ from transformers import (
+     AutoImageProcessor,
+     AutoModelForImageClassification,
+ )
+ from pathlib import Path
+
+
+ WD_MODEL_NAMES = ["p1atdev/wd-swinv2-tagger-v3-hf"]
+ WD_MODEL_NAME = WD_MODEL_NAMES[0]
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ default_device = device
+
+ try:
+     wd_model = AutoModelForImageClassification.from_pretrained(WD_MODEL_NAME, trust_remote_code=True).to(default_device).eval()
+     wd_processor = AutoImageProcessor.from_pretrained(WD_MODEL_NAME, trust_remote_code=True)
+ except Exception as e:
+     print(e)
+     wd_model = wd_processor = None
+
+
+ def _people_tag(noun: str, minimum: int = 1, maximum: int = 5):
+     return (
+         [f"1{noun}"]
+         + [f"{num}{noun}s" for num in range(minimum + 1, maximum + 1)]
+         + [f"{maximum+1}+{noun}s"]
+     )
+
+
+ PEOPLE_TAGS = (
+     _people_tag("girl") + _people_tag("boy") + _people_tag("other") + ["no humans"]
+ )
+
+
+ RATING_MAP = {
+     "sfw": "safe",
+     "general": "safe",
+     "sensitive": "sensitive",
+     "questionable": "nsfw",
+     "explicit": "explicit, nsfw",
+ }
+ DANBOORU_TO_E621_RATING_MAP = {
+     "sfw": "rating_safe",
+     "general": "rating_safe",
+     "safe": "rating_safe",
+     "sensitive": "rating_safe",
+     "nsfw": "rating_explicit",
+     "explicit, nsfw": "rating_explicit",
+     "explicit": "rating_explicit",
+     "rating:safe": "rating_safe",
+     "rating:general": "rating_safe",
+     "rating:sensitive": "rating_safe",
+     "rating:questionable, nsfw": "rating_explicit",
+     "rating:explicit, nsfw": "rating_explicit",
+ }
+
+
+ # https://github.com/toriato/stable-diffusion-webui-wd14-tagger/blob/a9eacb1eff904552d3012babfa28b57e1d3e295c/tagger/ui.py#L368
+ kaomojis = [
+     "0_0",
+     "(o)_(o)",
+     "+_+",
+     "+_-",
+     "._.",
+     "<o>_<o>",
+     "<|>_<|>",
+     "=_=",
+     ">_<",
+     "3_3",
+     "6_9",
+     ">_o",
+     "@_@",
+     "^_^",
+     "o_o",
+     "u_u",
+     "x_x",
+     "|_|",
+     "||_||",
+ ]
+
+
+ def replace_underline(x: str):
+     return x.strip().replace("_", " ") if x not in kaomojis else x.strip()
+
+
+ def to_list(s):
+     return [x.strip() for x in s.split(",") if not s == ""]
+
+
+ def list_sub(a, b):
+     return [e for e in a if e not in b]
+
+
+ def list_uniq(l):
+     return sorted(set(l), key=l.index)
+
+
+ def load_dict_from_csv(filename):
+     dict = {}
+     if not Path(filename).exists():
+         if Path('./tagger/', filename).exists(): filename = str(Path('./tagger/', filename))
+         else: return dict
+     try:
+         with open(filename, 'r', encoding="utf-8") as f:
+             lines = f.readlines()
+     except Exception:
+         print(f"Failed to open dictionary file: {filename}")
+         return dict
+     for line in lines:
+         parts = line.strip().split(',')
+         dict[parts[0]] = parts[1]
+     return dict
+
+
+ # CSV format note (inferred from the parser above): each row is read as
+ # "key,value" on the first two comma-separated fields, e.g. a row like
+ # "hatsune miku,vocaloid" (illustrative) maps a character tag to its series tag.
+ anime_series_dict = load_dict_from_csv('character_series_dict.csv')
+
+
+ def character_list_to_series_list(character_list):
+     output_series_tag = []
+     series_tag = ""
+     series_dict = anime_series_dict
+     for tag in character_list:
+         series_tag = series_dict.get(tag, "")
+         if tag.endswith(")"):
+             tags = tag.split("(")
+             character_tag = "(".join(tags[:-1])
+             if character_tag.endswith(" "):
+                 character_tag = character_tag[:-1]
+             series_tag = tags[-1].replace(")", "")
+         if series_tag:
+             output_series_tag.append(series_tag)
+     return output_series_tag
+
+
+ def select_random_character(series: str, character: str):
+     from random import seed, randrange
+     seed()
+     character_list = list(anime_series_dict.keys())
+     character = character_list[randrange(len(character_list))]  # fixed: the original could never pick the last entry
+     series = anime_series_dict.get(character.split(",")[0].strip(), "")
+     return series, character
+
+
+ def danbooru_to_e621(dtag, e621_dict):
+     def d_to_e(match, e621_dict):
+         dtag = match.group(0)
+         etag = e621_dict.get(replace_underline(dtag), "")
+         if etag:
+             return etag
+         else:
+             return dtag
+
+     import re
+     tag = re.sub(r'[\w ]+', lambda wrapper: d_to_e(wrapper, e621_dict), dtag, 2)
+     return tag
+
+
+ # Worked example (illustrative): if e621_dict maps "feet" to "paws", then
+ # danbooru_to_e621("feet", e621_dict) returns "paws"; unmapped tags pass through.
+ danbooru_to_e621_dict = load_dict_from_csv('danbooru_e621.csv')
+
+
+ def convert_danbooru_to_e621_prompt(input_prompt: str = "", prompt_type: str = "danbooru"):
+     if prompt_type == "danbooru": return input_prompt
+     tags = input_prompt.split(",") if input_prompt else []
+     people_tags: list[str] = []
+     other_tags: list[str] = []
+     rating_tags: list[str] = []
+
+     e621_dict = danbooru_to_e621_dict
+     for tag in tags:
+         tag = replace_underline(tag)
+         tag = danbooru_to_e621(tag, e621_dict)
+         if tag in PEOPLE_TAGS:
+             people_tags.append(tag)
+         elif tag in DANBOORU_TO_E621_RATING_MAP.keys():
+             rating_tags.append(DANBOORU_TO_E621_RATING_MAP.get(tag.replace(" ",""), ""))
+         else:
+             other_tags.append(tag)
+
+     rating_tags = sorted(set(rating_tags), key=rating_tags.index)
+     rating_tags = [rating_tags[0]] if rating_tags else []
+     rating_tags = ["explicit, nsfw"] if rating_tags and rating_tags[0] == "explicit" else rating_tags
+
+     output_prompt = ", ".join(people_tags + other_tags + rating_tags)
+
+     return output_prompt
+
+
193
+ def translate_prompt(prompt: str = ""):
194
+ def translate_to_english(prompt):
195
+ import httpcore
196
+ setattr(httpcore, 'SyncHTTPTransport', 'AsyncHTTPProxy')
197
+ from googletrans import Translator
198
+ translator = Translator()
199
+ try:
200
+ translated_prompt = translator.translate(prompt, src='auto', dest='en').text
201
+ return translated_prompt
202
+ except Exception as e:
203
+ print(e)
204
+ return prompt
205
+
206
+ def is_japanese(s):
207
+ import unicodedata
208
+ for ch in s:
209
+ name = unicodedata.name(ch, "")
210
+ if "CJK UNIFIED" in name or "HIRAGANA" in name or "KATAKANA" in name:
211
+ return True
212
+ return False
213
+
214
+ def to_list(s):
215
+ return [x.strip() for x in s.split(",")]
216
+
217
+ prompts = to_list(prompt)
218
+ outputs = []
219
+ for p in prompts:
220
+ p = translate_to_english(p) if is_japanese(p) else p
221
+ outputs.append(p)
222
+
223
+ return ", ".join(outputs)
224
+
225
+
226
+ def translate_prompt_to_ja(prompt: str = ""):
227
+ def translate_to_japanese(prompt):
228
+ import httpcore
229
+ setattr(httpcore, 'SyncHTTPTransport', 'AsyncHTTPProxy')
230
+ from googletrans import Translator
231
+ translator = Translator()
232
+ try:
233
+ translated_prompt = translator.translate(prompt, src='en', dest='ja').text
234
+ return translated_prompt
235
+ except Exception as e:
236
+ print(e)
237
+ return prompt
238
+
239
+ def is_japanese(s):
240
+ import unicodedata
241
+ for ch in s:
242
+ name = unicodedata.name(ch, "")
243
+ if "CJK UNIFIED" in name or "HIRAGANA" in name or "KATAKANA" in name:
244
+ return True
245
+ return False
246
+
247
+ def to_list(s):
248
+ return [x.strip() for x in s.split(",")]
249
+
250
+ prompts = to_list(prompt)
251
+ outputs = []
252
+ for p in prompts:
253
+ p = translate_to_japanese(p) if not is_japanese(p) else p
254
+ outputs.append(p)
255
+
256
+ return ", ".join(outputs)
257
+
258
+
259
+ def tags_to_ja(itag, dict):
260
+ def t_to_j(match, dict):
261
+ tag = match.group(0)
262
+ ja = dict.get(replace_underline(tag), "")
263
+ if ja:
264
+ return ja
265
+ else:
266
+ return tag
267
+
268
+ import re
269
+ tag = re.sub(r'[\w ]+', lambda wrapper: t_to_j(wrapper, dict), itag, 2)
270
+
271
+ return tag
272
+
273
+
274
+ def convert_tags_to_ja(input_prompt: str = ""):
275
+ tags = input_prompt.split(",") if input_prompt else []
276
+ out_tags = []
277
+
278
+ tags_to_ja_dict = load_dict_from_csv('all_tags_ja_ext.csv')
279
+ dict = tags_to_ja_dict
280
+ for tag in tags:
281
+ tag = replace_underline(tag)
282
+ tag = tags_to_ja(tag, dict)
283
+ out_tags.append(tag)
284
+
285
+ return ", ".join(out_tags)
286
+
287
+
288
+ enable_auto_recom_prompt = True
289
+
290
+
291
+ animagine_ps = to_list("masterpiece, best quality, very aesthetic, absurdres")
292
+ animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
293
+ pony_ps = to_list("score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
294
+ pony_nps = to_list("source_pony, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
295
+ other_ps = to_list("anime artwork, anime style, studio anime, highly detailed, cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed")
296
+ other_nps = to_list("photo, deformed, black and white, realism, disfigured, low contrast, drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly")
297
+ default_ps = to_list("highly detailed, masterpiece, best quality, very aesthetic, absurdres")
298
+ default_nps = to_list("score_6, score_5, score_4, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
299
+ def insert_recom_prompt(prompt: str = "", neg_prompt: str = "", type: str = "None"):
300
+ global enable_auto_recom_prompt
301
+ prompts = to_list(prompt)
302
+ neg_prompts = to_list(neg_prompt)
303
+
304
+ prompts = list_sub(prompts, animagine_ps + pony_ps)
305
+ neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps)
306
+
307
+ last_empty_p = [""] if not prompts and type != "None" else []
308
+ last_empty_np = [""] if not neg_prompts and type != "None" else []
309
+
310
+ if type == "Auto":
311
+ enable_auto_recom_prompt = True
312
+ else:
313
+ enable_auto_recom_prompt = False
314
+ if type == "Animagine":
315
+ prompts = prompts + animagine_ps
316
+ neg_prompts = neg_prompts + animagine_nps
317
+ elif type == "Pony":
318
+ prompts = prompts + pony_ps
319
+ neg_prompts = neg_prompts + pony_nps
320
+
321
+ prompt = ", ".join(list_uniq(prompts) + last_empty_p)
322
+ neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
323
+
324
+ return prompt, neg_prompt
325
+
326
+
327
+ def load_model_prompt_dict():
328
+ import json
329
+ dict = {}
330
+ path = 'model_dict.json' if Path('model_dict.json').exists() else './tagger/model_dict.json'
331
+ try:
332
+ with open('model_dict.json', encoding='utf-8') as f:
333
+ dict = json.load(f)
334
+ except Exception:
335
+ pass
336
+ return dict
337
+
338
+
339
+ model_prompt_dict = load_model_prompt_dict()
340
+
341
+
342
+ def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None"):
+     if not model_name or not enable_auto_recom_prompt: return prompt, neg_prompt
+     prompts = to_list(prompt)
+     neg_prompts = to_list(neg_prompt)
+     prompts = list_sub(prompts, animagine_ps + pony_ps + other_ps)
+     neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps + other_nps)
+     # was `type != "None"`, but `type` is undefined in this function; compare the model name instead
+     last_empty_p = [""] if not prompts and model_name != "None" else []
+     last_empty_np = [""] if not neg_prompts and model_name != "None" else []
+     ps = []
+     nps = []
+     if model_name in model_prompt_dict.keys():
+         ps = to_list(model_prompt_dict[model_name]["prompt"])
+         nps = to_list(model_prompt_dict[model_name]["negative_prompt"])
+     else:
+         ps = default_ps
+         nps = default_nps
+     prompts = prompts + ps
+     neg_prompts = neg_prompts + nps
+     prompt = ", ".join(list_uniq(prompts) + last_empty_p)
+     neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
+     return prompt, neg_prompt
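+
+ # Usage sketch (hypothetical model name; assumes auto-recommendation is on):
+ #   p, n = insert_model_recom_prompt("1girl", "", "unknown/model")
+ #   # the model is not in model_dict.json, so default_ps / default_nps are
+ #   # appended to the prompt and negative prompt respectively.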
+
+
+ tag_group_dict = load_dict_from_csv('tag_group.csv')
+
+
+ def remove_specific_prompt(input_prompt: str = "", keep_tags: str = "all"):
+     def is_dressed(tag):
+         import re
+         p = re.compile(r'dress|cloth|uniform|costume|vest|sweater|coat|shirt|jacket|blazer|apron|leotard|hood|sleeve|skirt|shorts|pant|loafer|ribbon|necktie|bow|collar|glove|sock|shoe|boots|wear|emblem')
+         return p.search(tag)
+
+     def is_background(tag):
+         import re
+         p = re.compile(r'background|outline|light|sky|build|day|screen|tree|city')
+         return p.search(tag)
+
+     un_tags = ['solo']
+     group_list = ['groups', 'body_parts', 'attire', 'posture', 'objects', 'creatures', 'locations', 'disambiguation_pages', 'commonly_misused_tags', 'phrases', 'verbs_and_gerunds', 'subjective', 'nudity', 'sex_objects', 'sex', 'sex_acts', 'image_composition', 'artistic_license', 'text', 'year_tags', 'metatags']
+     keep_group_dict = {
+         "body": ['groups', 'body_parts'],
+         "dress": ['groups', 'body_parts', 'attire'],
+         "all": group_list,
+     }
+
+     def is_necessary(tag, keep_tags, group_dict):
+         if keep_tags == "all":
+             return True
+         elif tag in un_tags or group_dict.get(tag, "") in explicit_group:
+             return False
+         elif keep_tags == "body" and is_dressed(tag):
+             return False
+         elif is_background(tag):
+             return False
+         else:
+             return True
+
+     if keep_tags == "all": return input_prompt
+     keep_group = keep_group_dict.get(keep_tags, keep_group_dict["body"])
+     explicit_group = list(set(group_list) ^ set(keep_group))
+
+     tags = input_prompt.split(",") if input_prompt else []
+     people_tags: list[str] = []
+     other_tags: list[str] = []
+
+     group_dict = tag_group_dict
+     for tag in tags:
+         tag = replace_underline(tag)
+         if tag in PEOPLE_TAGS:
+             people_tags.append(tag)
+         elif is_necessary(tag, keep_tags, group_dict):
+             other_tags.append(tag)
+
+     output_prompt = ", ".join(people_tags + other_tags)
+
+     return output_prompt
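+
+ # Usage sketch (illustrative; actual grouping depends on tag_group.csv):
+ #   remove_specific_prompt("1girl, solo, school uniform, blue sky", "dress")
+ #   # -> "1girl, school uniform" (people tags always survive, "solo" is in
+ #   #    un_tags, and "blue sky" matches the background regex)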
+
+
+ def sort_taglist(tags: list[str]):
+     if not tags: return []
+     character_tags: list[str] = []
+     series_tags: list[str] = []
+     people_tags: list[str] = []
+     group_list = ['groups', 'body_parts', 'attire', 'posture', 'objects', 'creatures', 'locations', 'disambiguation_pages', 'commonly_misused_tags', 'phrases', 'verbs_and_gerunds', 'subjective', 'nudity', 'sex_objects', 'sex', 'sex_acts', 'image_composition', 'artistic_license', 'text', 'year_tags', 'metatags']
+     group_tags = {}
+     other_tags: list[str] = []
+     rating_tags: list[str] = []
+
+     group_dict = tag_group_dict
+     group_set = set(group_dict.keys())
+     character_set = set(anime_series_dict.keys())
+     series_set = set(anime_series_dict.values())
+     rating_set = set(DANBOORU_TO_E621_RATING_MAP.keys()) | set(DANBOORU_TO_E621_RATING_MAP.values())
+
+     for tag in tags:
+         tag = replace_underline(tag)
+         if tag in PEOPLE_TAGS:
+             people_tags.append(tag)
+         elif tag in rating_set:
+             rating_tags.append(tag)
+         elif tag in group_set:
+             elem = group_dict[tag]
+             group_tags[elem] = group_tags[elem] + [tag] if elem in group_tags else [tag]
+         elif tag in character_set:
+             character_tags.append(tag)
+         elif tag in series_set:
+             series_tags.append(tag)
+         else:
+             other_tags.append(tag)
+
+     output_group_tags: list[str] = []
+     for k in group_list:
+         output_group_tags.extend(group_tags.get(k, []))
+
+     rating_tags = [rating_tags[0]] if rating_tags else []
+     rating_tags = ["explicit, nsfw"] if rating_tags and rating_tags[0] == "explicit" else rating_tags
+
+     output_tags = character_tags + series_tags + people_tags + output_group_tags + other_tags + rating_tags
+
+     return output_tags
+
+
+ def sort_tags(tags: str):
+     if not tags: return ""
+     taglist: list[str] = []
+     for tag in tags.split(","):
+         taglist.append(tag.strip())
+     taglist = list(filter(lambda x: x != "", taglist))
+     return ", ".join(sort_taglist(taglist))
+
+
+ def postprocess_results(results: dict[str, float], general_threshold: float, character_threshold: float):
+     results = {
+         k: v for k, v in sorted(results.items(), key=lambda item: item[1], reverse=True)
+     }
+
+     rating = {}
+     character = {}
+     general = {}
+
+     for k, v in results.items():
+         if k.startswith("rating:"):
+             rating[k.replace("rating:", "")] = v
+             continue
+         elif k.startswith("character:"):
+             character[k.replace("character:", "")] = v
+             continue
+
+         general[k] = v
+
+     character = {k: v for k, v in character.items() if v >= character_threshold}
+     general = {k: v for k, v in general.items() if v >= general_threshold}
+
+     return rating, character, general
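+
+ # Usage sketch (made-up scores): labels are routed by their "rating:" /
+ # "character:" prefixes, then thresholded.
+ #   r, c, g = postprocess_results(
+ #       {"rating:general": 0.9, "character:hatsune miku": 0.95, "long hair": 0.4},
+ #       general_threshold=0.3, character_threshold=0.8)
+ #   # r == {"general": 0.9}; c == {"hatsune miku": 0.95}; g == {"long hair": 0.4}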
+
+
+ def gen_prompt(rating: list[str], character: list[str], general: list[str]):
+     people_tags: list[str] = []
+     other_tags: list[str] = []
+     # guard against an empty rating list (the original indexed rating[0] unconditionally);
+     # the value is currently informational only and is not appended to the prompt
+     rating_tag = RATING_MAP[rating[0]] if rating else ""
+
+     for tag in general:
+         if tag in PEOPLE_TAGS:
+             people_tags.append(tag)
+         else:
+             other_tags.append(tag)
+
+     all_tags = people_tags + other_tags
+
+     return ", ".join(all_tags)
+
+
+ @spaces.GPU(duration=30)
+ def predict_tags(image: Image.Image, general_threshold: float = 0.3, character_threshold: float = 0.8):
+     inputs = wd_processor.preprocess(image, return_tensors="pt")
+
+     # move the tagger to the inference device *before* the forward pass
+     # (the original moved it afterwards, so inference never ran on that device)
+     if device != default_device: wd_model.to(device=device)
+     outputs = wd_model(**inputs.to(wd_model.device, wd_model.dtype))
+     logits = torch.sigmoid(outputs.logits[0])  # take the first logits
+
+     # get probabilities
+     results = {
+         wd_model.config.id2label[i]: float(logit.float()) for i, logit in enumerate(logits)
+     }
+     if device != default_device: wd_model.to(device=default_device)
+     # rating, character, general
+     rating, character, general = postprocess_results(
+         results, general_threshold, character_threshold
+     )
+     prompt = gen_prompt(
+         list(rating.keys()), list(character.keys()), list(general.keys())
+     )
+     output_series_tag = ""
+     output_series_list = character_list_to_series_list(character.keys())
+     if output_series_list:
+         output_series_tag = output_series_list[0]
+     return output_series_tag, ", ".join(character.keys()), prompt, gr.update(interactive=True)
+
+
+ def predict_tags_wd(image: Image.Image, input_tags: str, algo: list[str], general_threshold: float = 0.3,
+                     character_threshold: float = 0.8, input_series: str = "", input_character: str = ""):
+     # pass the inputs through untouched when algorithms were chosen but the WD Tagger was not
+     if "Use WD Tagger" not in algo and len(algo) != 0:
+         return input_series, input_character, input_tags, gr.update(interactive=True)
+     return predict_tags(image, general_threshold, character_threshold)
+
+
+ def compose_prompt_to_copy(character: str, series: str, general: str):
+     characters = character.split(",") if character else []
+     serieses = series.split(",") if series else []
+     generals = general.split(",") if general else []
+     tags = characters + serieses + generals
+     cprompt = ",".join(tags) if tags else ""
+     return cprompt
tagger/utils.py ADDED
@@ -0,0 +1,50 @@
+ import gradio as gr
+ from dartrs.v2 import AspectRatioTag, LengthTag, RatingTag, IdentityTag
+
+
+ V2_ASPECT_RATIO_OPTIONS: list[AspectRatioTag] = [
+     "ultra_wide",
+     "wide",
+     "square",
+     "tall",
+     "ultra_tall",
+ ]
+ V2_RATING_OPTIONS: list[RatingTag] = [
+     "sfw",
+     "general",
+     "sensitive",
+     "nsfw",
+     "questionable",
+     "explicit",
+ ]
+ V2_LENGTH_OPTIONS: list[LengthTag] = [
+     "very_short",
+     "short",
+     "medium",
+     "long",
+     "very_long",
+ ]
+ V2_IDENTITY_OPTIONS: list[IdentityTag] = [
+     "none",
+     "lax",
+     "strict",
+ ]
+
+
+ # ref: https://qiita.com/tregu148/items/fccccbbc47d966dd2fc2
+ def gradio_copy_text(_text: str | None = None):  # annotation was the bare value `None`; the argument itself is unused
+     gr.Info("Copied!")
+
+
+ COPY_ACTION_JS = """\
+ (inputs, _outputs) => {
+   // inputs is the string value of the input_text
+   if (inputs.trim() !== "") {
+     navigator.clipboard.writeText(inputs);
+   }
+ }"""
+
+
+ def gradio_copy_prompt(prompt: str):
+     gr.Info("Copied!")
+     return prompt
tagger/v2.py ADDED
@@ -0,0 +1,260 @@
+ import time
+ import torch
+ from typing import Callable
+ from pathlib import Path
+
+ from dartrs.v2 import (
+     V2Model,
+     MixtralModel,
+     MistralModel,
+     compose_prompt,
+     LengthTag,
+     AspectRatioTag,
+     RatingTag,
+     IdentityTag,
+ )
+ from dartrs.dartrs import DartTokenizer
+ from dartrs.utils import get_generation_config
+
+
+ import gradio as gr
+ from gradio.components import Component
+
+
+ try:
+     from output import UpsamplingOutput
+ except ImportError:  # was a bare `except:`; only the import failure should be caught
+     from .output import UpsamplingOutput
+
+
+ V2_ALL_MODELS = {
+     "dart-v2-moe-sft": {
+         "repo": "p1atdev/dart-v2-moe-sft",
+         "type": "sft",
+         "class": MixtralModel,
+     },
+     "dart-v2-sft": {
+         "repo": "p1atdev/dart-v2-sft",
+         "type": "sft",
+         "class": MistralModel,
+     },
+ }
+
+
+ def prepare_models(model_config: dict):
+     model_name = model_config["repo"]
+     tokenizer = DartTokenizer.from_pretrained(model_name)
+     model = model_config["class"].from_pretrained(model_name)
+
+     return {
+         "tokenizer": tokenizer,
+         "model": model,
+     }
+
+
+ def normalize_tags(tokenizer: DartTokenizer, tags: str):
+     """Just remove unk tokens."""
+     return ", ".join([tag for tag in tokenizer.tokenize(tags) if tag != "<|unk|>"])
+
+
+ @torch.no_grad()
+ def generate_tags(
+     model: V2Model,
+     tokenizer: DartTokenizer,
+     prompt: str,
+     ban_token_ids: list[int],
+ ):
+     output = model.generate(
+         get_generation_config(
+             prompt,
+             tokenizer=tokenizer,
+             temperature=1,
+             top_p=0.9,
+             top_k=100,
+             max_new_tokens=256,
+             ban_token_ids=ban_token_ids,
+         ),
+     )
+
+     return output
+
+
+ def _people_tag(noun: str, minimum: int = 1, maximum: int = 5):
+     return (
+         [f"1{noun}"]
+         + [f"{num}{noun}s" for num in range(minimum + 1, maximum + 1)]
+         + [f"{maximum+1}+{noun}s"]
+     )
+
+
+ PEOPLE_TAGS = (
+     _people_tag("girl") + _people_tag("boy") + _people_tag("other") + ["no humans"]
+ )
+
+
+ def gen_prompt_text(output: UpsamplingOutput):
+     # separate people tags (e.g. 1girl)
+     people_tags = []
+     other_general_tags = []
+
+     for tag in output.general_tags.split(","):
+         tag = tag.strip()
+         if tag in PEOPLE_TAGS:
+             people_tags.append(tag)
+         else:
+             other_general_tags.append(tag)
+
+     return ", ".join(
+         [
+             part.strip()
+             for part in [
+                 *people_tags,
+                 output.character_tags,
+                 output.copyright_tags,
+                 *other_general_tags,
+                 output.upsampled_tags,
+                 output.rating_tag,
+             ]
+             if part.strip() != ""
+         ]
+     )
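+
+ # Ordering note: the assembled prompt reads
+ #   people tags, character tags, copyright tags, other general tags,
+ #   upsampled tags, rating tag
+ # with empty parts dropped, so a tag like "1girl" always leads when present.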
+
+
+ def elapsed_time_format(elapsed_time: float) -> str:
+     return f"Elapsed: {elapsed_time:.2f} seconds"
+
+
+ def parse_upsampling_output(
+     upsampler: Callable[..., UpsamplingOutput],
+ ):
+     def _parse_upsampling_output(*args) -> tuple[str, str, dict]:
+         output = upsampler(*args)
+
+         return (
+             gen_prompt_text(output),
+             elapsed_time_format(output.elapsed_time),
+             gr.update(interactive=True),
+             gr.update(interactive=True),
+         )
+
+     return _parse_upsampling_output
+
+
+ class V2UI:
+     model_name: str | None = None
+     model: V2Model
+     tokenizer: DartTokenizer
+
+     input_components: list[Component] = []
+     generate_btn: gr.Button
+
+     def on_generate(
+         self,
+         model_name: str,
+         copyright_tags: str,
+         character_tags: str,
+         general_tags: str,
+         rating_tag: RatingTag,
+         aspect_ratio_tag: AspectRatioTag,
+         length_tag: LengthTag,
+         identity_tag: IdentityTag,
+         ban_tags: str,
+         *args,
+     ) -> UpsamplingOutput:
+         # load (or reload) the tokenizer/model pair only when the selection changes
+         if self.model_name is None or self.model_name != model_name:
+             models = prepare_models(V2_ALL_MODELS[model_name])
+             self.model = models["model"]
+             self.tokenizer = models["tokenizer"]
+             self.model_name = model_name
+
+         # normalize tags
+         # copyright_tags = normalize_tags(self.tokenizer, copyright_tags)
+         # character_tags = normalize_tags(self.tokenizer, character_tags)
+         # general_tags = normalize_tags(self.tokenizer, general_tags)
+
+         ban_token_ids = self.tokenizer.encode(ban_tags.strip())
+
+         prompt = compose_prompt(
+             prompt=general_tags,
+             copyright=copyright_tags,
+             character=character_tags,
+             rating=rating_tag,
+             aspect_ratio=aspect_ratio_tag,
+             length=length_tag,
+             identity=identity_tag,
+         )
+
+         start = time.time()
+         upsampled_tags = generate_tags(
+             self.model,
+             self.tokenizer,
+             prompt,
+             ban_token_ids,
+         )
+         elapsed_time = time.time() - start
+
+         return UpsamplingOutput(
+             upsampled_tags=upsampled_tags,
+             copyright_tags=copyright_tags,
+             character_tags=character_tags,
+             general_tags=general_tags,
+             rating_tag=rating_tag,
+             aspect_ratio_tag=aspect_ratio_tag,
+             length_tag=length_tag,
+             identity_tag=identity_tag,
+             elapsed_time=elapsed_time,
+         )
+
+
+ def parse_upsampling_output_simple(output: UpsamplingOutput):  # renamed from `upsampler`: it receives an output, not a callable
+     return gen_prompt_text(output)
+
+
+ v2 = V2UI()
+
+
+ def v2_upsampling_prompt(model: str = "dart-v2-moe-sft", copyright: str = "", character: str = "",
+                          general_tags: str = "", rating: str = "nsfw", aspect_ratio: str = "square",
+                          length: str = "very_long", identity: str = "lax", ban_tags: str = "censored"):
+     raw_prompt = parse_upsampling_output_simple(v2.on_generate(model, copyright, character, general_tags,
+                                                 rating, aspect_ratio, length, identity, ban_tags))
+     return raw_prompt
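+
+ # Usage sketch: expand a short tag list into a long Danbooru-style prompt.
+ # The first call downloads the selected dart-v2 checkpoint from the Hub.
+ #   long_prompt = v2_upsampling_prompt(general_tags="1girl, solo, cowboy shot",
+ #                                      rating="general", length="long")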
+
+
+ def load_dict_from_csv(filename):
+     d = {}  # renamed from `dict` to avoid shadowing the builtin
+     if not Path(filename).exists():
+         if Path('./tagger/', filename).exists(): filename = str(Path('./tagger/', filename))
+         else: return d
+     try:
+         with open(filename, 'r', encoding="utf-8") as f:
+             lines = f.readlines()
+     except Exception:
+         print(f"Failed to open dictionary file: {filename}")
+         return d
+     for line in lines:
+         parts = line.strip().split(',')
+         if len(parts) < 2: continue  # skip blank or malformed rows instead of raising IndexError
+         d[parts[0]] = parts[1]
+     return d
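+
+ # Expected CSV shape (illustrative rows): two comma-separated columns per line,
+ # key then value; columns beyond the second are ignored.
+ #   hatsune miku,vocaloid
+ #   holo,spice and wolf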
+
+
+ anime_series_dict = load_dict_from_csv('character_series_dict.csv')
+
+
+ def select_random_character(series: str, character: str):
+     from random import seed, randrange
+     seed()
+     character_list = list(anime_series_dict.keys())
+     # randrange(len(...)) covers every index; the original `len(...) - 1` could never pick the last entry
+     character = character_list[randrange(len(character_list))]
+     series = anime_series_dict.get(character.split(",")[0].strip(), "")
+     return series, character
+
+
+ def v2_random_prompt(general_tags: str = "", copyright: str = "", character: str = "", rating: str = "nsfw",
+                      aspect_ratio: str = "square", length: str = "very_long", identity: str = "lax",
+                      ban_tags: str = "censored", model: str = "dart-v2-moe-sft"):
+     if copyright == "" and character == "":
+         copyright, character = select_random_character("", "")
+     raw_prompt = v2_upsampling_prompt(model, copyright, character, general_tags, rating,
+                                       aspect_ratio, length, identity, ban_tags)
+     return raw_prompt, copyright, character
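+
+ # Usage sketch: with no copyright/character supplied, a random character and
+ # its series are drawn from character_series_dict.csv before upsampling.
+ #   prompt, copyright, character = v2_random_prompt(general_tags="1girl, smile")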
textual_inversion_dict.json ADDED
@@ -0,0 +1,74 @@
+ {
+     "bad_prompt_version2.pt": [
+         "bad_prompt",
+         false
+     ],
+     "EasyNegativeV2.safetensors": [
+         "EasyNegative",
+         false
+     ],
+     "bad-hands-5.pt": [
+         "bad_hand",
+         false
+     ],
+     "negativeXL_A.safetensors": [
+         "negativeXL_A",
+         false
+     ],
+     "negativeXL_B.safetensors": [
+         "negativeXL_B",
+         false
+     ],
+     "negativeXL_C.safetensors": [
+         "negativeXL_C",
+         false
+     ],
+     "negativeXL_D.safetensors": [
+         "negativeXL_D",
+         false
+     ],
+     "unaestheticXL2v10.safetensors": [
+         "2v10",
+         false
+     ],
+     "unaestheticXL_AYv1.safetensors": [
+         "_AYv1",
+         false
+     ],
+     "unaestheticXL_Alb2.safetensors": [
+         "_Alb2",
+         false
+     ],
+     "unaestheticXL_Jug6.safetensors": [
+         "_Jug6",
+         false
+     ],
+     "unaestheticXL_bp5.safetensors": [
+         "_bp5",
+         false
+     ],
+     "unaestheticXL_hk1.safetensors": [
+         "_hk1",
+         false
+     ],
+     "unaestheticXLv1.safetensors": [
+         "v1.0",
+         false
+     ],
+     "unaestheticXLv13.safetensors": [
+         "v1.3",
+         false
+     ],
+     "unaestheticXLv31.safetensors": [
+         "v3.1",
+         false
+     ],
+     "unaestheticXL_Sky3.1.safetensors": [
+         "_Sky3.1",
+         false
+     ],
+     "SimplePositiveXLv2.safetensors": [
+         "SIMPLEPOSITIVEXLV2",
+         true
+     ]
+ }