import gradio as gr
from random import randint
from all_models import models
from externalmod import gr_Interface_load
import asyncio
import os
from threading import RLock
lock = RLock()
HF_TOKEN = os.environ.get("HF_TOKEN")  # Only needed when private or gated models are used.

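# Wrap each model in all_models as a callable Gradio interface, cached in models_load.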
def load_fn(models):
    global models_load
    models_load = {}
    for model in models:
        if model not in models_load.keys():
            try:
                m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
            except Exception as error:
                print(error)
                # Placeholder interface so the UI still builds when a model fails to load.
                m = gr.Interface(lambda txt: None, ['text'], ['image'])
            models_load.update({model: m})

load_fn(models)

num_models = 1
max_imagesone = 1
max_images = 6
default_models = models[:num_models]
inference_timeout = 300
MAX_SEED = 2**32-1

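# Pad a model-choice list with 'NA' entries so it always has num_models elements.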
def extend_choices(choices):
    return choices + (num_models - len(choices)) * ['NA']

def update_imgbox(choices):
    choices_plus = extend_choices(choices)
    return [gr.Image(None, label=m, visible=(m != 'NA')) for m in choices_plus]

def gen_fn_original(model_str, prompt):
    if model_str == 'NA':
        return None
    noise = ''  #str(randint(0, 99999999999))
    return models_load[model_str](f'{prompt} {noise}')

def gen_fnsix(model_str, prompt):
    if model_str == 'NA':
        return None
    noisesix = str(randint(1941, 2023)) #str(randint(0, 99999999999))
    return models_load[model_str](f'{prompt} {noisesix}')

# https://huggingface.co/docs/api-inference/detailed_parameters
# https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
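# Run one model call in a background thread with a timeout; on success, save the image and return its path.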
async def infer(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1, timeout=inference_timeout):
    from pathlib import Path
    kwargs = {}
    if height is not None and height >= 256: kwargs["height"] = height
    if width is not None and width >= 256: kwargs["width"] = width
    if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
    if cfg is not None and cfg > 0: kwargs["guidance_scale"] = cfg
    noise = ""
    if seed >= 0: kwargs["seed"] = seed
    else:
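        # No fixed seed: pad the prompt with a random amount of whitespace to vary otherwise identical requests.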
        rand = randint(1, 500)
        for i in range(rand):
            noise += " "
    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
                               prompt=f'{prompt} {noise}', negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
    await asyncio.sleep(0)
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except (Exception, asyncio.TimeoutError) as e:
        print(e)
        print(f"Task timed out: {model_str}")
        if not task.done(): task.cancel()
        result = None
    if task.done() and result is not None:
        with lock:
            png_path = "image.png"
            result.save(png_path)
            image = str(Path(png_path).resolve())
        return image
    return None

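# Synchronous wrapper around infer() so it can be used directly as a Gradio event handler.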
def gen_fn(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1):
    if model_str == 'NA':
        return None
    loop = asyncio.new_event_loop()  # Create the loop before the try block so it is always defined in finally.
    try:
        result = loop.run_until_complete(infer(model_str, prompt, nprompt,
                                         height, width, steps, cfg, seed, inference_timeout))
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"Task aborted: {model_str}")
        result = None
    finally:
        loop.close()
    return result

css="""

.gradio-container {max-width: 1200px; margin: 0 auto; !important;}

.output { width=128px; height=128px; !important; }

.outputone { width=512px; height=512px; !important; }

"""
with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css) as demo:
    gr.HTML(
        """
        <div>
            <p><center><img src="https://huggingface.co/Yntec/OpenGenDiffusers/resolve/main/pp.png" style="height:128px; width:482px; margin-top: -22px; margin-bottom: -44px;" title="Free ai art image generator Printing Press"></center></p>
        </div>
        """
    )
    with gr.Tab('One Image'):
        model_choice = gr.Dropdown(models, label=f'Choose a model from the {len(models)} available! Try clearing the box and typing in it to filter them!', value=models[0], filterable=True)
        with gr.Group():
            txt_input = gr.Textbox(label='Your prompt:', lines=1)
            with gr.Accordion("Advanced", open=False, visible=True):
                neg_input = gr.Textbox(label='Negative prompt:', lines=1)
                with gr.Row():
                    width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                    height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                with gr.Row():
                    steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
                    cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
                    seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
        num_imagesone = gr.Slider(1, max_imagesone, value=max_imagesone, step=1, label='Nobody gets to see this label so I can put here whatever I want!', visible=False)
        
        with gr.Row():
            gen_button = gr.Button('Generate', variant='primary', scale=3)
            #stop_button = gr.Button('Stop', variant='secondary', interactive=False, scale=1)
        #gen_button.click(lambda: gr.update(interactive=True), None, stop_button)
        
        with gr.Row():
            output = [gr.Image(label='', show_download_button=True, elem_classes="outputone",
                               interactive=False, min_width=80, show_share_button=False, format="png",
                               visible=True) for _ in range(max_imagesone)]

        for i, o in enumerate(output):
            img_in = gr.Number(i, visible = False)
            num_imagesone.change(lambda i, n: gr.update(visible = (i < n)), [img_in, num_imagesone], o, show_progress = False)
            gen_event = gr.on(triggers=[gen_button.click, txt_input.submit],
                               fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5: gen_fn(m, t1, t2, n1, n2, n3, n4, n5) if (i < n) else None,
                               inputs=[img_in, num_imagesone, model_choice, txt_input, neg_input,
                                       height, width, steps, cfg, seed], outputs=[o],
                                       concurrency_limit=None, queue=False) # Be sure to delete ", queue=False" when activating the stop button
            #stop_button.click(lambda: gr.update(interactive = False), None, stop_button, cancels=[gen_event])
        with gr.Row():
            gr.HTML(
                """
                <div class="footer">
                <p>Based on the <a href="https://huggingface.co/spaces/derwahnsinn/TestGen">TestGen</a> Space by derwahnsinn, the <a href="https://huggingface.co/spaces/RdnUser77/SpacIO_v1">SpacIO</a> Space by RdnUser77, Omnibus's Maximum Multiplier, <a href="https://huggingface.co/spaces/Yntec/Diffusion60XX">Diffusion60XX</a> and <a href="https://huggingface.co/spaces/Yntec/ToyWorld">Toy World</a>!</p>
                </div>
                """
            )
    with gr.Tab('Up To Six'):
        model_choice2 = gr.Dropdown(models, label=f'Choose a model from the {len(models)} available! Try clearing the box and typing in it to filter them!',
                                    value=models[0], filterable=True)
        with gr.Group():
            txt_input2 = gr.Textbox(label='Your prompt:', lines=1)
            with gr.Accordion("Advanced", open=False, visible=True):
                neg_input2 = gr.Textbox(label='Negative prompt:', lines=1)
                with gr.Row():
                    width2 = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                    height2 = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                with gr.Row():
                    steps2 = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
                    cfg2 = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
                    seed2 = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)

        num_images = gr.Slider(1, max_images, value=max_images, step=1,
                               label=f'Number of images (if you want fewer than {max_images}, decrease it slowly until it matches the boxes below)')
        
        with gr.Row():
            gen_button2 = gr.Button(f'Generate up to {int(max_images)} images in up to 3 minutes total', scale=3)
            #stop_button2 = gr.Button('Stop', variant='secondary', interactive=False, scale=1)
        #gen_button2.click(lambda: gr.update(interactive=True), None, stop_button2)
        gr.HTML(
            """
            <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
                <div class="center">
                    <p style="margin-bottom: 10px;">Scroll down to see more images (they generate in a random order).</p>
                </div>
            </div>
            """
        )
        with gr.Row():
            output2 = [gr.Image(label = '', show_download_button=True, elem_classes="output",
                                interactive=False, min_width=80, visible=True, format="png",
                                show_share_button=False, show_label=False) for _ in range(max_images)]

        for i, o in enumerate(output2):
            img_i = gr.Number(i, visible=False)
            num_images.change(lambda i, n: gr.update(visible=(i < n)), [img_i, num_images], o, show_progress=False)
            gen_event2 = gr.on(triggers=[gen_button2.click, txt_input2.submit],
                               fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5: gen_fn(m, t1, t2, n1, n2, n3, n4, n5) if (i < n) else None,
                               inputs=[img_i, num_images, model_choice2, txt_input2, neg_input2,
                                       height2, width2, steps2, cfg2, seed2], outputs=[o],
                                       concurrency_limit=None, queue=False) # Be sure to delete ", queue=False" when activating the stop button
            #stop_button2.click(lambda: gr.update(interactive=False), None, stop_button2, cancels=[gen_event2])
        with gr.Row():
            gr.HTML(
                """
                <div class="footer">
                <p>Based on the <a href="https://huggingface.co/spaces/derwahnsinn/TestGen">TestGen</a> Space by derwahnsinn, the <a href="https://huggingface.co/spaces/RdnUser77/SpacIO_v1">SpacIO</a> Space by RdnUser77, Omnibus's Maximum Multiplier, <a href="https://huggingface.co/spaces/Yntec/Diffusion60XX">Diffusion60XX</a> and <a href="https://huggingface.co/spaces/Yntec/ToyWorld">Toy World</a>!</p>
                </div>
                """
            )

demo.queue(default_concurrency_limit=200, max_size=200)
demo.launch(show_api=False, max_threads=400)