import torch
from torch import autocast
from diffusers import StableDiffusionPipeline
import gradio as gr

model_id = "CompVis/stable-diffusion-v1-4"
device = "cuda"

# Load the fp16 weights to halve memory use; requires a Hugging Face auth token.
pipe = StableDiffusionPipeline.from_pretrained(
    model_id, use_auth_token=True, revision="fp16", torch_dtype=torch.float16
)
pipe = pipe.to(device)


def infer(prompt, samples, steps, scale, seed):
    # Seed the generator so a given seed always reproduces the same images.
    generator = torch.Generator(device=device).manual_seed(seed)
    with autocast("cuda"):
        images_list = pipe(
            [prompt] * samples,
            num_inference_steps=steps,
            guidance_scale=scale,
            generator=generator,
        )
    # The pipeline returns a StableDiffusionPipelineOutput; collect its PIL images.
    images = []
    for image in images_list.images:
        images.append(image)
    return images


css = """
        .gradio-container {
            font-family: 'IBM Plex Sans', sans-serif;
        }
        .gr-button {
            color: white;
            border-color: black;
            background: black;
        }
        input[type='range'] {
            accent-color: black;
        }
        .dark input[type='range'] {
            accent-color: #dfdfdf;
        }
        .container {
            max-width: 1070px;
            margin: auto;
            padding-top: 2rem;
        }
        #gallery {
            min-height: 22rem;
            margin-bottom: 15px;
            border-bottom-right-radius: .5rem !important;
            border-bottom-left-radius: .5rem !important;
        }
        #gallery>div>.h-full {
            min-height: 20rem;
        }
        .details:hover {
            text-decoration: underline;
        }
        .gr-button {
            white-space: nowrap;
        }
        .gr-button:focus {
            border-color: rgb(147 197 253 / var(--tw-border-opacity));
            outline: none;
            box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
            --tw-border-opacity: 1;
            --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
            --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
            --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
            --tw-ring-opacity: .5;
        }
        #advanced-btn {
            font-size: .7rem !important;
            line-height: 19px;
            margin-top: 24px;
            margin-bottom: 12px;
            padding: 2px 8px;
            border-radius: 14px !important;
        }
        #advanced-options {
            display: none;
            margin-bottom: 20px;
        }
        .footer {
            margin-bottom: 45px;
            margin-top: 35px;
            text-align: center;
            border-bottom: 1px solid #e5e5e5;
        }
        .footer>p {
            font-size: .8rem;
            display: inline-block;
            padding: 0 10px;
            transform: translateY(10px);
            background: white;
        }
        .dark .footer {
            border-color: #303030;
        }
        .dark .footer>p {
            background: #0b0f19;
        }
        .acknowledgments h4 {
            margin: 1.25em 0 .25em 0;
            font-weight: bold;
            font-size: 115%;
        }
"""

block = gr.Blocks(css=css)

examples = [
    [
        'A high tech solarpunk utopia in the Amazon rainforest',
        3,
        40,
        7.5,
        1024,
    ],
    [
        'A pikachu fine dining with a view to the Eiffel Tower',
        3,
        40,
        7,
        1024,
    ],
    [
        'A mecha robot in a favela in expressionist style',
        3,
        40,
        7,
        1024,
    ],
    [
        'an insect robot preparing a delicious meal',
        3,
        40,
        7,
        1024,
    ],
    [
        "A small cabin on top of a snowy mountain in the style of disney, artstation",
        3,
        40,
        7,
        1024,
    ],
]

with block:
    gr.HTML(
        """
            <h1>Stable Diffusion Spaces</h1>
            <p>
                Stable Diffusion is a state-of-the-art text-to-image model that generates
                images from a text description. For faster generation and forthcoming API
                access you can try DreamStudio Beta.
            </p>
        """
    )
    with gr.Group():
        with gr.Box():
            with gr.Row().style(mobile_collapse=False, equal_height=True):
                text = gr.Textbox(
                    label="Enter your prompt",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter your prompt",
                ).style(
                    border=(True, False, True, True),
                    rounded=(True, False, False, True),
                    container=False,
                )
                btn = gr.Button("Generate image").style(
                    margin=False,
                    rounded=(False, True, True, False),
                )
        gallery = gr.Gallery(
            label="Generated images", show_label=False, elem_id="gallery"
        ).style(grid=[3], height="auto")

    advanced_button = gr.Button("Advanced options", elem_id="advanced-btn")

    with gr.Row(elem_id="advanced-options"):
        samples = gr.Slider(label="Images", minimum=1, maximum=3, value=3, step=1)
        steps = gr.Slider(label="Steps", minimum=1, maximum=50, value=40, step=1)
        scale = gr.Slider(
            label="Guidance Scale", minimum=0, maximum=50, value=7.5, step=0.1
        )
        seed = gr.Slider(
            label="Random seed",
            minimum=0,
            maximum=2147483647,
            step=1,
            randomize=True,
        )

    # Cached examples are generated once at startup so the page loads with results.
    ex = gr.Examples(
        examples=examples,
        fn=infer,
        inputs=[text, samples, steps, scale, seed],
        outputs=gallery,
        cache_examples=True,
    )
    ex.dataset.headers = [""]

    text.submit(infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)
    btn.click(infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)

    # Toggle the advanced-options row entirely client-side; no Python callback needed.
    advanced_button.click(
        None,
        [],
        text,
        _js="""
        () => {
            const options = document.querySelector("body > gradio-app").querySelector("#advanced-options");
            options.style.display = ["none", ""].includes(options.style.display) ? "flex" : "none";
        }""",
    )

    gr.HTML(
        """
            <div class="acknowledgments">
                <h4>LICENSE</h4>
                <p>
                    The model is licensed with a CreativeML Open RAIL-M license. The license
                    states that the outputs you create fully belong to you, and that you are
                    liable for how you share them. It forbids you from sharing any content
                    that violates any laws, produces harm to a person, disseminates personal
                    information meant to cause harm, spreads misinformation, or targets
                    vulnerable groups. For the full list of restrictions, please read the
                    license.
                </p>
                <h4>Biases and content acknowledgment</h4>
                <p>
                    Despite how impressive being able to turn text into images is, be aware
                    that this model may output content that reinforces or exacerbates
                    societal biases, as well as realistic faces, pornography, and violence.
                    The model was trained on the LAION-5B dataset, which scraped non-curated
                    image-text pairs from the internet (the exception being the removal of
                    illegal content), and is meant for research purposes. You can read more
                    in the model card.
                </p>
            </div>
        """
    )

block.queue(max_size=40).launch()