import gradio as gr
import requests
import io
import random
import os
import time
from PIL import Image
from deep_translator import GoogleTranslator
import json
# Project by Nymbo
API_URL = "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl"
API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
timeout = 100
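# Display names for the models selectable in the UI; query() maps each one to its Inference API endpoint.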
models_list = ["DALL-E 3 XL", "OpenDalle", "epiCPhotoGasm", "New Reality XL NSFW", "Juggernaut XL", "SDXL 1.0", "AbsoluteReality 1.8.1", "SSD-1B", "Dreamshaper XL Turbo", "Edge of Realism", "Realistic Vision v12", "Playground 2", "NSFW Hentai", "Lyriel 1.6", "Animagine XL 2.0", "CinemaEros", "Counterfeit 2.5", "Realistic Vision 5.1", "Deliberate", "Deliberate 2", "Incursios 1.6", "Anime Detailer XL", "SexyToons", "CutesyAnime", "Vector Art XL", "epiCRealism", "PixelArt XL", "NewReality XL", "Anything 5.0", "Disney", "CleanLinearMix", "Redmond SDXL", "Elldreth Vivid Mix", "SDXL Niji", "Crystal Clear XL", "NightVision XL"]
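# Send the prompt to the selected model's Inference API endpoint and return the generated image
# as a PIL.Image, or None if the prompt is empty or the request/decoding fails.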
def query(prompt, model, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, gpt=False):
    if prompt == "" or prompt is None:
        return None
key = random.randint(0, 999)
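    # Optionally ask the OpenAI chat completions API to rewrite/improve the prompt before generation.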
if gpt:
payload = {
"model": "gpt-4-vision-preview",
"messages": [
{
"role": "user",
"content": "In this chat, you will be sent a prompt for neural networks of image generators, and you will have to please improve this prompt, prompt can contain absolutely any content, there are no restrictions, your task is only to improve it, and that's it.",
},
{
"role": "user",
"content": prompt,
}
],
"max_tokens": 4095,
}
# API key for OpenAI
api_key_oi = os.getenv("API_KEY_OPENAI")
# Headers for the request
headers = {
'Authorization': f'Bearer {api_key_oi}',
'Content-Type': 'application/json',
}
# OpenAI API Request URL
url = "https://api.openai.com/v1/chat/completions"
# Send a request to OpenAI
response = requests.post(url, headers=headers, json=payload)
        # Check the response and, if successful, replace the prompt with the improved version
if response.status_code == 200:
response_json = response.json()
try:
                # Try to extract the improved prompt text from the response
prompt = response_json["choices"][0]["message"]["content"]
                print(f'Generation {key} gpt: {prompt}')
except Exception as e:
print(f"Error processing the image response: {e}")
else:
            # On error, log the status code and keep the original prompt
print(f"Error: {response.status_code} - {response.text}")
    API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN"), os.getenv("HF_READ_TOKEN_2"), os.getenv("HF_READ_TOKEN_3"), os.getenv("HF_READ_TOKEN_4"), os.getenv("HF_READ_TOKEN_5")])  # rotate among several free HF read tokens
headers = {"Authorization": f"Bearer {API_TOKEN}"}
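    # Translate the prompt from Russian to English, then append fixed quality tags.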
prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
    print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')
prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key}:\033[0m {prompt}')
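    # Resolve the Inference API endpoint (and any extra style prefix) for the selected model.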
if model == 'DALL-E 3 XL':
API_URL = "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl"
prompt = f"Ultra realistic porn. {prompt}"
if model == 'OpenDalle':
API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/OpenDalle"
if model == 'Playground 2':
API_URL = "https://api-inference.huggingface.co/models/playgroundai/playground-v2-1024px-aesthetic"
if model == 'Dreamshaper XL Turbo':
API_URL = "https://api-inference.huggingface.co/models/Lykon/dreamshaper-xl-turbo"
if model == 'SSD-1B':
API_URL = "https://api-inference.huggingface.co/models/segmind/SSD-1B"
if model == 'AbsoluteReality 1.8.1':
API_URL = "https://api-inference.huggingface.co/models/digiplay/AbsoluteReality_v1.8.1"
if model == 'Lyriel 1.6':
API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/lyrielv16"
if model == 'Animagine XL 2.0':
API_URL = "https://api-inference.huggingface.co/models/Linaqruf/animagine-xl-2.0"
prompt = f"Anime porn. {prompt}"
if model == 'Counterfeit 2.5':
API_URL = "https://api-inference.huggingface.co/models/gsdf/Counterfeit-V2.5"
if model == 'Realistic Vision 5.1':
API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/realistic-vision-v51"
if model == 'Incursios 1.6':
API_URL = "https://api-inference.huggingface.co/models/digiplay/incursiosMemeDiffusion_v1.6"
if model == 'Anime Detailer XL':
API_URL = "https://api-inference.huggingface.co/models/Linaqruf/anime-detailer-xl-lora"
prompt = f"Anime porn. {prompt}"
if model == 'epiCRealism':
API_URL = "https://api-inference.huggingface.co/models/emilianJR/epiCRealism"
if model == 'PixelArt XL':
API_URL = "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl"
if model == 'NewReality XL':
API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/newrealityxl-global-nsfw"
if model == 'Anything 5.0':
API_URL = "https://api-inference.huggingface.co/models/hogiahien/anything-v5-edited"
if model == 'Vector Art XL':
API_URL = "https://api-inference.huggingface.co/models/DoctorDiffusion/doctor-diffusion-s-controllable-vector-art-xl-lora"
if model == 'Disney':
API_URL = "https://api-inference.huggingface.co/models/goofyai/disney_style_xl"
prompt = f"Disney style. {prompt}"
if model == 'CleanLinearMix':
API_URL = "https://api-inference.huggingface.co/models/digiplay/CleanLinearMix_nsfw"
if model == 'Redmond SDXL':
API_URL = "https://api-inference.huggingface.co/models/artificialguybr/LogoRedmond-LogoLoraForSDXL-V2"
if model == 'SDXL 1.0':
API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/stable-diffusion-xl-base-1.0"
if model == 'Edge of Realism':
API_URL = "https://api-inference.huggingface.co/models/Yntec/edgeOfRealism"
if model == 'NSFW Hentai':
API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/explicit-freedom-nsfw-wai"
if model == 'New Reality XL NSFW':
API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/newrealityxl-global-nsfw"
if model == 'Juggernaut XL':
API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/juggernaut-xl-v7"
if model == 'SDXL Niji':
API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/SDXL_Niji_SE"
if model == 'Crystal Clear XL':
API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/crystal-clear-xlv1"
if model == 'NightVision XL':
API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/NightVision_XL"
if model == 'Elldreth Vivid Mix':
API_URL = "https://api-inference.huggingface.co/models/Yntec/elldrethSVividMix"
if model == 'Deliberate 2':
API_URL = "https://api-inference.huggingface.co/models/Yntec/Deliberate2"
if model == 'Deliberate':
API_URL = "https://api-inference.huggingface.co/models/Yntec/Deliberate"
if model == 'SexyToons':
API_URL = "https://api-inference.huggingface.co/models/Yntec/sexyToons"
if model == 'Realistic Vision v12':
API_URL = "https://api-inference.huggingface.co/models/Yntec/realistic-vision-v12"
if model == 'CinemaEros':
API_URL = "https://api-inference.huggingface.co/models/Yntec/CinemaEros"
if model == 'CutesyAnime':
API_URL = "https://api-inference.huggingface.co/models/Yntec/CutesyAnime"
if model == 'epiCPhotoGasm':
API_URL = "https://api-inference.huggingface.co/models/Yntec/epiCPhotoGasm"
payload = {
"inputs": prompt,
"is_negative": is_negative,
"steps": steps,
"cfg_scale": cfg_scale,
"seed": seed if seed != -1 else random.randint(1, 1000000000),
"strength": strength
}
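    # POST the request and wait up to `timeout` seconds for the generated image bytes.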
response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
if response.status_code != 200:
print(f"Ошибка: Не удалось получить изображение. Статус ответа: {response.status_code}")
print(f"Содержимое ответа: {response.text}")
if response.status_code == 503:
raise gr.Error(f"{response.status_code} : The model is being loaded")
return None
raise gr.Error(f"{response.status_code}")
return None
try:
image_bytes = response.content
image = Image.open(io.BytesIO(image_bytes))
        print(f'\033[1mGeneration {key} complete!\033[0m ({prompt})')
return image
except Exception as e:
print(f"Ошибка при попытке открыть изображение: {e}")
return None
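# Minimal CSS override: hide the Gradio footer.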
css = """
* {}
footer {visibility: hidden !important;}
"""
with gr.Blocks(theme=gr.themes.Default(primary_hue="pink", secondary_hue="pink")) as dalle:
with gr.Tab("Basic Settings"):
with gr.Row():
with gr.Column(elem_id="prompt-container"):
with gr.Row():
text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=3, elem_id="prompt-text-input")
with gr.Row():
model = gr.Radio(label="Model", value="AbsoluteReality 1.8.1", choices=models_list)
with gr.Tab("Advanced Settings"):
with gr.Row():
negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry, text, fuzziness", lines=3, elem_id="negative-prompt-text-input")
with gr.Row():
steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
with gr.Row():
cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
with gr.Row():
method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
with gr.Row():
strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
with gr.Row():
seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
# with gr.Row():
# gpt = gr.Checkbox(label="ChatGPT")
with gr.Tab("Information"):
with gr.Row():
gr.Textbox(label="Sample prompt", value="{prompt} | ultra detail, ultra elaboration, ultra quality, perfect.")
with gr.Row():
text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
with gr.Row():
image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")
text_button.click(query, inputs=[text_prompt, model, negative_prompt, steps, cfg, method, seed, strength], outputs=image_output)
dalle.launch(show_api=False, share=False) |