import gradio as gr
import requests
import io
import random
import os
from PIL import Image
from deep_translator import GoogleTranslator
from langdetect import detect
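# Default Hosted Inference API endpoint and request settings. HF_READ_TOKEN (and the
# numbered HF_READ_TOKEN_2..5 variants used in query()) are assumed to be Hugging Face
# read tokens provided as environment variables / Space secrets.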
API_URL = "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl" | |
API_TOKEN = os.getenv("HF_READ_TOKEN") | |
headers = {"Authorization": f"Bearer {API_TOKEN}"} | |
timeout = 100 | |
models_list = ["AbsoluteReality 1.8.1", "DALL-E 3 XL", "Playground 2", "Openjourney 4", "Lyriel 1.6", "Counterfeit 2.5", "Realistic Vision 5.1", "Incursios 1.6", "Vector Art XL", "epiCRealism", "PixelArt XL", "NewReality XL", "Anything 5.0", "Disney", "CleanLinearMix", "OrangeMixs"] | |
# PLEASE ❤ like ❤ this space. Please like me. I am 12 years old, and one of my projects is https://ai-hub.rf.gd . I live in Russia and don't know English very well, so I apologize that there is only Russian here; I think it will not be difficult to translate all of it (for example, using GPT).
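# query() sends one text-to-image request to the Hugging Face Inference API for the
# selected model and returns the result as a PIL image, or None on any failure.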
def query(prompt, model, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1):
    if not prompt:
        return None
    API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN"), os.getenv("HF_READ_TOKEN_2"), os.getenv("HF_READ_TOKEN_3"), os.getenv("HF_READ_TOKEN_4"), os.getenv("HF_READ_TOKEN_5")])  # it is free
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    language = detect(prompt)
    key = random.randint(0, 999)
    print(f'\033[1mGeneration {key}:\033[0m {prompt}')
    if language == 'ru':
        prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
        print(f'\033[1mGeneration {key} translated:\033[0m {prompt}')
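    # Resolve the chosen model name to its Hosted Inference API endpoint.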
    model_urls = {
        'DALL-E 3 XL': "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl",
        'Playground 2': "https://api-inference.huggingface.co/models/playgroundai/playground-v2-1024px-aesthetic",
        'Openjourney 4': "https://api-inference.huggingface.co/models/prompthero/openjourney-v4",
        'AbsoluteReality 1.8.1': "https://api-inference.huggingface.co/models/digiplay/AbsoluteReality_v1.8.1",
        'Lyriel 1.6': "https://api-inference.huggingface.co/models/stablediffusionapi/lyrielv16",
        'Animagine XL 2.0': "https://api-inference.huggingface.co/models/Linaqruf/animagine-xl-2.0",
        'Counterfeit 2.5': "https://api-inference.huggingface.co/models/gsdf/Counterfeit-V2.5",
        'Realistic Vision 5.1': "https://api-inference.huggingface.co/models/stablediffusionapi/realistic-vision-v51",
        'Incursios 1.6': "https://api-inference.huggingface.co/models/digiplay/incursiosMemeDiffusion_v1.6",
        'Anime Detailer XL': "https://api-inference.huggingface.co/models/Linaqruf/anime-detailer-xl-lora",
        'epiCRealism': "https://api-inference.huggingface.co/models/emilianJR/epiCRealism",
        'PixelArt XL': "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl",
        'NewReality XL': "https://api-inference.huggingface.co/models/stablediffusionapi/newrealityxl-global-nsfw",
        'Anything 5.0': "https://api-inference.huggingface.co/models/hogiahien/anything-v5-edited",
        'Vector Art XL': "https://api-inference.huggingface.co/models/DoctorDiffusion/doctor-diffusion-s-controllable-vector-art-xl-lora",
        'Disney': "https://api-inference.huggingface.co/models/goofyai/disney_style_xl",
        'CleanLinearMix': "https://api-inference.huggingface.co/models/digiplay/CleanLinearMix_nsfw",
        'OrangeMixs': "https://api-inference.huggingface.co/models/WarriorMama777/OrangeMixs",
    }
    API_URL = model_urls.get(model, "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl")
    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed if seed != -1 else random.randint(1, 1000000000)
    }
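    # Send the request; non-200 responses (for example while the model is still loading,
    # or on rate limits) are logged and surfaced to the UI as an empty result.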
    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        print(f"Error: failed to fetch the image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        return None

    try:
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        print(f'\033[1mGeneration {key} finished!\033[0m ({prompt})')
        return image
    except Exception as e:
        print(f"Error while trying to open the image: {e}")
        return None
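# A minimal sanity check outside the UI, assuming at least HF_READ_TOKEN is set:
#   img = query("a red fox in the snow", "DALL-E 3 XL")
#   if img is not None:
#       img.save("test.png")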
css = """ | |
* {} | |
footer {visibility: hidden !important;} | |
""" | |
with gr.Blocks(css=css) as dalle:
    with gr.Tab("Basic settings"):
        with gr.Row():
            with gr.Column(elem_id="prompt-container"):
                with gr.Row():
                    text_prompt = gr.Textbox(label="Prompt", placeholder="Image description", lines=3, elem_id="prompt-text-input")
                with gr.Row():
                    model = gr.Radio(label="Model", value="DALL-E 3 XL", choices=models_list)
    with gr.Tab("Advanced settings"):
        with gr.Row():
            negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not appear in the image", value="[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry, text, fuzziness", lines=3, elem_id="negative-prompt-text-input")
        with gr.Row():
            steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
        with gr.Row():
            cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
        with gr.Row():
            method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
        with gr.Row():
            seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
    with gr.Row():
        text_button = gr.Button("Generate", variant='primary', elem_id="gen-button")
    with gr.Row():
        image_output = gr.Image(type="pil", label="Image", elem_id="gallery")
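    # The click handler maps the UI controls onto query()'s positional parameters
    # (prompt, model, negative prompt, steps, CFG scale, sampler, seed).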
    text_button.click(query, inputs=[text_prompt, model, negative_prompt, steps, cfg, method, seed], outputs=image_output)

dalle.launch(show_api=False, share=False)