#version 2.1
import os
import math
import gradio as gr
import numpy as np
import requests
import json
import base64
from PIL import Image
from io import BytesIO
import runpod
from enum import Enum

api_key = os.getenv("FAI_API_KEY")
api = os.getenv("FAI_API")
api_anime = os.getenv("FAI_API_2")
#api_anime = os.getenv("ANIME_API")
rmbgkey = os.getenv("RMBGKEY")

#fore, prompt, intensity, mode, refprompt, isrmbg

#erased this prompt
#"A car, on a high-tech city street at night, surrounded by neon lights and holographic billboards, with sleek skyscrapers and flying cars in the background, a glowing cyber-road beneath",

outputs_list = [
    ["examples/out1.png"], ["examples/out3.png"], ["examples/out4.png"],
    ["examples/out5.png"], ["examples/out6.png"], ["examples/out7.png"],
    ["examples/out2.png"],
]

prompt_list = [
    "A Perfume Bottle, resting on a wooden table, surrounded by lavender leaves with droplets of morning dew glistening, in a serene garden, under the soft glow of a twilight sky, encircled by delicate purple petals",
    "A Pair of Shoes, resting on a sleek white platform under a bright spotlight, with subtle pastel-colored reflections and delicate flower petals scattered around, creating a cheerful and elegant setting",
    "A Perfume bottle, perched delicately on a rock by the ocean, with a breathtaking sunset casting a golden glow over the waves",
    "A Bottle, placed on a rustic wooden table, viewed from a slight above angle, overlooking a serene beach, with seashells and driftwood artfully scattered around, bathed in the warm glow of the setting sun",
    "A Sofa, In a snug, inviting room, a large sofa accompanied by plush cushions, a thick rug, book-filled shelves, potted plants, framed art, a coffee table with candles, a floor lamp, and gentle, warm lighting",
    "Headphones, hovering gracefully against a swirling pastel galaxy, accented by glimmers of stardust, ethereal light beams, and floating musical notes, under a dreamy aurora borealis, with no visible wires",
]

# Each example row matches the Examples inputs:
# [foreground image, prompt, intensity, mode, refiner prompt, remove background, output image]
example_list = [
    [
        "examples/in5.png",
        "A Perfume bottle, perched delicately on a rock by the ocean, with a breathtaking sunset casting a golden glow over the waves",
        3.5,
        "full",
        "(( A transparent perfume Bottle with a black cap )), and a yellow liquid, sunset, transparent",
        False,
        "examples/out5.png"
    ],
    [
        "examples/in6.png",
        "A Sofa, In a snug, inviting room, a large sofa accompanied by plush cushions, a thick rug, book-filled shelves, potted plants, framed art, a coffee table with candles, a floor lamp, and gentle, warm lighting",
        3.5,
        "full",
        "A light grey Sofa, studio light, shadows",
        False,
        "examples/out6.png"
    ],
    #[
    #    "examples/in7.png",
    #    "A car, on a high-tech city street at night, surrounded by neon lights and holographic billboards, with sleek skyscrapers and flying cars in the background, a glowing cyber-road beneath",
    #    3.0,
    #    "full",
    #    "(( A light grey Car )), vibrant, reflections",
    #    False,
    #    "examples/out7.png"
    #],
    [
        "examples/in1.png",
        "A Perfume Bottle, resting on a wooden table, surrounded by lavender leaves with droplets of morning dew glistening, in a serene garden, under the soft glow of a twilight sky, encircled by delicate purple petals",
        3.0,
        "full",
        "A perfume Bottle with a purple liquid, studio light",
        False,
        "examples/out1.png"
    ],
    [
        "examples/in2.png",
        "Headphones, hovering gracefully against a swirling pastel galaxy, accented by glimmers of stardust, ethereal light beams, and floating musical notes, under a dreamy aurora borealis, with no visible wires",
        3.50,
        "full",
        "headphones, vibrant, colorful, ",
        False,
        "examples/out2.png"
    ],
    [
        "examples/in3.png",
        "A Pair of Shoes, resting on a sleek white platform under a bright spotlight, with subtle pastel-colored reflections and delicate flower petals scattered around, creating a cheerful and elegant setting",
        3.5,
        "full",
        "(( A Pair of Brown Shoes )), vibrant, Shadow",
        False,
        "examples/out3.png"
    ],
    [
        "examples/in4.png",
        "A Bottle, placed on a rustic wooden table, viewed from a slight above angle, overlooking a serene beach, with seashells and driftwood artfully scattered around, bathed in the warm glow of the setting sun",
        3.50,
        "full",
        "A transparent Bottle with a black cap and a semi transparent liquid, Sunset",
        False,
        "examples/out4.png"
    ]
]


def rmbg(pil_image):
    # Convert the PIL image to PNG bytes
    image_bytes = BytesIO()
    pil_image.save(image_bytes, format='PNG')
    image_bytes.seek(0)

    # Send the image to the remove.bg API
    response = requests.post(
        'https://api.remove.bg/v1.0/removebg',
        files={'image_file': ('filename.png', image_bytes, 'image/png')},
        data={'size': 'auto'},
        headers={'X-Api-Key': rmbgkey}
    )

    if response.status_code == 200:
        # Convert the bytes response to a PIL image
        result_image = Image.open(BytesIO(response.content))
        return result_image
    else:
        return None


def image_to_base64(image):
    # Use the image as a context manager so it is closed once encoded
    with image:
        # Create a buffer to hold the binary data
        buffered = BytesIO()
        # Save the image as PNG to the buffer
        #print(image.format)
        image.save(buffered, format="PNG")
        # Get the byte data from the buffer
        binary_image_data = buffered.getvalue()
        # Encode the binary data to a base64 string
        base64_image = base64.b64encode(binary_image_data).decode("utf-8")
    return base64_image


def create_square_image(image):
    """
    Create a new square image with the side length equal to the largest dimension
    of the original image and paste the original image at the center on a
    transparent canvas.

    :param image: A PIL image.
    :return: A new square PIL image.
    """
    original_width, original_height = image.size
    new_side_length = max(original_width, original_height)

    # Create a new square image with a transparent background
    new_image = Image.new("RGBA", (new_side_length, new_side_length), (255, 255, 255, 0))

    # Calculate the position to paste the original image on the new square canvas
    paste_x = (new_side_length - original_width) // 2
    paste_y = (new_side_length - original_height) // 2

    # Paste the original image onto the new square canvas using the alpha channel as a mask
    new_image.paste(image, (paste_x, paste_y), image)

    return new_image


def process(data, api, api_key):
    runpod.api_key = api_key
    input_payload = {"input": data}

    try:
        endpoint = runpod.Endpoint(api)
        run_request = endpoint.run(input_payload)

        # Initial check without blocking, useful for quick tasks
        status = run_request.status()
        print(f"Initial job status: {status}")

        if status == "IN_QUEUE":
            gr.Info("Queued 🚢🚢🚢🚢!", duration=35)

        if status != "COMPLETED":
            # Polling with timeout for long-running tasks
            output = run_request.output(timeout=120)
        else:
            output = run_request.output()

        print(f"Job output: {output}")
    except Exception as e:
        print(f"An error occurred: {e}")
        status = run_request.status()
        if status == "FAILED":
            raise gr.Error(f"An error occurred 💥! {e}", duration=5)
        if status == "TIMED_OUT":
            raise gr.Error("Sorry, we could not secure a worker for you ⏳! Try again", duration=5)
        # Any other failure: re-raise so we do not fall through with an undefined output
        raise gr.Error(f"An error occurred 💥! {e}", duration=5)

    image_data = output['image']
    # Decode the Base64 string
    image_bytes = base64.b64decode(image_data)
    # Convert binary data to image
    image = Image.open(BytesIO(image_bytes))

    return image


def resize_to_fit(max_size, original_size):
    """
    Calculate the new size for an image to fit within max_size while maintaining the aspect ratio.

    :param max_size: Maximum allowed size as a tuple (width, height).
    :param original_size: Original size of the image as a tuple (width, height).
    :return: New size as a tuple (new_width, new_height) that fits within max_size
             while maintaining the aspect ratio.
    """
    original_width, original_height = original_size
    max_width, max_height = max_size

    # Calculate the scaling factor to maintain aspect ratio
    width_ratio = max_width / original_width
    height_ratio = max_height / original_height
    scaling_factor = min(width_ratio, height_ratio)

    # Calculate the new size while maintaining the aspect ratio
    new_width = int(original_width * scaling_factor)
    new_height = int(original_height * scaling_factor)

    return new_width, new_height


def process_generate(fore, prompt, intensity, mode, refprompt, isrmbg, model_type, steps, control_strength):
    max_size = (768, 768)
    if prompt in prompt_list:
        max_size = (768, 768)

    # Optionally strip the background before compositing
    if isrmbg:
        try:
            rmbgfore = rmbg(fore)
            if rmbgfore is not None:
                fore = rmbgfore.convert("RGBA")
                print("Background removed!")
        except Exception as e:
            print(f"Background removal failed, keeping the original image: {e}")

    fore = create_square_image(fore)

    size = fore.size
    image_width = size[0]
    image_height = size[1]

    if size[0] * size[1] <= (512 * 512):
        gr.Warning("ℹ️ The input image resolution is low, it might lead to some deformation!")

    if size[0] * size[1] > (max_size[0] * max_size[1]):
        gr.Warning("ℹ️ The input image size is too big, I will lower it!")
        image_width, image_height = resize_to_fit((768, 768), (image_width, image_height))
        # PIL's resize returns a new image, so the result must be reassigned
        fore = fore.resize((image_width, image_height))

    forestr = image_to_base64(fore.convert("RGBA"))

    data = {
        "foreground_image64": forestr,
        "prompt": prompt,
        "mode": mode,
        "intensity": float(intensity),
        "width": 1000,
        "height": 1000,
        "refprompt": refprompt,
        "first_stage_steps": int(steps),
        "first_stage_strength": float(control_strength),
        "second_stage_steps": 20
    }

    # Select the endpoint based on the model type
    if model_type == "anime":
        api_endpoint = api_anime
    elif model_type == "realistic":
        api_endpoint = api
    else:
        raise ValueError("Invalid model type selected.")

    #print(f"DATA: {data}")
    '''
    data = {
        "foreground_image64": forestr,
        "prompt": "There is Perfume, nestled on a crystalline cliff of glistening snow, under a celestial night sky adorned with constellations and swirling galaxies, framed by ethereal, blue flames that dance gracefully in the icy air",
        "mode": "full",  #refiner, full
        "intensity": 3.0,
        "width": 1000,
        "height": 1000,
        "refprompt": " transparent glass "
    }
    '''

    image = process(data, api_endpoint, api_key)
    return image


def update_value(val):
    return val


class Stage(Enum):
    FIRST_STAGE = "first-stage"
    SECOND_STAGE = "refiner"
    FULL = "full"


css = """
#disp_image { text-align: center; }
#share-btn-container { padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; max-width: 13rem; margin-left: auto; }
#share-btn-container > div { flex-direction: row; background: black; align-items: center; }
#share-btn-container:hover { background-color: #060606; }
#share-btn { all: initial; color: #ffffff; font-weight: 600; cursor: pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.5rem !important; padding-bottom: 0.5rem !important; right: 0; }
#share-btn * { all: unset; }
#share-btn-container div:nth-child(-n+2) { width: auto !important; min-height: 0px !important; }
#share-btn-container .wrap { display: none !important; }
#share-btn-container.hidden { display: none !important; }
#duplicate-button { margin-left: auto; color: #fff; background: #1565c0; }
body { font-family: Arial, sans-serif; background-color: #f4f4f9; margin: 0; padding: 0; min-height: 100vh; color: #333; }
.custom-button { background: linear-gradient(271.15deg, #00C7E2 0.27%, #00CC6A 48.52%, #70FF00 102.07%); font-size: 30px; color: white; padding: 10px 20px; border: none; border-radius: 5px; cursor: pointer; transition: opacity 0.3s ease; }
.custom-button:hover { opacity: 0.8; }
.custom-title { font-size: 36px; background: linear-gradient(271.15deg, #00C7E2 0.27%, #12C06D 102.07%); -webkit-background-clip: text; -webkit-text-fill-color: transparent; font-weight: bold; text-align: center; margin-bottom: 20px; }
h1 { color: #222222; }
a { color: #4a90e2; text-decoration: none; font-weight: bold; }
a:hover { text-decoration: underline; }
.emoji { font-size: 1.5em; }
@media (max-width: 768px) {
    .custom-button { font-size: 20px; padding: 8px 16px; }
    .custom-title { font-size: 28px; }
    #disp_image, .container { padding: 0 1rem; }
    h1 { font-size: 24px; }
}
@media (max-width: 480px) {
    .custom-button { font-size: 16px; padding: 6px 12px; }
    .custom-title { font-size: 22px; }
    .container { padding: 0 0.5rem; }
    h1 { font-size: 20px; }
    #share-btn-container { max-width: 100%; padding: 0.5rem; }
}
"""

block = gr.Blocks(css=css, title="FAI Fuzer").queue(default_concurrency_limit=12)

with block:
    gr.HTML('''

FAI Fuzer medium v0.3: Empower your AI Image Generation with Full Control 🎨

Welcome to the Updated Version of FAI Fuzer!

This is FAI Fuzer medium v0.3, an updated version of our previously released Fuzer v0.1. In this version, we've introduced several enhancements, including the addition of a new anime model to broaden your creative possibilities.

We appreciate the feedback and support from our community and are excited to bring these new features to you.

What's New in v0.3:
- A new anime model, selectable from the Model Type dropdown alongside the realistic model.
Please check us out on Twitter: Fotographer AI and Discord: https://discord.gg/7bKaeXfH

Step-by-Step Instructions

Follow these instructions to control the generation of backgrounds while keeping the foreground's shape and style consistent:
1. Upload a foreground image (check "Remove Background" if it still has a background).
2. Write a Prompt describing the full scene, and optionally a Refiner Prompt describing the foreground object itself.
3. Pick a Generation Mode and Model Type, adjust the Advanced Settings if needed, then press Generate.
    ''')

    gr.HTML("""

🔗 Check out our API!

    """)

    with gr.Row():
        gr.Markdown("### F.ai Fuzer: Real Composite Photography in 2 minutes!")

    with gr.Row():
        fore = gr.Image(image_mode='RGBA', type="pil", label="Foreground Image", height=400, width=400, min_width=400)
        # with gr.Column():
        result_gallery = gr.Image(label='Output', min_width=400)  #gr.Gallery(height=400, object_fit='contain', label='Outputs')

    with gr.Row():
        prompt = gr.Textbox(label="Prompt", min_width=400)
        # with gr.Column():
        refprompt = gr.Textbox(label="Refiner Prompt", min_width=400)

    with gr.Row():
        with gr.Column(min_width=400):
            mode = gr.Radio(choices=[e.value for e in Stage], value=Stage.FULL.value, label="Generation Mode", type='value', min_width=400)
            mode.change(fn=update_value, inputs=mode, outputs=mode)
            model_type = gr.Dropdown(choices=["realistic", "anime"], label="Model Type", value="realistic", min_width=400)  # Dropdown for model selection

        with gr.Column(min_width=400):
            # Advanced settings for the number of steps and control strength
            with gr.Accordion("Advanced Settings", open=True):
                steps = gr.Slider(label="Number of Steps", minimum=10, maximum=100, value=30, step=1)
                control_strength = gr.Slider(label="Control Strength", minimum=0.1, maximum=1.0, value=0.5, step=0.1)

        with gr.Column(min_width=400):
            gr.HTML('''

🚀 For more freedom of usage, check out our API

👀 You can test with free credits:

🔗 API Dashboard

            ''')

    with gr.Row():
        intensity = gr.Slider(label="Refiner Strength", minimum=1.0, maximum=7.0, value=3.0, step=0.5)
        intensity.change(fn=update_value, inputs=intensity, outputs=intensity)
        isrmbg = gr.Checkbox(label="Remove Background")
        isrmbg.change(fn=update_value, inputs=isrmbg, outputs=isrmbg)
        generate_button = gr.Button(value="Generate", elem_classes="custom-button")

    gr.HTML('''

Features:

    ''')

    gr.HTML("""

🔗 Check Out our other Projects Here!

    """)

    with gr.Row():
        dummy_image_for_outputs = gr.Image(visible=False, label='Result')
        gr.Examples(
            fn=lambda *args: [args[-1]],
            examples=example_list,
            inputs=[fore, prompt, intensity, mode, refprompt, isrmbg, dummy_image_for_outputs]
        )

    '''
    with gr.Column():
        dummy_image_for_outputs = gr.Image(visible=False, label='Result')
        gr.Examples(
            examples=outputs_list,
            inputs=[dummy_image_for_outputs],
        )
    '''

    ins = [fore, prompt, intensity, mode, refprompt, isrmbg, model_type, steps, control_strength]
    generate_button.click(fn=process_generate, inputs=ins, outputs=[result_gallery])

block.launch()