import json
import os
import random
import re
import time
from datetime import datetime
from io import BytesIO
from textwrap import wrap

import gradio as gr
import requests
from PIL import Image, ImageDraw, ImageFont, ImageOps
from huggingface_hub import InferenceApi

API_TOKEN = os.getenv("API_TOKEN")
HRA_TOKEN = os.getenv("HRA_TOKEN")

# Text generation via the BLOOM Inference API
inference = InferenceApi("bigscience/bloom", token=API_TOKEN)

# Fetch the prompt templates for tee-shirt text and image generation
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url_hraprompts = 'https://us-central1-createinsightsproject.cloudfunctions.net/gethrahfprompts'

data = {"prompt_type": 'stable_diffusion_tee_shirt_text', "hra_token": HRA_TOKEN}
try:
    r = requests.post(url_hraprompts, data=json.dumps(data), headers=headers)
except requests.exceptions.ReadTimeout as e:
    print(e)
prompt_text = str(r.content, 'UTF-8')
print(prompt_text)

data = {"prompt_type": 'stable_diffusion_tee_shirt_image', "hra_token": HRA_TOKEN}
try:
    r = requests.post(url_hraprompts, data=json.dumps(data), headers=headers)
except requests.exceptions.ReadTimeout as e:
    print(e)
prompt_image = str(r.content, 'UTF-8')
print(prompt_image)

# Stable Diffusion endpoint on the Hugging Face Inference API
ENDPOINT_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1"
#ENDPOINT_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-1-5"
HF_TOKEN = API_TOKEN  # token used to call the endpoint


def generate_image(prompt_SD: str):
    """Call the Stable Diffusion endpoint and return the generated PIL image."""
    print(prompt_SD)
    payload = {"inputs": prompt_SD, "seed": random.randint(0, 100)}
    headers = {
        "Authorization": f"Bearer {HF_TOKEN}",
        "Content-Type": "application/json",
        "Accept": "image/png"  # important to get an image back
    }
    response = requests.post(ENDPOINT_URL, headers=headers, json=payload)
    img = Image.open(BytesIO(response.content))
    return img


def infer(prompt, max_length=250, top_k=0, num_beams=0, no_repeat_ngram_size=2,
          top_p=0.9, seed=42, temperature=0.7, greedy_decoding=False,
          return_full_text=False):
    """Generate text with BLOOM, mapping the UI-style arguments to API parameters."""
    print(prompt)
    top_k = None if top_k == 0 else top_k
    do_sample = False if num_beams > 0 else not greedy_decoding
    num_beams = None if (greedy_decoding or num_beams == 0) else num_beams
    no_repeat_ngram_size = None if num_beams is None else no_repeat_ngram_size
    top_p = None if num_beams else top_p
    early_stopping = None if num_beams is None else num_beams > 0

    params = {
        "max_new_tokens": max_length,
        "top_k": top_k,
        "top_p": top_p,
        "temperature": temperature,
        "do_sample": do_sample,
        "seed": seed,
        "early_stopping": early_stopping,
        "no_repeat_ngram_size": no_repeat_ngram_size,
        "num_beams": num_beams,
        "return_full_text": return_full_text
    }

    s = time.time()
    response = inference(prompt, params=params)
    proc_time = time.time() - s
    #print(f"Processing time was {proc_time} seconds")
    return response


def getadline(text_inp):
    """Generate a tee-shirt tagline with BLOOM and screen the input keyword for profanity."""
    print(text_inp)
    print(datetime.today().strftime("%d-%m-%Y"))

    text = prompt_text + "\nInput:" + text_inp + "\nOutput:"
    resp = infer(text, seed=random.randint(0, 100))

    generated_text = resp[0]['generated_text']
    result = generated_text.replace(text, '').strip()
    result = result.replace("Output:", "")
    parts = result.split("###")
    topic = parts[0].strip()

    # Build a lowercase profanity list and check the input keyword against it
    response_nsfw = requests.get('https://github.com/coffee-and-fun/google-profanity-words/raw/main/data/list.txt')
    data_nsfw = response_nsfw.text
    nsfwlist = data_nsfw.split('\n')
    nsfwlowerlist = []
    for each in nsfwlist:
        if each != '':
            nsfwlowerlist.append(each.lower())
    nsfwlowerlist.extend(['bra', 'gay', 'lesbian'])
    print(topic)

    mainstring = text_inp
    foundnsfw = 0
    for each_word in nsfwlowerlist:
        raw_search_string = r"\b" + each_word + r"\b"
        match_output = re.search(raw_search_string, mainstring)
        if match_output is not None:
            foundnsfw = 1
            print(each_word)
            break
    if foundnsfw == 1:
        topic = "Unsafe content found. Please try again with different prompts."
        print(topic)
    return topic


def getadvertisement(topic):
    """Generate a tee-shirt design image for the given topic (or a random one if empty)."""
    if topic != '':
        input_keyword = topic
    else:
        input_keyword = getadline(random.choice('abcdefghijklmnopqrstuvwxyz'))
    if 'Unsafe content found' in input_keyword:
        input_keyword = 'Abstract art with a splash of colors'

    prompt_SD = input_keyword + ',' + prompt_image
    # generate image
    image = generate_image(prompt_SD)
    # save to disk
    image.save("finalimage.png")
    return 'finalimage.png'


with gr.Blocks() as demo:
    gr.Markdown("