from diffusers import StableDiffusionXLImg2ImgPipeline, AutoencoderKL
from diffusers.utils import load_image
import torch
import time
import utilities as u
import card_generator as card
from PIL import Image

pipe = None
start_time = time.time()
torch.backends.cuda.matmul.allow_tf32 = True
model_path = "/media/drakosfire/Shared/models/stable-diffusion/card-generator-v1/card-generator-v1.safetensors"
lora_path = "/media/drakosfire/Shared/models/stable-diffusion/card-generator-v1/blank-card-template-5.safetensors"
detail_lora_path = "/media/drakosfire/Shared/models/stable-diffusion/card-generator-v1/add-detail-xl.safetensors"
mimic_lora_path = "/media/drakosfire/Shared/CardGenerator/models/stable-diffusion/Loras/EnvyMimicXL01.safetensors"
temp_image_path = "./image_temp/"
card_pre_prompt = " blank magic card,high resolution, detailed intricate high quality border, textbox, high quality detailed magnum opus drawing of a "
negative_prompts = "text, words, numbers, letters"
image_list = []

class img_generator():

    def load_img_gen(self,prompt, item, mimic = None):
        print(f"Input Prompt = {prompt} + Item : {item}")
        prompt = card_pre_prompt + item + ' ' + prompt
        print(prompt)
        
        
        # Load the base SDXL img2img pipeline from a single-file checkpoint in fp16
        pipe = StableDiffusionXLImg2ImgPipeline.from_single_file(
            model_path,
            custom_pipeline="low_stable_diffusion",
            torch_dtype=torch.float16,
            variant="fp16").to("cuda")
        # Load LoRAs for controlling image
        #pipe.load_lora_weights(lora_path, weight_name = "blank-card-template-5.safetensors",adapter_name = 'blank-card-template')    
        pipe.load_lora_weights(detail_lora_path, weight_name = "add-detail-xl.safetensors", adapter_name = "add-detail-xl")
        
        # If mimic keyword has been detected, load the mimic LoRA and set adapter values
        if mimic:
            print("MIMIC!")
            pipe.load_lora_weights(mimic_lora_path, weight_name = "EnvyMimicXL01.safetensors", adapter_name = "EnvyMimicXL")
            # Only adapters that have actually been loaded can be activated; the
            # blank-card-template LoRA is commented out above, so it is omitted here
            pipe.set_adapters(["add-detail-xl", "EnvyMimicXL"], adapter_weights = [0.9, 1.0])
        else:
            pipe.set_adapters(["add-detail-xl"], adapter_weights = [0.9])
        pipe.enable_vae_slicing()
        return pipe, prompt

    def preview_and_generate_image(self,x,pipe, prompt, user_input_template, item):    
        img_start = time.time()   
        image = pipe(prompt=prompt,
                    strength = .9,
                    guidance_scale = 5,
                    image= user_input_template,
                    negative_prompt = negative_prompts,
                    num_inference_steps=40,
                    height = 1024, width = 768).images[0]
        
        # Save the generated image; PIL's save() returns None, so keep the Image
        # object in `image` and track the output path separately
        output_image_path = temp_image_path + str(x) + f"{item}.png"
        image.save(output_image_path)
        img_time = time.time() - img_start
        img_its = 40 / img_time  # approximate it/s, based on num_inference_steps=40
        print(f"image gen time = {img_time} and {img_its} it/s")
            
        # Drop the local reference to the generated image; the pipeline itself
        # holds the VRAM that needs to be freed before loading the LLM
        del image
        print(f"Memory after del {torch.cuda.memory_allocated()}")
        print(image_list)
        total_time = time.time() - start_time
        print(total_time)

        return output_image_path
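

# --- Usage sketch (not part of the original module) ---
# A minimal example of how the class above might be driven. The item name and
# prompt are made up, and the template path is a hypothetical placeholder; it
# also assumes the model and LoRA paths defined at the top exist on this machine.
if __name__ == "__main__":
    import os

    os.makedirs(temp_image_path, exist_ok=True)  # ensure the output directory exists
    generator = img_generator()
    template = Image.open("./card_templates/blank_template.png").convert("RGB")  # hypothetical blank card template
    pipe, prompt = generator.load_img_gen("a glowing longsword on a stone altar", "Sword of Embers")
    output_path = generator.preview_and_generate_image(0, pipe, prompt, template, "Sword of Embers")
    print(f"Saved card image to {output_path}")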