File size: 566 Bytes
0602100
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
from functools import lru_cache

import torch
from PIL import Image
from transformers import CLIPProcessor, CLIPModel

@lru_cache(maxsize=1)
def _load_clip():
    """Load and cache the CLIP model and processor.

    Hub loading is expensive (network + weight deserialization), so it is
    done once per process instead of on every call.
    """
    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    return model, processor


def generate_image_from_text(prompt):
    """Return a placeholder image for *prompt*.

    Encodes the prompt with CLIP's text tower (the embedding is currently
    unused) and returns a solid-blue 256x256 RGB image. NOTE: CLIP is a
    contrastive text/image *encoder*, not a generative model — real image
    synthesis must come from a separate backend (e.g. a diffusion model);
    this function only stubs that out.

    Args:
        prompt: Natural-language description of the desired image.

    Returns:
        PIL.Image.Image: a 256x256 solid-blue placeholder image.
    """
    model, processor = _load_clip()

    # Text-only encoding. The original code called model(**inputs) without
    # pixel_values, which raises inside CLIPModel.forward; the dedicated
    # text encoder is the correct text-only entry point.
    inputs = processor(text=[prompt], return_tensors="pt", padding=True)
    with torch.no_grad():  # pure inference — skip autograd bookkeeping
        _text_embedding = model.get_text_features(**inputs)  # unused for now

    # Placeholder until a real text-to-image backend is wired in.
    return Image.new("RGB", (256, 256), color="blue")