import os

import gradio as gr
import torch
from transformers import AutoProcessor, MllamaForConditionalGeneration

# The Llama 3.2 vision models are gated on the Hugging Face Hub, so an access token is required.
hf_token = os.getenv("HF_TOKEN")
if not hf_token:
    raise ValueError("The HF_TOKEN environment variable is not set. Please set your Hugging Face token.")

# Load the processor and the vision-language model. device_map="auto" shards the
# weights across the available GPUs; float16 halves the memory footprint of the weights.
model_name = "meta-llama/Llama-3.2-90B-Vision-Instruct"
processor = AutoProcessor.from_pretrained(model_name, token=hf_token)
model = MllamaForConditionalGeneration.from_pretrained(
    model_name,
    token=hf_token,
    device_map="auto",
    torch_dtype=torch.float16,
)
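# If the full-precision 90B checkpoint does not fit on the available GPUs, a
# 4-bit quantized load via bitsandbytes is one possible alternative. This is a
# sketch, not part of the original demo, and assumes bitsandbytes is installed:
#
#   from transformers import BitsAndBytesConfig
#
#   model = MllamaForConditionalGeneration.from_pretrained(
#       model_name,
#       token=hf_token,
#       device_map="auto",
#       quantization_config=BitsAndBytesConfig(load_in_4bit=True),
#   )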
|
|
|
# Run a single image + text prompt through the model and return the generated answer.
def predict(image, text):
    # Llama 3.2 Vision expects a chat-formatted prompt in which an image
    # placeholder precedes the user text.
    messages = [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": text}]}]
    prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
    inputs = processor(
        images=image, text=prompt, add_special_tokens=False, return_tensors="pt"
    ).to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=100)
    # Decode only the newly generated tokens, not the echoed prompt.
    generated = outputs[:, inputs["input_ids"].shape[-1]:]
    response = processor.batch_decode(generated, skip_special_tokens=True)[0]
    return response


# Build the Gradio interface: an image input, a text input, and a text output.
interface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Image(type="pil", label="Image Input"),
        gr.Textbox(label="Text Input"),
    ],
    outputs=gr.Textbox(label="Output"),
    title="Llama 3.2 90B Vision Instruct Demo",
    description="A model that takes an image and a text prompt and generates a response.",
)

interface.launch()
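# Minimal usage sketch (assumptions: this script is saved as app.py and your
# Hugging Face account has been granted access to the gated
# meta-llama/Llama-3.2-90B-Vision-Instruct repository):
#
#   export HF_TOKEN=<your Hugging Face access token>
#   python app.py
#
# The predict function can also be called directly, e.g. with a hypothetical
# local image file:
#
#   from PIL import Image
#   print(predict(Image.open("example.jpg"), "What is shown in this image?"))
#
# Note that the 90B weights alone take on the order of 180 GB in float16, so
# device_map="auto" is expected to shard them across several large GPUs.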
|