srinidhidevaraj committed on
Commit
604b05d
1 Parent(s): 47e58ec

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -7
app.py CHANGED
@@ -9,7 +9,7 @@ from transformers import MllamaForConditionalGeneration, AutoProcessor
9
  import torch
10
  from accelerate import init_empty_weights
11
  # Load environment variables
12
-
13
  # Configure Gemini API
14
  # genai.configure(api_key=os.getenv("gkey2"))
15
 
@@ -27,12 +27,14 @@ prompt="<|image|><|begin_of_text|>You are a helpful assistant. Please respond to
27
  # model = Ollama(model="llama3.2")
28
  model_id = "meta-llama/Llama-3.2-11B-Vision"
29
 
30
- model = MllamaForConditionalGeneration.from_pretrained(
31
- model_id,
32
- torch_dtype=torch.bfloat16,
33
- device_map="auto",
34
- )
35
- processor = AutoProcessor.from_pretrained(model_id)
 
 
36
 
37
  # Define function to get response from the model
38
  def get_gemin_response(input_text, img):
 
9
  import torch
10
  from accelerate import init_empty_weights
11
  # Load environment variables
12
+ from transformers import AutoProcessor, AutoModelForPreTraining
13
  # Configure Gemini API
14
  # genai.configure(api_key=os.getenv("gkey2"))
15
 
 
27
  # model = Ollama(model="llama3.2")
28
  model_id = "meta-llama/Llama-3.2-11B-Vision"
29
 
30
+ # model = MllamaForConditionalGeneration.from_pretrained(
31
+ # model_id,
32
+ # torch_dtype=torch.bfloat16,
33
+ # device_map="auto",
34
+ # )
35
+ # processor = AutoProcessor.from_pretrained(model_id)
36
+ processor = AutoProcessor.from_pretrained("meta-llama/Llama-3.2-11B-Vision")
37
+ model = AutoModelForPreTraining.from_pretrained("meta-llama/Llama-3.2-11B-Vision")
38
 
39
  # Define function to get response from the model
40
  def get_gemin_response(input_text, img):