SrikanthChellappa committed
Commit: 842ad9a
Parent(s): 5495583

Update app.py

Files changed (1): app.py +3 -4
app.py CHANGED
@@ -2,7 +2,6 @@ import gradio as gr
  import os
  import spaces
  from transformers import GemmaTokenizer, AutoModelForCausalLM
- from awq import AutoAWQForCausalLM
  from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
  from threading import Thread
 
@@ -50,10 +49,10 @@ footer{display:none !important}
  """
 
  # Load the tokenizer and model
- model_name="collaiborateorg/Collaiborator-MEDLLM-Llama-3-8B-v2-AWQ-4Bit"
+ model_name="collaiborateorg/Collaiborator-MEDLLM-Llama-3-8B-v2"
  tokenizer = AutoTokenizer.from_pretrained(model_name)
- #model = AutoModelForCausalLM.from_pretrained("collaiborateorg/Collaiborator-MEDLLM-Llama-3-8B-v1", device_map="auto") # to("cuda:0")
- model = AutoAWQForCausalLM.from_pretrained(model_name, device_map='auto')
+ model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto") # to("cuda:0")
+ #model = AutoAWQForCausalLM.from_pretrained(model_name, device_map='auto')
  terminators = [
  tokenizer.eos_token_id,
  tokenizer.convert_tokens_to_ids("<|eot_id|>")
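For context, this commit swaps the AWQ-4Bit quantized checkpoint (loaded through awq's AutoAWQForCausalLM) for the full-precision Collaiborator-MEDLLM-Llama-3-8B-v2 model loaded with transformers' AutoModelForCausalLM. The sketch below shows how the updated loading path and the TextIteratorStreamer imported at the top of app.py might fit together; it is a minimal illustration only, and the prompt text and generation parameters are assumptions, not taken from the file.

  # Minimal sketch of the updated loading path; prompt and generation
  # settings below are illustrative assumptions, not from app.py.
  from threading import Thread
  from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

  model_name = "collaiborateorg/Collaiborator-MEDLLM-Llama-3-8B-v2"  # checkpoint introduced in this commit
  tokenizer = AutoTokenizer.from_pretrained(model_name)
  model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")  # replaces AutoAWQForCausalLM

  # Stop generation on either the model's EOS token or the Llama-3 end-of-turn token.
  terminators = [
      tokenizer.eos_token_id,
      tokenizer.convert_tokens_to_ids("<|eot_id|>"),
  ]

  prompt = "What are the common symptoms of iron-deficiency anemia?"  # hypothetical example prompt
  inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
  streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

  # Run generate on a background thread so the streamer can be consumed as tokens arrive.
  thread = Thread(
      target=model.generate,
      kwargs=dict(**inputs, streamer=streamer, max_new_tokens=256, eos_token_id=terminators),
  )
  thread.start()
  for chunk in streamer:
      print(chunk, end="", flush=True)
  thread.join()

Running model.generate on a separate thread while iterating the TextIteratorStreamer is the usual pattern for incremental output in a Gradio chat app, which matches the Thread and TextIteratorStreamer imports kept at the top of app.py.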