red1xe committed on
Commit 56e7c58
Parent: 9861a8a

add application file

Files changed (1):
  app.py (+2 -14)
app.py CHANGED
@@ -1,11 +1,5 @@
 from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TrainingArguments, Trainer, pipeline
-from peft import PeftModel, PeftConfig
 from huggingface_hub import login
-import bitsandbytes as bnb
-import torch
-import time
-import pandas as pd
-import numpy as np
 
 
 import streamlit as st
@@ -19,14 +13,8 @@ st.set_page_config(
 login(token='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', add_to_git_credential=True)
 
 st.title("Code Generation")
-st.write('MODEL: TinyPixel/Llama-2-7B-bf16-sharded')
-bnb_config = BitsAndBytesConfig(
-    load_in_4bit=True,
-    bnb_4bit_use_double_quant=True,
-    bnb_4bit_quant_type="nf4",
-    bnb_4bit_compute_dtype=torch.bfloat16
-)
+st.write('MODEL: TinyPixel/red1xe/Llama-2-7B-codeGPT')
 model_name='red1xe/Llama-2-7B-codeGPT'
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-model= AutoModelForCausalLM.from_pretrained(model_name, quantization_config=bnb_config)
+model= AutoModelForCausalLM.from_pretrained(model_name)
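
For context, a minimal sketch (not part of the commit) of how the tokenizer and model loaded in app.py are typically wired into a Streamlit generation flow via transformers.pipeline; the prompt widget, button, and max_new_tokens value below are assumptions, not code from this repository:

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import streamlit as st

model_name = 'red1xe/Llama-2-7B-codeGPT'

@st.cache_resource  # cache the expensive model load across Streamlit reruns
def load_generator():
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return pipeline('text-generation', model=model, tokenizer=tokenizer)

generator = load_generator()

prompt = st.text_area('Enter a prompt')
if st.button('Generate') and prompt:
    # assumed generation settings; the commit itself stops after loading the model
    result = generator(prompt, max_new_tokens=128)[0]['generated_text']
    st.code(result)

In a deployed Space the Hugging Face token would normally come from st.secrets or an environment variable rather than being hardcoded as in the diff above.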