red1xe committed on
Commit
6a505b7
1 Parent(s): b61717e
Files changed (1) hide show
  1. app.py +1 -39
app.py CHANGED
@@ -1,5 +1,5 @@
1
  from datasets import load_dataset
2
- from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, TrainingArguments, Trainer
3
  import torch
4
  import time
5
  import evaluate
@@ -18,41 +18,3 @@ st.set_page_config(
18
  login(token='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', add_to_git_credential=True)
19
 
20
  st.title("Code Generation")
21
- huggingface_dataset_name = "red1xe/code_instructions"
22
- dataset = load_dataset(huggingface_dataset_name)
23
- model_name='bigcode/starcoder'
24
- tokenizer = AutoTokenizer.from_pretrained(model_name)
25
- original_model = AutoModelForCausalLM.from_pretrained(model_name)
26
-
27
- x = st.slider(label='Select a sample', min_value=0, max_value=1000, value=500, step=10)
28
- if st.button("Show Sample"):
29
- index = x
30
-
31
- input = dataset['test'][index]['input']
32
- instruction = dataset['test'][index]['instruction']
33
- output = dataset['test'][index]['output']
34
-
35
- prompt = f"""
36
- Answer the following question.
37
-
38
- {input} {instruction}
39
-
40
- Answer:
41
- """
42
-
43
- inputs = tokenizer(prompt, return_tensors='pt')
44
- outputs = tokenizer.decode(
45
- original_model.generate(
46
- inputs["input_ids"],
47
- max_new_tokens=200,
48
- )[0],
49
- skip_special_tokens=True
50
- )
51
-
52
- dash_line = '-'.join('' for x in range(100))
53
- st.write(dash_line)
54
- st.write(f'INPUT PROMPT:\n{prompt}')
55
- st.write(dash_line)
56
- st.write(f'BASELINE HUMAN SUMMARY:\n{output}\n')
57
- st.write(dash_line)
58
- st.write(f'MODEL GENERATION - ZERO SHOT:\n{outputs}')
 
1
  from datasets import load_dataset
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, TrainingArguments, Trainer, pipeline
3
  import torch
4
  import time
5
  import evaluate
 
18
  login(token='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', add_to_git_credential=True)
19
 
20
  st.title("Code Generation")