doberst committed
Commit 7864fe7
Parent: 96cbce6

Upload 2 files

generation_test_hf_script.py ADDED
@@ -0,0 +1,87 @@

from transformers import AutoModelForCausalLM, AutoTokenizer
import torch


def load_rag_benchmark_tester_ds():

    # pull 200 question rag benchmark test dataset from LLMWare HuggingFace repo
    from datasets import load_dataset

    ds_name = "llmware/rag_instruct_benchmark_tester"

    dataset = load_dataset(ds_name)

    print("update: loading test dataset - ", dataset)

    test_set = []
    for i, samples in enumerate(dataset["train"]):
        test_set.append(samples)

        # to view test set samples
        # print("rag benchmark dataset test samples: ", i, samples)

    return test_set


def run_test(model_name, test_ds):

    device = "cuda" if torch.cuda.is_available() else "cpu"

    print("update: model will be loaded on device - ", device)

    model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
    model.to(device)

    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

    for i, entries in enumerate(test_ds):

        # prepare prompt packaging used in fine-tuning process
        new_prompt = "<human>: " + entries["context"] + "\n" + entries["query"] + "\n" + "<bot>:"

        inputs = tokenizer(new_prompt, return_tensors="pt")
        start_of_output = len(inputs.input_ids[0])

        # temperature: set at 0.3 for consistency of output
        # max_new_tokens: set at 100 - may prematurely stop a few of the summaries

        outputs = model.generate(
            inputs.input_ids.to(device),
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.eos_token_id,
            do_sample=True,
            temperature=0.3,
            max_new_tokens=100,
        )

        output_only = tokenizer.decode(outputs[0][start_of_output:], skip_special_tokens=True)

        # quick/optional post-processing clean-up of potential fine-tuning artifacts

        eot = output_only.find("<|endoftext|>")
        if eot > -1:
            output_only = output_only[:eot]

        bot = output_only.find("<bot>:")
        if bot > -1:
            output_only = output_only[bot + len("<bot>:"):]

        # end - post-processing

        print("\n")
        print(i, "llm_response - ", output_only)
        print(i, "gold_answer - ", entries["answer"])

    return 0


if __name__ == "__main__":

    test_ds = load_rag_benchmark_tester_ds()

    model_name = "llmware/dragon-llama-7b-v0"
    output = run_test(model_name, test_ds)

generation_test_llmware_script.py ADDED
@@ -0,0 +1,64 @@

from llmware.prompts import Prompt


def load_rag_benchmark_tester_ds():

    # pull 200 question rag benchmark test dataset from LLMWare HuggingFace repo
    from datasets import load_dataset

    ds_name = "llmware/rag_instruct_benchmark_tester"

    dataset = load_dataset(ds_name)

    print("update: loading test dataset - ", dataset)

    test_set = []
    for i, samples in enumerate(dataset["train"]):
        test_set.append(samples)

        # to view test set samples
        # print("rag benchmark dataset test samples: ", i, samples)

    return test_set


def run_test(model_name, prompt_list):

    print("\nupdate: Starting RAG Benchmark Inference Test")

    prompter = Prompt().load_model(model_name, from_hf=True)

    for i, entries in enumerate(prompt_list):

        prompt = entries["query"]
        context = entries["context"]

        response = prompter.prompt_main(prompt, context=context, prompt_name="default_with_context", temperature=0.3)

        # post-inference evidence checks of the response against the source context
        fc = prompter.evidence_check_numbers(response)
        sc = prompter.evidence_comparison_stats(response)
        sr = prompter.evidence_check_sources(response)

        print("\nupdate: model inference output - ", i, response["llm_response"])
        print("update: gold_answer - ", i, entries["answer"])

        for fact_check in fc:
            print("update: fact check - ", fact_check["fact_check"])

        for comparison_stat in sc:
            print("update: comparison stats - ", comparison_stat["comparison_stats"])

        for source in sr:
            print("update: sources - ", source["source_review"])

    return 0


if __name__ == "__main__":

    core_test_set = load_rag_benchmark_tester_ds()

    model_name = "llmware/dragon-llama-7b-v0"

    output = run_test(model_name, core_test_set)
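For a quick smoke test before running the full 200-question benchmark, the same functions can be driven on a small slice of the dataset - a minimal sketch, assuming generation_test_hf_script.py is importable from the working directory:

    from generation_test_hf_script import load_rag_benchmark_tester_ds, run_test

    # load the benchmark and keep only the first 5 samples for a fast sanity check
    small_test_set = load_rag_benchmark_tester_ds()[:5]
    run_test("llmware/dragon-llama-7b-v0", small_test_set)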