DavidLanz committed
Commit 71ded78
1 Parent(s): be689f2

Upload folder using huggingface_hub

Files changed (3)
  1. README.md +124 -3
  2. adapter_config.json +29 -0
  3. adapter_model.safetensors +3 -0
README.md CHANGED
@@ -1,3 +1,124 @@
- ---
- license: apache-2.0
- ---
+ ---
+ language:
+ - en
+ license: apache-2.0
+ library_name: peft
+ tags:
+ - facebook
+ - meta
+ - pytorch
+ - llama
+ - llama-3
+ base_model: DavidLanz/Llama3-tw-8B-Instruct
+ model_name: Llama 3 8B Instruct
+ inference: false
+ model_creator: Meta Llama 3
+ model_type: llama
+ pipeline_tag: text-generation
+ quantized_by: QLoRA
+ ---
+
+ # Model Card for Model ID
+
+ This PEFT adapter fine-tunes the base model to predict the BTC closing price from the previous day's trading data.
+
+ Disclaimer: This model is an experiment in applying an LLM to a time-series problem. It is not investment advice, and its predictions should not be used as a basis for investment decisions.
+
+ ## Model Details
+
+ Training data source: BTC/USD provided by [Binance](https://www.binance.com/).
+
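+ The exact training prompt template is not included in this commit. As a hypothetical illustration, a helper that formats one daily candle the same way as the inference prompt under Uses below might look like this:
+
+ ```python
+ # Hypothetical helper mirroring the inference prompt under "Uses"; the actual
+ # training prompt construction is not part of this commit.
+ def make_user_prompt(open_, high, low, close, volume):
+     # "Yesterday's open was {open_}, high {high}, low {low}, close {close},
+     #  and volume {volume}. Please predict today's BTC closing price?"
+     return (
+         f"昨日開盤價為{open_:.2f},最高價為{high:.2f},最低價為{low:.2f},"
+         f"收盤價為{close:.2f},交易量為{volume:.2f}。請預測今日BTC的收盤價?"
+     )
+
+ print(make_user_prompt(64437.18, 64960.37, 62953.90, 64808.35, 808273.27))
+ ```
+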
+ ### Model Description
+
+ This repo contains QLoRA adapter files for [Meta's Llama 3 8B tw Instruct](https://huggingface.co/DavidLanz/Llama3-tw-8B-Instruct).
+
+ ## Uses
+
+ ```python
+ import torch
+ from peft import PeftModel
+ from transformers import (
+     AutoModelForCausalLM,
+     AutoTokenizer,
+     BitsAndBytesConfig,
+     pipeline,
+ )
+
+ # 4-bit NF4 quantization with float16 compute (the QLoRA setup).
+ device_map = {"": 0}
+ use_4bit = True
+ bnb_4bit_compute_dtype = "float16"
+ bnb_4bit_quant_type = "nf4"
+ use_nested_quant = False
+ compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
+
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=use_4bit,
+     bnb_4bit_quant_type=bnb_4bit_quant_type,
+     bnb_4bit_compute_dtype=compute_dtype,
+     bnb_4bit_use_double_quant=use_nested_quant,
+ )
+
+ base_model_path = "DavidLanz/Llama3-tw-8B-Instruct"
+ adapter_path = "DavidLanz/Llama3_tw_8B_btc_qlora"
+
+ base_model = AutoModelForCausalLM.from_pretrained(
+     base_model_path,
+     low_cpu_mem_usage=True,
+     return_dict=True,
+     quantization_config=bnb_config,
+     torch_dtype=torch.float16,
+     device_map=device_map,
+ )
+ # Attach the QLoRA adapter weights to the quantized base model.
+ model = PeftModel.from_pretrained(base_model, adapter_path)
+
+ tokenizer = AutoTokenizer.from_pretrained(base_model_path, trust_remote_code=True)
+
+ text_gen_pipeline = pipeline(
+     "text-generation",
+     model=model,
+     tokenizer=tokenizer,
+ )
+
+ messages = [
+     {
+         "role": "system",
+         # "You are a professional BTC cryptocurrency analyst."
+         "content": "你是一位專業的BTC虛擬貨幣分析師",
+     },
+     # "Yesterday's open was 64437.18, high 64960.37, low 62953.90, close
+     # 64808.35, and volume 808273.27. Please predict today's BTC closing price?"
+     {"role": "user", "content": "昨日開盤價為64437.18,最高價為64960.37,最低價為62953.90,收盤價為64808.35,交易量為808273.27。請預測今日BTC的收盤價?"},
+ ]
+
+ # Render the conversation with the Llama 3 chat template.
+ prompt = text_gen_pipeline.tokenizer.apply_chat_template(
+     messages,
+     tokenize=False,
+     add_generation_prompt=True,
+ )
+
+ # Llama 3 marks the end of a turn with <|eot_id|>, so stop on either token.
+ terminators = [
+     text_gen_pipeline.tokenizer.eos_token_id,
+     text_gen_pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>"),
+ ]
+
+ outputs = text_gen_pipeline(
+     prompt,
+     max_new_tokens=256,
+     eos_token_id=terminators,
+     do_sample=True,
+     temperature=0.6,
+     top_p=0.9,
+ )
+ # Print only the newly generated text, without the echoed prompt.
+ print(outputs[0]["generated_text"][len(prompt):])
+ ```
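+
+ For deployment without the PEFT wrapper, the adapter can be folded into the base weights. A minimal sketch, assuming the base model is reloaded in fp16 (merging is not supported on 4-bit quantized weights); the output directory name is illustrative:
+
+ ```python
+ import torch
+ from peft import PeftModel
+ from transformers import AutoModelForCausalLM
+
+ # Reload the base model unquantized, attach the adapter, then merge the
+ # LoRA deltas into the base weights for adapter-free inference.
+ base_fp16 = AutoModelForCausalLM.from_pretrained(
+     "DavidLanz/Llama3-tw-8B-Instruct",
+     torch_dtype=torch.float16,
+     device_map="auto",
+ )
+ model = PeftModel.from_pretrained(base_fp16, "DavidLanz/Llama3_tw_8B_btc_qlora")
+ merged = model.merge_and_unload()  # plain transformers model, no PEFT wrapper
+ merged.save_pretrained("./llama3-tw-8b-btc-merged")  # illustrative path
+ ```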
+
+ ### Framework versions
+
+ - PEFT 0.11.1
+ - PEFT 0.10.0
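+
+ For reference, the adapter_config.json added in this commit (shown below) corresponds to roughly the following PEFT LoraConfig. This is a reconstruction for readers who want to reproduce the fine-tune, not the author's original training script:
+
+ ```python
+ from peft import LoraConfig
+
+ # Values taken from adapter_config.json; everything else left at defaults.
+ lora_config = LoraConfig(
+     r=64,                                 # LoRA rank
+     lora_alpha=16,                        # scaling factor
+     lora_dropout=0.1,
+     bias="none",
+     target_modules=["q_proj", "v_proj"],  # attention query/value projections
+     task_type="CAUSAL_LM",
+ )
+ ```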
adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "DavidLanz/Llama3-tw-8B-Instruct",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 64,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:345cf8f5ccde0dc06a328ecf2dcdd0a220567e96372728db00b3b79d87702d51
+ size 109069176
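
The file above is a Git LFS pointer, not the weights themselves. When working outside the Hub's automatic resolution, the actual adapter weights can be fetched with huggingface_hub; a minimal sketch:

```python
from huggingface_hub import hf_hub_download

# Downloads the real safetensors file that the LFS pointer above references.
local_path = hf_hub_download(
    repo_id="DavidLanz/Llama3_tw_8B_btc_qlora",
    filename="adapter_model.safetensors",
)
print(local_path)
```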