i4never committed on
Commit 02a623c
1 Parent(s): 0f99a79

Update README.md

Files changed (1)
  1. README.md +64 -80
README.md CHANGED
@@ -1,5 +1,6 @@
  ---
  license: apache-2.0
+ pipeline_tag: text-generation
  ---
  <div style="width: 100%;">
  <img src="http://x-pai.algolet.com/bot/img/logo_core.png" alt="TigerBot" style="width: 20%; display: block; margin: auto;">
@@ -8,85 +9,68 @@ license: apache-2.0
  <font face="黑体" size="5"> A cutting-edge foundation for your very own LLM. </font>
  </p>
  <p align="center">
- 🌐 <a href="https://tigerbot.com/" target="_blank">TigerBot</a> • 🤗 <a href="https://huggingface.co/TigerResearch" target="_blank">Hugging Face</a>
+ 💻 <a href="https://github.com/TigerResearch/TigerBot" target="_blank">Github</a> • 🌐 <a href="https://tigerbot.com/" target="_blank">TigerBot</a> • 🤗 <a href="https://huggingface.co/TigerResearch" target="_blank">Hugging Face</a>
  </p>

- ## Github
-
- https://github.com/TigerResearch/TigerBot
-
- ## Usage
-
- ```python
- import os
-
- import fire
- import torch
- from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
-
- os.environ["TOKENIZERS_PARALLELISM"] = "false"
-
- # Instruction/response markers used by TigerBot chat models.
- tok_ins = "\n\n### Instruction:\n"
- tok_res = "\n\n### Response:\n"
- prompt_input = tok_ins + "{instruction}" + tok_res
-
-
- def main(
-     model_path: str,
-     max_input_length: int = 512,
-     max_generate_length: int = 1024,
- ):
-     print(f"loading model: {model_path}...")
-
-     model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16, device_map='auto')
-
-     generation_config = GenerationConfig.from_pretrained(model_path)
-     generation_config.max_length = max_generate_length
-     print(generation_config)
-
-     device = torch.cuda.current_device()
-
-     tokenizer = AutoTokenizer.from_pretrained(
-         model_path,
-         model_max_length=max_generate_length,
-         padding_side="left",
-         truncation_side='left',
-     )
-     if tokenizer.model_max_length is None or tokenizer.model_max_length > max_generate_length:
-         tokenizer.model_max_length = max_generate_length
-
-     # Interactive loop; the running conversation is accumulated in sess_text.
-     sess_text = ""
-     while True:
-         raw_text = input("prompt(\"exit\" to end, \"clear\" to clear session) >>> ")
-         if not raw_text:
-             print('prompt should not be empty!')
-             continue
-         if raw_text.strip() == "exit":
-             print('session ended.')
-             break
-         if raw_text.strip() == "clear":
-             print('session cleared.')
-             sess_text = ""
-             continue
-
-         query_text = raw_text.strip()
-         sess_text += tok_ins + query_text
-         # Wrap the whole session so far in the instruction/response template.
-         input_text = prompt_input.format_map({'instruction': sess_text.split(tok_ins, 1)[1]})
-         inputs = tokenizer(input_text, return_tensors='pt', truncation=True, max_length=max_input_length)
-         inputs = {k: v.to(device) for k, v in inputs.items()}
-         output = model.generate(**inputs, **generation_config.to_dict())
-         output_str = tokenizer.decode(output[0], skip_special_tokens=False, spaces_between_special_tokens=False)
-         # Keep only the text after the last response marker and drop the EOS token.
-         answer = output_str.rsplit(tok_res, 1)[1].strip()
-         if answer.endswith(tokenizer.eos_token):
-             answer = answer.rsplit(tokenizer.eos_token, 1)[0].strip()
-
-         sess_text += tok_res + answer
-
-         print("=" * 100)
-         print(answer)
-         print("=" * 100)
-
-
- if __name__ == "__main__":
-     fire.Fire(main)  # e.g. python infer.py --model_path TigerResearch/tigerbot-7b-chat
- ```
+ # Quick Start
+
+ - Method 1: use via transformers
+
+   - Clone the TigerBot repo
+
+     `git clone https://github.com/TigerResearch/TigerBot.git`
+
+   - Run the infer script
+
+     `python infer.py --model_path TigerResearch/tigerbot-13b-chat`
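+
+   Judging from the imports in the Usage snippet removed above, `infer.py` relies on `fire`, `torch`, and `transformers`; if your environment lacks them, something like `pip install fire torch transformers` should come first.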
+
+ - Method 2: download the weights and load them locally
+
+   - Clone the TigerBot repo
+
+     `git clone https://github.com/TigerResearch/TigerBot.git`
+
+   - Install git lfs: `git lfs install`
+
+   - Download the weights from Hugging Face or ModelScope (a minimal loading sketch follows the code block below):
+
+ ```shell
+ # 0 = do not skip the LFS smudge step, i.e. actually download the weight files.
+ export GIT_LFS_SKIP_SMUDGE=0
+
+ # From Hugging Face
+ git clone https://huggingface.co/TigerResearch/tigerbot-7b-chat
+ # From ModelScope
+ git clone https://www.modelscope.cn/TigerResearch/tigerbot-7b-chat-v3.git
+ ```
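+
+ Once cloned, the weights load straight from the local directory with transformers. The sketch below is a minimal illustration, not the repo's infer.py: the local path `./tigerbot-7b-chat`, the sample instruction, and `max_new_tokens=256` are placeholder assumptions, while the prompt markers follow the Usage snippet removed above.
+
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ MODEL_DIR = "./tigerbot-7b-chat"  # assumed path of the local clone
+
+ # Instruction/response markers, as in the removed Usage snippet.
+ tok_ins = "\n\n### Instruction:\n"
+ tok_res = "\n\n### Response:\n"
+
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
+ model = AutoModelForCausalLM.from_pretrained(
+     MODEL_DIR, torch_dtype=torch.bfloat16, device_map="auto"
+ )
+
+ # Single-turn generation; the instruction text is a placeholder.
+ prompt = tok_ins + "Introduce TigerBot in one sentence." + tok_res
+ inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+ output = model.generate(**inputs, max_new_tokens=256)
+ # Decode only the tokens generated after the prompt.
+ answer = tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
+ print(answer)
+ ```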