TheBloke committed on
Commit c055c75
1 Parent(s): 44dd7a6

Upload README.md

Files changed (1)
  1. README.md +25 -21
README.md CHANGED
@@ -6,15 +6,16 @@ license_name: microsoft-research-license
 model_creator: Jeb Carter
 model_name: Psyonic Cetacean 20B
 model_type: llama
-prompt_template: '<|im_start|>system
-
-  {system_message}<|im_end|>
-
-  <|im_start|>user
-
-  {prompt}<|im_end|>
-
-  <|im_start|>assistant
-
-  '
+prompt_template: 'Below is an instruction that describes a task. Write a response
+  that appropriately completes the request.
+
+
+  ### Instruction:
+
+  {prompt}
+
+
+  ### Response:
+
+  '
 quantized_by: TheBloke
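For anyone hand-editing this frontmatter: the value is a YAML single-quoted flow scalar, in which a single line break folds to a space and each additional consecutive break contributes a newline, which is why the blank lines above matter. A quick self-check of what the new value parses to (a sketch using PyYAML; the excerpt is retyped from the `+` side of the hunk):

```python
import yaml  # PyYAML

# Frontmatter excerpt, retyped from the + side of the hunk above
frontmatter = """\
prompt_template: 'Below is an instruction that describes a task. Write a response
  that appropriately completes the request.


  ### Instruction:

  {prompt}


  ### Response:

  '
"""

# Single line breaks fold to spaces; n consecutive breaks yield n-1 newlines
template = yaml.safe_load(frontmatter)["prompt_template"]
print(repr(template))
# -> 'Below is ... request.\n\n### Instruction:\n{prompt}\n\n### Response:\n'
```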
@@ -66,14 +67,15 @@ These files were quantised using hardware kindly provided by [Massed Compute](ht
 <!-- repositories-available end -->
 
 <!-- prompt-template start -->
-## Prompt template: ChatML
+## Prompt template: Alpaca
 
 ```
-<|im_start|>system
-{system_message}<|im_end|>
-<|im_start|>user
-{prompt}<|im_end|>
-<|im_start|>assistant
+Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+### Instruction:
+{prompt}
+
+### Response:
 
 ```
 
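In README terms, the switch is from ChatML (which has a separate `{system_message}` slot) to Alpaca, which has no system slot; the fixed preamble sentence plays that role. A minimal sketch of filling in the new template downstream (`build_prompt` is a hypothetical helper; the template string is copied from the block above):

```python
# Alpaca prompt template, copied from the README block above
ALPACA_TEMPLATE = """Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
{prompt}

### Response:
"""

def build_prompt(prompt: str) -> str:
    """Substitute the user's instruction into the Alpaca template."""
    return ALPACA_TEMPLATE.format(prompt=prompt)

print(build_prompt("Tell me about AI"))
```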
@@ -251,11 +253,12 @@ from huggingface_hub import InferenceClient
 endpoint_url = "https://your-endpoint-url-here"
 
 prompt = "Tell me about AI"
-prompt_template=f'''<|im_start|>system
-{system_message}<|im_end|>
-<|im_start|>user
-{prompt}<|im_end|>
-<|im_start|>assistant
+prompt_template=f'''Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+### Instruction:
+{prompt}
+
+### Response:
 '''
 
 client = InferenceClient(endpoint_url)
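For context, a sketch of how the updated `prompt_template` flows into the rest of the README's Text Generation Inference example (`client.text_generation` is from `huggingface_hub`; the sampling values are illustrative, not part of this commit):

```python
from huggingface_hub import InferenceClient

endpoint_url = "https://your-endpoint-url-here"
prompt = "Tell me about AI"

# Alpaca-format prompt, as introduced by this commit
prompt_template = f'''Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
{prompt}

### Response:
'''

client = InferenceClient(endpoint_url)
# Illustrative sampling settings; tune for your use case
response = client.text_generation(
    prompt_template,
    max_new_tokens=128,
    do_sample=True,
    temperature=0.7,
    top_p=0.95,
    repetition_penalty=1.1,
)
print(response)
```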
@@ -311,11 +314,12 @@ model = AutoModelForCausalLM.from_pretrained(model_name_or_path,
 tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
 
 prompt = "Tell me about AI"
-prompt_template=f'''<|im_start|>system
-{system_message}<|im_end|>
-<|im_start|>user
-{prompt}<|im_end|>
-<|im_start|>assistant
+prompt_template=f'''Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+### Instruction:
+{prompt}
+
+### Response:
 '''
 
 print("\n\n*** Generate:")
 