aaronday3 committed on
Commit
75daacd
1 Parent(s): 856c970

Update README.md

Files changed (1)
  1. README.md +135 -1265
README.md CHANGED
@@ -1,1288 +1,158 @@
1
  ---
2
- language:
3
- - en
4
- - de
5
- - fr
6
- - it
7
- - pt
8
- - hi
9
- - es
10
- - th
11
- library_name: transformers
12
  license: llama3.1
13
- pipeline_tag: text-generation
14
- tags:
15
- - facebook
16
- - meta
17
- - pytorch
18
- - llama
19
- - llama-3
20
- extra_gated_prompt: "### LLAMA 3.1 COMMUNITY LICENSE AGREEMENT\nLlama 3.1 Version\
21
- \ Release Date: July 23, 2024\n\"Agreement\" means the terms and conditions for\
22
- \ use, reproduction, distribution and modification of the Llama Materials set forth\
23
- \ herein.\n\"Documentation\" means the specifications, manuals and documentation\
24
- \ accompanying Llama 3.1 distributed by Meta at https://llama.meta.com/doc/overview.\n\
25
- \"Licensee\" or \"you\" means you, or your employer or any other person or entity\
26
- \ (if you are entering into this Agreement on such person or entity’s behalf), of\
27
- \ the age required under applicable laws, rules or regulations to provide legal\
28
- \ consent and that has legal authority to bind your employer or such other person\
29
- \ or entity if you are entering in this Agreement on their behalf.\n\"Llama 3.1\"\
30
- \ means the foundational large language models and software and algorithms, including\
31
- \ machine-learning model code, trained model weights, inference-enabling code, training-enabling\
32
- \ code, fine-tuning enabling code and other elements of the foregoing distributed\
33
- \ by Meta at https://llama.meta.com/llama-downloads.\n\"Llama Materials\" means,\
34
- \ collectively, Meta’s proprietary Llama 3.1 and Documentation (and any portion\
35
- \ thereof) made available under this Agreement.\n\"Meta\" or \"we\" means Meta Platforms\
36
- \ Ireland Limited (if you are located in or, if you are an entity, your principal\
37
- \ place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you\
38
- \ are located outside of the EEA or Switzerland).\n \n1. License Rights and Redistribution.\n\
39
- a. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable\
40
- \ and royalty-free limited license under Meta’s intellectual property or other rights\
41
- \ owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy,\
42
- \ create derivative works of, and make modifications to the Llama Materials.\nb.\
43
- \ Redistribution and Use.\ni. If you distribute or make available the Llama Materials\
44
- \ (or any derivative works thereof), or a product or service (including another\
45
- \ AI model) that contains any of them, you shall (A) provide a copy of this Agreement\
46
- \ with any such Llama Materials; and (B) prominently display “Built with Llama”\
47
- \ on a related website, user interface, blogpost, about page, or product documentation.\
48
- \ If you use the Llama Materials or any outputs or results of the Llama Materials\
49
- \ to create, train, fine tune, or otherwise improve an AI model, which is distributed\
50
- \ or made available, you shall also include “Llama” at the beginning of any such\
51
- \ AI model name.\nii. If you receive Llama Materials, or any derivative works thereof,\
52
- \ from a Licensee as part of an integrated end user product, then Section 2 of\
53
- \ this Agreement will not apply to you.\niii. You must retain in all copies of the\
54
- \ Llama Materials that you distribute the following attribution notice within a\
55
- \ “Notice” text file distributed as a part of such copies: “Llama 3.1 is licensed\
56
- \ under the Llama 3.1 Community License, Copyright © Meta Platforms, Inc. All Rights\
57
- \ Reserved.”\niv. Your use of the Llama Materials must comply with applicable laws\
58
- \ and regulations (including trade compliance laws and regulations) and adhere to\
59
- \ the Acceptable Use Policy for the Llama Materials (available at https://llama.meta.com/llama3_1/use-policy),\
60
- \ which is hereby incorporated by reference into this Agreement.\n2. Additional\
61
- \ Commercial Terms. If, on the Llama 3.1 version release date, the monthly active\
62
- \ users of the products or services made available by or for Licensee, or Licensee’s\
63
- \ affiliates, is greater than 700 million monthly active users in the preceding\
64
- \ calendar month, you must request a license from Meta, which Meta may grant to\
65
- \ you in its sole discretion, and you are not authorized to exercise any of the\
66
- \ rights under this Agreement unless or until Meta otherwise expressly grants you\
67
- \ such rights.\n3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE\
68
- \ LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN “AS IS”\
69
- \ BASIS, WITHOUT WARRANTIES OF ANY KIND, AND META DISCLAIMS ALL WARRANTIES OF ANY\
70
- \ KIND, BOTH EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES\
71
- \ OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.\
72
- \ YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING\
73
- \ THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA\
74
- \ MATERIALS AND ANY OUTPUT AND RESULTS.\n4. Limitation of Liability. IN NO EVENT\
75
- \ WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN\
76
- \ CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS\
77
- \ AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL,\
78
- \ EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED\
79
- \ OF THE POSSIBILITY OF ANY OF THE FOREGOING.\n5. Intellectual Property.\na. No\
80
- \ trademark licenses are granted under this Agreement, and in connection with the\
81
- \ Llama Materials, neither Meta nor Licensee may use any name or mark owned by or\
82
- \ associated with the other or any of its affiliates, except as required for reasonable\
83
- \ and customary use in describing and redistributing the Llama Materials or as set\
84
- \ forth in this Section 5(a). Meta hereby grants you a license to use “Llama” (the\
85
- \ “Mark”) solely as required to comply with the last sentence of Section 1.b.i.\
86
- \ You will comply with Meta’s brand guidelines (currently accessible at https://about.meta.com/brand/resources/meta/company-brand/\
87
- \ ). All goodwill arising out of your use of the Mark will inure to the benefit\
88
- \ of Meta.\nb. Subject to Meta’s ownership of Llama Materials and derivatives made\
89
- \ by or for Meta, with respect to any derivative works and modifications of the\
90
- \ Llama Materials that are made by you, as between you and Meta, you are and will\
91
- \ be the owner of such derivative works and modifications.\nc. If you institute\
92
- \ litigation or other proceedings against Meta or any entity (including a cross-claim\
93
- \ or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 3.1 outputs\
94
- \ or results, or any portion of any of the foregoing, constitutes infringement of\
95
- \ intellectual property or other rights owned or licensable by you, then any licenses\
96
- \ granted to you under this Agreement shall terminate as of the date such litigation\
97
- \ or claim is filed or instituted. You will indemnify and hold harmless Meta from\
98
- \ and against any claim by any third party arising out of or related to your use\
99
- \ or distribution of the Llama Materials.\n6. Term and Termination. The term of\
100
- \ this Agreement will commence upon your acceptance of this Agreement or access\
101
- \ to the Llama Materials and will continue in full force and effect until terminated\
102
- \ in accordance with the terms and conditions herein. Meta may terminate this Agreement\
103
- \ if you are in breach of any term or condition of this Agreement. Upon termination\
104
- \ of this Agreement, you shall delete and cease use of the Llama Materials. Sections\
105
- \ 3, 4 and 7 shall survive the termination of this Agreement.\n7. Governing Law\
106
- \ and Jurisdiction. This Agreement will be governed and construed under the laws\
107
- \ of the State of California without regard to choice of law principles, and the\
108
- \ UN Convention on Contracts for the International Sale of Goods does not apply\
109
- \ to this Agreement. The courts of California shall have exclusive jurisdiction\
110
- \ of any dispute arising out of this Agreement.\n### Llama 3.1 Acceptable Use Policy\n\
111
- Meta is committed to promoting safe and fair use of its tools and features, including\
112
- \ Llama 3.1. If you access or use Llama 3.1, you agree to this Acceptable Use Policy\
113
- \ (“Policy”). The most recent copy of this policy can be found at [https://llama.meta.com/llama3_1/use-policy](https://llama.meta.com/llama3_1/use-policy)\n\
114
- #### Prohibited Uses\nWe want everyone to use Llama 3.1 safely and responsibly.\
115
- \ You agree you will not use, or allow others to use, Llama 3.1 to:\n 1. Violate\
116
- \ the law or others’ rights, including to:\n 1. Engage in, promote, generate,\
117
- \ contribute to, encourage, plan, incite, or further illegal or unlawful activity\
118
- \ or content, such as:\n 1. Violence or terrorism\n 2. Exploitation\
119
- \ or harm to children, including the solicitation, creation, acquisition, or dissemination\
120
- \ of child exploitative content or failure to report Child Sexual Abuse Material\n\
121
- \ 3. Human trafficking, exploitation, and sexual violence\n 4. The\
122
- \ illegal distribution of information or materials to minors, including obscene\
123
- \ materials, or failure to employ legally required age-gating in connection with\
124
- \ such information or materials.\n 5. Sexual solicitation\n 6. Any\
125
- \ other criminal activity\n 3. Engage in, promote, incite, or facilitate the\
126
- \ harassment, abuse, threatening, or bullying of individuals or groups of individuals\n\
127
- \ 4. Engage in, promote, incite, or facilitate discrimination or other unlawful\
128
- \ or harmful conduct in the provision of employment, employment benefits, credit,\
129
- \ housing, other economic benefits, or other essential goods and services\n 5.\
130
- \ Engage in the unauthorized or unlicensed practice of any profession including,\
131
- \ but not limited to, financial, legal, medical/health, or related professional\
132
- \ practices\n 6. Collect, process, disclose, generate, or infer health, demographic,\
133
- \ or other sensitive personal or private information about individuals without rights\
134
- \ and consents required by applicable laws\n 7. Engage in or facilitate any action\
135
- \ or generate any content that infringes, misappropriates, or otherwise violates\
136
- \ any third-party rights, including the outputs or results of any products or services\
137
- \ using the Llama Materials\n 8. Create, generate, or facilitate the creation\
138
- \ of malicious code, malware, computer viruses or do anything else that could disable,\
139
- \ overburden, interfere with or impair the proper working, integrity, operation\
140
- \ or appearance of a website or computer system\n2. Engage in, promote, incite,\
141
- \ facilitate, or assist in the planning or development of activities that present\
142
- \ a risk of death or bodily harm to individuals, including use of Llama 3.1 related\
143
- \ to the following:\n 1. Military, warfare, nuclear industries or applications,\
144
- \ espionage, use for materials or activities that are subject to the International\
145
- \ Traffic Arms Regulations (ITAR) maintained by the United States Department of\
146
- \ State\n 2. Guns and illegal weapons (including weapon development)\n 3.\
147
- \ Illegal drugs and regulated/controlled substances\n 4. Operation of critical\
148
- \ infrastructure, transportation technologies, or heavy machinery\n 5. Self-harm\
149
- \ or harm to others, including suicide, cutting, and eating disorders\n 6. Any\
150
- \ content intended to incite or promote violence, abuse, or any infliction of bodily\
151
- \ harm to an individual\n3. Intentionally deceive or mislead others, including use\
152
- \ of Llama 3.1 related to the following:\n 1. Generating, promoting, or furthering\
153
- \ fraud or the creation or promotion of disinformation\n 2. Generating, promoting,\
154
- \ or furthering defamatory content, including the creation of defamatory statements,\
155
- \ images, or other content\n 3. Generating, promoting, or further distributing\
156
- \ spam\n 4. Impersonating another individual without consent, authorization,\
157
- \ or legal right\n 5. Representing that the use of Llama 3.1 or outputs are human-generated\n\
158
- \ 6. Generating or facilitating false online engagement, including fake reviews\
159
- \ and other means of fake online engagement\n4. Fail to appropriately disclose to\
160
- \ end users any known dangers of your AI system\nPlease report any violation of\
161
- \ this Policy, software “bug,” or other problems that could lead to a violation\
162
- \ of this Policy through one of the following means:\n * Reporting issues with\
163
- \ the model: [https://github.com/meta-llama/llama-models/issues](https://github.com/meta-llama/llama-models/issues)\n\
164
- \ * Reporting risky content generated by the model:\n developers.facebook.com/llama_output_feedback\n\
165
- \ * Reporting bugs and security concerns: facebook.com/whitehat/info\n * Reporting\
166
- \ violations of the Acceptable Use Policy or unlicensed uses of Meta Llama 3: [email protected]"
167
- extra_gated_fields:
168
- First Name: text
169
- Last Name: text
170
- Date of birth: date_picker
171
- Country: country
172
- Affiliation: text
173
- Job title:
174
- type: select
175
- options:
176
- - Student
177
- - Research Graduate
178
- - AI researcher
179
- - AI developer/engineer
180
- - Reporter
181
- - Other
182
- geo: ip_location
183
- ? By clicking Submit below I accept the terms of the license and acknowledge that
184
- the information I provide will be collected stored processed and shared in accordance
185
- with the Meta Privacy Policy
186
- : checkbox
187
- extra_gated_description: The information you provide will be collected, stored, processed
188
- and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/).
189
- extra_gated_button_content: Submit
190
  ---
191
 
192
- ## Model Information
193
 
194
- The Meta Llama 3.1 collection of multilingual large language models (LLMs) is a collection of pretrained and instruction tuned generative models in 8B, 70B and 405B sizes (text in/text out). The Llama 3.1 instruction tuned text only models (8B, 70B, 405B) are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.
195
 
196
- **Model developer**: Meta
197
 
198
- **Model Architecture:** Llama 3.1 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety.
199
 
 
200
 
201
- <table>
202
- <tr>
203
- <td>
204
- </td>
205
- <td><strong>Training Data</strong>
206
- </td>
207
- <td><strong>Params</strong>
208
- </td>
209
- <td><strong>Input modalities</strong>
210
- </td>
211
- <td><strong>Output modalities</strong>
212
- </td>
213
- <td><strong>Context length</strong>
214
- </td>
215
- <td><strong>GQA</strong>
216
- </td>
217
- <td><strong>Token count</strong>
218
- </td>
219
- <td><strong>Knowledge cutoff</strong>
220
- </td>
221
- </tr>
222
- <tr>
223
- <td rowspan="3" >Llama 3.1 (text only)
224
- </td>
225
- <td rowspan="3" >A new mix of publicly available online data.
226
- </td>
227
- <td>8B
228
- </td>
229
- <td>Multilingual Text
230
- </td>
231
- <td>Multilingual Text and code
232
- </td>
233
- <td>128k
234
- </td>
235
- <td>Yes
236
- </td>
237
- <td rowspan="3" >15T+
238
- </td>
239
- <td rowspan="3" >December 2023
240
- </td>
241
- </tr>
242
- <tr>
243
- <td>70B
244
- </td>
245
- <td>Multilingual Text
246
- </td>
247
- <td>Multilingual Text and code
248
- </td>
249
- <td>128k
250
- </td>
251
- <td>Yes
252
- </td>
253
- </tr>
254
- <tr>
255
- <td>405B
256
- </td>
257
- <td>Multilingual Text
258
- </td>
259
- <td>Multilingual Text and code
260
- </td>
261
- <td>128k
262
- </td>
263
- <td>Yes
264
- </td>
265
- </tr>
266
- </table>
267
 
 
268
 
269
- **Supported languages:** English, German, French, Italian, Portuguese, Hindi, Spanish, and Thai.
270
 
271
- **Llama 3.1 family of models**. Token counts refer to pretraining data only. All model versions use Grouped-Query Attention (GQA) for improved inference scalability.
272
 
273
- **Model Release Date:** July 23, 2024.
274
 
275
- **Status:** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.
276
 
277
- **License:** A custom commercial license, the Llama 3.1 Community License, is available at: [https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE](https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE)
278
 
279
- Where to send questions or comments about the model Instructions on how to provide feedback or comments on the model can be found in the model [README](https://github.com/meta-llama/llama3). For more technical information about generation parameters and recipes for how to use Llama 3.1 in applications, please go [here](https://github.com/meta-llama/llama-recipes).
280
 
 
281
 
282
- ## Intended Use
283
-
284
- **Intended Use Cases** Llama 3.1 is intended for commercial and research use in multiple languages. Instruction tuned text only models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. The Llama 3.1 model collection also supports the ability to leverage the outputs of its models to improve other models including synthetic data generation and distillation. The Llama 3.1 Community License allows for these use cases.
285
-
286
- **Out-of-scope** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in any other way that is prohibited by the Acceptable Use Policy and Llama 3.1 Community License. Use in languages beyond those explicitly referenced as supported in this model card**.
287
-
288
- **<span style="text-decoration:underline;">Note</span>: Llama 3.1 has been trained on a broader collection of languages than the 8 supported languages. Developers may fine-tune Llama 3.1 models for languages beyond the 8 supported languages provided they comply with the Llama 3.1 Community License and the Acceptable Use Policy and in such cases are responsible for ensuring that any uses of Llama 3.1 in additional languages is done in a safe and responsible manner.
289
-
290
- ## How to use
291
-
292
- This repository contains two versions of Meta-Llama-3.1-70B-Instruct, for use with transformers and with the original `llama` codebase.
293
-
294
- ### Use with transformers
295
-
296
- Starting with `transformers >= 4.43.0` onward, you can run conversational inference using the Transformers `pipeline` abstraction or by leveraging the Auto classes with the `generate()` function.
297
-
298
- Make sure to update your transformers installation via `pip install --upgrade transformers`.
299
-
300
- See the snippet below for usage with Transformers:
301
-
302
- ```python
303
- import transformers
304
- import torch
305
-
306
- model_id = "meta-llama/Meta-Llama-3.1-70B-Instruct"
307
-
308
- pipeline = transformers.pipeline(
309
- "text-generation",
310
- model=model_id,
311
- model_kwargs={"torch_dtype": torch.bfloat16},
312
- device_map="auto",
313
- )
314
-
315
- messages = [
316
- {"role": "system", "content": "You are a pirate chatbot who always responds in pirate speak!"},
317
- {"role": "user", "content": "Who are you?"},
318
- ]
319
-
320
- outputs = pipeline(
321
- messages,
322
- max_new_tokens=256,
323
- )
324
- print(outputs[0]["generated_text"][-1])
325
- ```
326
-
327
- ### Tool use with transformers
328
-
329
- LLaMA-3.1 supports multiple tool use formats. You can see a full guide to prompt formatting [here](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/).
330
-
331
- Tool use is also supported through [chat templates](https://huggingface.co/docs/transformers/main/chat_templating#advanced-tool-use--function-calling) in Transformers.
332
- Here is a quick example showing a single simple tool:
333
-
334
- ```python
335
- # First, define a tool
336
- def get_current_temperature(location: str) -> float:
337
- """
338
- Get the current temperature at a location.
339
-
340
- Args:
341
- location: The location to get the temperature for, in the format "City, Country"
342
- Returns:
343
- The current temperature at the specified location in the specified units, as a float.
344
- """
345
- return 22. # A real function should probably actually get the temperature!
346
-
347
- # Next, create a chat and apply the chat template
348
- messages = [
349
- {"role": "system", "content": "You are a bot that responds to weather queries."},
350
- {"role": "user", "content": "Hey, what's the temperature in Paris right now?"}
351
- ]
352
-
353
- inputs = tokenizer.apply_chat_template(messages, tools=[get_current_temperature], add_generation_prompt=True)
354
- ```
355
-
356
- You can then generate text from this input as normal. If the model generates a tool call, you should add it to the chat like so:
357
-
358
- ```python
359
- tool_call = {"name": "get_current_temperature", "arguments": {"location": "Paris, France"}}
360
- messages.append({"role": "assistant", "tool_calls": [{"type": "function", "function": tool_call}]})
361
- ```
362
-
363
- and then call the tool and append the result, with the `tool` role, like so:
364
-
365
- ```python
366
- messages.append({"role": "tool", "name": "get_current_temperature", "content": "22.0"})
367
- ```
368
-
369
- After that, you can `generate()` again to let the model use the tool result in the chat. Note that this was a very brief introduction to tool calling - for more information,
370
- see the [LLaMA prompt format docs](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/) and the Transformers [tool use documentation](https://huggingface.co/docs/transformers/main/chat_templating#advanced-tool-use--function-calling).
371
-
372
-
373
- ### Use with `bitsandbytes`
374
-
375
- The model checkpoints can be used in `8-bit` and `4-bit` for further memory optimisations using `bitsandbytes` and `transformers`
376
-
377
- See the snippet below for usage:
378
-
379
- ```python
380
- import torch
381
- from transformers import AutoModelForCausalLM, AutoTokenizer
382
-
383
- model_id = "meta-llama/Meta-Llama-3.1-70B-Instruct"
384
- quantization_config = BitsAndBytesConfig(load_in_8bit=True)
385
-
386
- quantized_model = AutoModelForCausalLM.from_pretrained(
387
- model_id, device_map="auto", torch_dtype=torch.bfloat16, quantization_config=quantization_config)
388
-
389
- tokenizer = AutoTokenizer.from_pretrained(model_id)
390
- input_text = "What are we having for dinner?"
391
- input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
392
-
393
- output = quantized_model.generate(**input_ids, max_new_tokens=10)
394
-
395
- print(tokenizer.decode(output[0], skip_special_tokens=True))
396
- ```
397
-
398
- To load in 4-bit simply pass `load_in_4bit=True`
399
-
400
- ### Use with `llama`
401
-
402
- Please, follow the instructions in the [repository](https://github.com/meta-llama/llama).
403
-
404
- To download Original checkpoints, see the example command below leveraging `huggingface-cli`:
405
-
406
- ```
407
- huggingface-cli download meta-llama/Meta-Llama-3.1-70B-Instruct --include "original/*" --local-dir Meta-Llama-3.1-70B-Instruct
408
- ```
409
-
410
-
411
- ## Hardware and Software
412
-
413
- **Training Factors** We used custom training libraries, Meta's custom built GPU cluster, and production infrastructure for pretraining. Fine-tuning, annotation, and evaluation were also performed on production infrastructure.
414
-
415
- **Training utilized a cumulative of** 39.3M GPU hours of computation on H100-80GB (TDP of 700W) type hardware, per the table below. Training time is the total GPU time required for training each model and power consumption is the peak power capacity per GPU device used, adjusted for power usage efficiency.
416
-
417
-
418
- **Training Greenhouse Gas Emissions** Estimated total location-based greenhouse gas emissions were **11,390** tons CO2eq for training. Since 2020, Meta has maintained net zero greenhouse gas emissions in its global operations and matched 100% of its electricity use with renewable energy, therefore the total market-based greenhouse gas emissions for training were 0 tons CO2eq.
419
-
420
-
421
- <table>
422
- <tr>
423
- <td>
424
- </td>
425
- <td><strong>Training Time (GPU hours)</strong>
426
- </td>
427
- <td><strong>Training Power Consumption (W)</strong>
428
- </td>
429
- <td><strong>Training Location-Based Greenhouse Gas Emissions</strong>
430
- <p>
431
- <strong>(tons CO2eq)</strong>
432
- </td>
433
- <td><strong>Training Market-Based Greenhouse Gas Emissions</strong>
434
- <p>
435
- <strong>(tons CO2eq)</strong>
436
- </td>
437
- </tr>
438
- <tr>
439
- <td>Llama 3.1 8B
440
- </td>
441
- <td>1.46M
442
- </td>
443
- <td>700
444
- </td>
445
- <td>420
446
- </td>
447
- <td>0
448
- </td>
449
- </tr>
450
- <tr>
451
- <td>Llama 3.1 70B
452
- </td>
453
- <td>7.0M
454
- </td>
455
- <td>700
456
- </td>
457
- <td>2,040
458
- </td>
459
- <td>0
460
- </td>
461
- </tr>
462
- <tr>
463
- <td>Llama 3.1 405B
464
- </td>
465
- <td>30.84M
466
- </td>
467
- <td>700
468
- </td>
469
- <td>8,930
470
- </td>
471
- <td>0
472
- </td>
473
- </tr>
474
- <tr>
475
- <td>Total
476
- </td>
477
- <td>39.3M
478
- <td>
479
- <ul>
480
-
481
- </ul>
482
- </td>
483
- <td>11,390
484
- </td>
485
- <td>0
486
- </td>
487
- </tr>
488
- </table>
489
-
490
-
491
-
492
- The methodology used to determine training energy use and greenhouse gas emissions can be found [here](https://arxiv.org/pdf/2204.05149). Since Meta is openly releasing these models, the training energy use and greenhouse gas emissions will not be incurred by others.
493
-
494
-
495
- ## Training Data
496
-
497
- **Overview:** Llama 3.1 was pretrained on ~15 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over 25M synthetically generated examples.
498
-
499
- **Data Freshness:** The pretraining data has a cutoff of December 2023.
500
-
501
-
502
- ## Benchmark scores
503
-
504
- In this section, we report the results for Llama 3.1 models on standard automatic benchmarks. For all the evaluations, we use our internal evaluations library.
505
-
506
- ### Base pretrained models
507
-
508
-
509
- <table>
510
- <tr>
511
- <td><strong>Category</strong>
512
- </td>
513
- <td><strong>Benchmark</strong>
514
- </td>
515
- <td><strong># Shots</strong>
516
- </td>
517
- <td><strong>Metric</strong>
518
- </td>
519
- <td><strong>Llama 3 8B</strong>
520
- </td>
521
- <td><strong>Llama 3.1 8B</strong>
522
- </td>
523
- <td><strong>Llama 3 70B</strong>
524
- </td>
525
- <td><strong>Llama 3.1 70B</strong>
526
- </td>
527
- <td><strong>Llama 3.1 405B</strong>
528
- </td>
529
- </tr>
530
- <tr>
531
- <td rowspan="7" >General
532
- </td>
533
- <td>MMLU
534
- </td>
535
- <td>5
536
- </td>
537
- <td>macro_avg/acc_char
538
- </td>
539
- <td>66.7
540
- </td>
541
- <td>66.7
542
- </td>
543
- <td>79.5
544
- </td>
545
- <td>79.3
546
- </td>
547
- <td>85.2
548
- </td>
549
- </tr>
550
- <tr>
551
- <td>MMLU-Pro (CoT)
552
- </td>
553
- <td>5
554
- </td>
555
- <td>macro_avg/acc_char
556
- </td>
557
- <td>36.2
558
- </td>
559
- <td>37.1
560
- </td>
561
- <td>55.0
562
- </td>
563
- <td>53.8
564
- </td>
565
- <td>61.6
566
- </td>
567
- </tr>
568
- <tr>
569
- <td>AGIEval English
570
- </td>
571
- <td>3-5
572
- </td>
573
- <td>average/acc_char
574
- </td>
575
- <td>47.1
576
- </td>
577
- <td>47.8
578
- </td>
579
- <td>63.0
580
- </td>
581
- <td>64.6
582
- </td>
583
- <td>71.6
584
- </td>
585
- </tr>
586
- <tr>
587
- <td>CommonSenseQA
588
- </td>
589
- <td>7
590
- </td>
591
- <td>acc_char
592
- </td>
593
- <td>72.6
594
- </td>
595
- <td>75.0
596
- </td>
597
- <td>83.8
598
- </td>
599
- <td>84.1
600
- </td>
601
- <td>85.8
602
- </td>
603
- </tr>
604
- <tr>
605
- <td>Winogrande
606
- </td>
607
- <td>5
608
- </td>
609
- <td>acc_char
610
- </td>
611
- <td>-
612
- </td>
613
- <td>60.5
614
- </td>
615
- <td>-
616
- </td>
617
- <td>83.3
618
- </td>
619
- <td>86.7
620
- </td>
621
- </tr>
622
- <tr>
623
- <td>BIG-Bench Hard (CoT)
624
- </td>
625
- <td>3
626
- </td>
627
- <td>average/em
628
- </td>
629
- <td>61.1
630
- </td>
631
- <td>64.2
632
- </td>
633
- <td>81.3
634
- </td>
635
- <td>81.6
636
- </td>
637
- <td>85.9
638
- </td>
639
- </tr>
640
- <tr>
641
- <td>ARC-Challenge
642
- </td>
643
- <td>25
644
- </td>
645
- <td>acc_char
646
- </td>
647
- <td>79.4
648
- </td>
649
- <td>79.7
650
- </td>
651
- <td>93.1
652
- </td>
653
- <td>92.9
654
- </td>
655
- <td>96.1
656
- </td>
657
- </tr>
658
- <tr>
659
- <td>Knowledge reasoning
660
- </td>
661
- <td>TriviaQA-Wiki
662
- </td>
663
- <td>5
664
- </td>
665
- <td>em
666
- </td>
667
- <td>78.5
668
- </td>
669
- <td>77.6
670
- </td>
671
- <td>89.7
672
- </td>
673
- <td>89.8
674
- </td>
675
- <td>91.8
676
- </td>
677
- </tr>
678
- <tr>
679
- <td rowspan="4" >Reading comprehension
680
- </td>
681
- <td>SQuAD
682
- </td>
683
- <td>1
684
- </td>
685
- <td>em
686
- </td>
687
- <td>76.4
688
- </td>
689
- <td>77.0
690
- </td>
691
- <td>85.6
692
- </td>
693
- <td>81.8
694
- </td>
695
- <td>89.3
696
- </td>
697
- </tr>
698
- <tr>
699
- <td>QuAC (F1)
700
- </td>
701
- <td>1
702
- </td>
703
- <td>f1
704
- </td>
705
- <td>44.4
706
- </td>
707
- <td>44.9
708
- </td>
709
- <td>51.1
710
- </td>
711
- <td>51.1
712
- </td>
713
- <td>53.6
714
- </td>
715
- </tr>
716
- <tr>
717
- <td>BoolQ
718
- </td>
719
- <td>0
720
- </td>
721
- <td>acc_char
722
- </td>
723
- <td>75.7
724
- </td>
725
- <td>75.0
726
- </td>
727
- <td>79.0
728
- </td>
729
- <td>79.4
730
- </td>
731
- <td>80.0
732
- </td>
733
- </tr>
734
- <tr>
735
- <td>DROP (F1)
736
- </td>
737
- <td>3
738
- </td>
739
- <td>f1
740
- </td>
741
- <td>58.4
742
- </td>
743
- <td>59.5
744
- </td>
745
- <td>79.7
746
- </td>
747
- <td>79.6
748
- </td>
749
- <td>84.8
750
- </td>
751
- </tr>
752
- </table>
753
-
754
-
755
-
756
- ### Instruction tuned models
757
-
758
-
759
- <table>
760
- <tr>
761
- <td><strong>Category</strong>
762
- </td>
763
- <td><strong>Benchmark</strong>
764
- </td>
765
- <td><strong># Shots</strong>
766
- </td>
767
- <td><strong>Metric</strong>
768
- </td>
769
- <td><strong>Llama 3 8B Instruct</strong>
770
- </td>
771
- <td><strong>Llama 3.1 8B Instruct</strong>
772
- </td>
773
- <td><strong>Llama 3 70B Instruct</strong>
774
- </td>
775
- <td><strong>Llama 3.1 70B Instruct</strong>
776
- </td>
777
- <td><strong>Llama 3.1 405B Instruct</strong>
778
- </td>
779
- </tr>
780
- <tr>
781
- <td rowspan="4" >General
782
- </td>
783
- <td>MMLU
784
- </td>
785
- <td>5
786
- </td>
787
- <td>macro_avg/acc
788
- </td>
789
- <td>68.5
790
- </td>
791
- <td>69.4
792
- </td>
793
- <td>82.0
794
- </td>
795
- <td>83.6
796
- </td>
797
- <td>87.3
798
- </td>
799
- </tr>
800
- <tr>
801
- <td>MMLU (CoT)
802
- </td>
803
- <td>0
804
- </td>
805
- <td>macro_avg/acc
806
- </td>
807
- <td>65.3
808
- </td>
809
- <td>73.0
810
- </td>
811
- <td>80.9
812
- </td>
813
- <td>86.0
814
- </td>
815
- <td>88.6
816
- </td>
817
- </tr>
818
- <tr>
819
- <td>MMLU-Pro (CoT)
820
- </td>
821
- <td>5
822
- </td>
823
- <td>micro_avg/acc_char
824
- </td>
825
- <td>45.5
826
- </td>
827
- <td>48.3
828
- </td>
829
- <td>63.4
830
- </td>
831
- <td>66.4
832
- </td>
833
- <td>73.3
834
- </td>
835
- </tr>
836
- <tr>
837
- <td>IFEval
838
- </td>
839
- <td>
840
- </td>
841
- <td>
842
- </td>
843
- <td>76.8
844
- </td>
845
- <td>80.4
846
- </td>
847
- <td>82.9
848
- </td>
849
- <td>87.5
850
- </td>
851
- <td>88.6
852
- </td>
853
- </tr>
854
- <tr>
855
- <td rowspan="2" >Reasoning
856
- </td>
857
- <td>ARC-C
858
- </td>
859
- <td>0
860
- </td>
861
- <td>acc
862
- </td>
863
- <td>82.4
864
- </td>
865
- <td>83.4
866
- </td>
867
- <td>94.4
868
- </td>
869
- <td>94.8
870
- </td>
871
- <td>96.9
872
- </td>
873
- </tr>
874
- <tr>
875
- <td>GPQA
876
- </td>
877
- <td>0
878
- </td>
879
- <td>em
880
- </td>
881
- <td>34.6
882
- </td>
883
- <td>30.4
884
- </td>
885
- <td>39.5
886
- </td>
887
- <td>41.7
888
- </td>
889
- <td>50.7
890
- </td>
891
- </tr>
892
- <tr>
893
- <td rowspan="4" >Code
894
- </td>
895
- <td>HumanEval
896
- </td>
897
- <td>0
898
- </td>
899
- <td>pass@1
900
- </td>
901
- <td>60.4
902
- </td>
903
- <td>72.6
904
- </td>
905
- <td>81.7
906
- </td>
907
- <td>80.5
908
- </td>
909
- <td>89.0
910
- </td>
911
- </tr>
912
- <tr>
913
- <td>MBPP ++ base version
914
- </td>
915
- <td>0
916
- </td>
917
- <td>pass@1
918
- </td>
919
- <td>70.6
920
- </td>
921
- <td>72.8
922
- </td>
923
- <td>82.5
924
- </td>
925
- <td>86.0
926
- </td>
927
- <td>88.6
928
- </td>
929
- </tr>
930
- <tr>
931
- <td>Multipl-E HumanEval
932
- </td>
933
- <td>0
934
- </td>
935
- <td>pass@1
936
- </td>
937
- <td>-
938
- </td>
939
- <td>50.8
940
- </td>
941
- <td>-
942
- </td>
943
- <td>65.5
944
- </td>
945
- <td>75.2
946
- </td>
947
- </tr>
948
- <tr>
949
- <td>Multipl-E MBPP
950
- </td>
951
- <td>0
952
- </td>
953
- <td>pass@1
954
- </td>
955
- <td>-
956
- </td>
957
- <td>52.4
958
- </td>
959
- <td>-
960
- </td>
961
- <td>62.0
962
- </td>
963
- <td>65.7
964
- </td>
965
- </tr>
966
- <tr>
967
- <td rowspan="2" >Math
968
- </td>
969
- <td>GSM-8K (CoT)
970
- </td>
971
- <td>8
972
- </td>
973
- <td>em_maj1@1
974
- </td>
975
- <td>80.6
976
- </td>
977
- <td>84.5
978
- </td>
979
- <td>93.0
980
- </td>
981
- <td>95.1
982
- </td>
983
- <td>96.8
984
- </td>
985
- </tr>
986
- <tr>
987
- <td>MATH (CoT)
988
- </td>
989
- <td>0
990
- </td>
991
- <td>final_em
992
- </td>
993
- <td>29.1
994
- </td>
995
- <td>51.9
996
- </td>
997
- <td>51.0
998
- </td>
999
- <td>68.0
1000
- </td>
1001
- <td>73.8
1002
- </td>
1003
- </tr>
1004
- <tr>
1005
- <td rowspan="4" >Tool Use
1006
- </td>
1007
- <td>API-Bank
1008
- </td>
1009
- <td>0
1010
- </td>
1011
- <td>acc
1012
- </td>
1013
- <td>48.3
1014
- </td>
1015
- <td>82.6
1016
- </td>
1017
- <td>85.1
1018
- </td>
1019
- <td>90.0
1020
- </td>
1021
- <td>92.0
1022
- </td>
1023
- </tr>
1024
- <tr>
1025
- <td>BFCL
1026
- </td>
1027
- <td>0
1028
- </td>
1029
- <td>acc
1030
- </td>
1031
- <td>60.3
1032
- </td>
1033
- <td>76.1
1034
- </td>
1035
- <td>83.0
1036
- </td>
1037
- <td>84.8
1038
- </td>
1039
- <td>88.5
1040
- </td>
1041
- </tr>
1042
- <tr>
1043
- <td>Gorilla Benchmark API Bench
1044
- </td>
1045
- <td>0
1046
- </td>
1047
- <td>acc
1048
- </td>
1049
- <td>1.7
1050
- </td>
1051
- <td>8.2
1052
- </td>
1053
- <td>14.7
1054
- </td>
1055
- <td>29.7
1056
- </td>
1057
- <td>35.3
1058
- </td>
1059
- </tr>
1060
- <tr>
1061
- <td>Nexus (0-shot)
1062
- </td>
1063
- <td>0
1064
- </td>
1065
- <td>macro_avg/acc
1066
- </td>
1067
- <td>18.1
1068
- </td>
1069
- <td>38.5
1070
- </td>
1071
- <td>47.8
1072
- </td>
1073
- <td>56.7
1074
- </td>
1075
- <td>58.7
1076
- </td>
1077
- </tr>
1078
- <tr>
1079
- <td>Multilingual
1080
- </td>
1081
- <td>Multilingual MGSM (CoT)
1082
- </td>
1083
- <td>0
1084
- </td>
1085
- <td>em
1086
- </td>
1087
- <td>-
1088
- </td>
1089
- <td>68.9
1090
- </td>
1091
- <td>-
1092
- </td>
1093
- <td>86.9
1094
- </td>
1095
- <td>91.6
1096
- </td>
1097
- </tr>
1098
- </table>
1099
-
1100
- #### Multilingual benchmarks
1101
-
1102
- <table>
1103
- <tr>
1104
- <td><strong>Category</strong>
1105
- </td>
1106
- <td><strong>Benchmark</strong>
1107
- </td>
1108
- <td><strong>Language</strong>
1109
- </td>
1110
- <td><strong>Llama 3.1 8B</strong>
1111
- </td>
1112
- <td><strong>Llama 3.1 70B</strong>
1113
- </td>
1114
- <td><strong>Llama 3.1 405B</strong>
1115
- </td>
1116
- </tr>
1117
- <tr>
1118
- <td rowspan="9" ><strong>General</strong>
1119
- </td>
1120
- <td rowspan="9" ><strong>MMLU (5-shot, macro_avg/acc)</strong>
1121
- </td>
1122
- <td>Portuguese
1123
- </td>
1124
- <td>62.12
1125
- </td>
1126
- <td>80.13
1127
- </td>
1128
- <td>84.95
1129
- </td>
1130
- </tr>
1131
- <tr>
1132
- <td>Spanish
1133
- </td>
1134
- <td>62.45
1135
- </td>
1136
- <td>80.05
1137
- </td>
1138
- <td>85.08
1139
- </td>
1140
- </tr>
1141
- <tr>
1142
- <td>Italian
1143
- </td>
1144
- <td>61.63
1145
- </td>
1146
- <td>80.4
1147
- </td>
1148
- <td>85.04
1149
- </td>
1150
- </tr>
1151
- <tr>
1152
- <td>German
1153
- </td>
1154
- <td>60.59
1155
- </td>
1156
- <td>79.27
1157
- </td>
1158
- <td>84.36
1159
- </td>
1160
- </tr>
1161
- <tr>
1162
- <td>French
1163
- </td>
1164
- <td>62.34
1165
- </td>
1166
- <td>79.82
1167
- </td>
1168
- <td>84.66
1169
- </td>
1170
- </tr>
1171
- <tr>
1172
- <td>Hindi
1173
- </td>
1174
- <td>50.88
1175
- </td>
1176
- <td>74.52
1177
- </td>
1178
- <td>80.31
1179
- </td>
1180
- </tr>
1181
- <tr>
1182
- <td>Thai
1183
- </td>
1184
- <td>50.32
1185
- </td>
1186
- <td>72.95
1187
- </td>
1188
- <td>78.21
1189
- </td>
1190
- </tr>
1191
- </table>
1192
-
1193
-
1194
-
1195
- ## Responsibility & Safety
1196
-
1197
- As part of our Responsible release approach, we followed a three-pronged strategy to managing trust & safety risks:
1198
-
1199
-
1200
-
1201
- * Enable developers to deploy helpful, safe and flexible experiences for their target audience and for the use cases supported by Llama.
1202
- * Protect developers against adversarial users aiming to exploit Llama capabilities to potentially cause harm.
1203
- * Provide protections for the community to help prevent the misuse of our models.
1204
-
1205
-
1206
- ### Responsible deployment
1207
-
1208
- Llama is a foundational technology designed to be used in a variety of use cases, examples on how Meta’s Llama models have been responsibly deployed can be found in our [Community Stories webpage](https://llama.meta.com/community-stories/). Our approach is to build the most helpful models enabling the world to benefit from the technology power, by aligning our model safety for the generic use cases addressing a standard set of harms. Developers are then in the driver seat to tailor safety for their use case, defining their own policy and deploying the models with the necessary safeguards in their Llama systems. Llama 3.1 was developed following the best practices outlined in our Responsible Use Guide, you can refer to the [Responsible Use Guide](https://llama.meta.com/responsible-use-guide/) to learn more.
1209
-
1210
-
1211
- #### Llama 3.1 instruct
1212
-
1213
- Our main objectives for conducting safety fine-tuning are to provide the research community with a valuable resource for studying the robustness of safety fine-tuning, as well as to offer developers a readily available, safe, and powerful model for various applications to reduce the developer workload to deploy safe AI systems. For more details on the safety mitigations implemented please read the Llama 3 paper.
1214
-
1215
- **Fine-tuning data**
1216
-
1217
- We employ a multi-faceted approach to data collection, combining human-generated data from our vendors with synthetic data to mitigate potential safety risks. We’ve developed many large language model (LLM)-based classifiers that enable us to thoughtfully select high-quality prompts and responses, enhancing data quality control.
1218
-
1219
- **Refusals and Tone**
1220
-
1221
- Building on the work we started with Llama 3, we put a great emphasis on model refusals to benign prompts as well as refusal tone. We included both borderline and adversarial prompts in our safety data strategy, and modified our safety data responses to follow tone guidelines.
1222
-
1223
-
1224
- #### Llama 3.1 systems
1225
-
1226
- **Large language models, including Llama 3.1, are not designed to be deployed in isolation but instead should be deployed as part of an overall AI system with additional safety guardrails as required.** Developers are expected to deploy system safeguards when building agentic systems. Safeguards are key to achieve the right helpfulness-safety alignment as well as mitigating safety and security risks inherent to the system and any integration of the model or system with external tools.
1227
-
1228
- As part of our responsible release approach, we provide the community with [safeguards](https://llama.meta.com/trust-and-safety/) that developers should deploy with Llama models or other LLMs, including Llama Guard 3, Prompt Guard and Code Shield. All our [reference implementations](https://github.com/meta-llama/llama-agentic-system) demos contain these safeguards by default so developers can benefit from system-level safety out-of-the-box.
1229
-
1230
-
1231
- #### New capabilities
1232
-
1233
- Note that this release introduces new capabilities, including a longer context window, multilingual inputs and outputs and possible integrations by developers with third party tools. Building with these new capabilities requires specific considerations in addition to the best practices that generally apply across all Generative AI use cases.
1234
-
1235
- **Tool-use**: Just like in standard software development, developers are responsible for the integration of the LLM with the tools and services of their choice. They should define a clear policy for their use case and assess the integrity of the third party services they use to be aware of the safety and security limitations when using this capability. Refer to the Responsible Use Guide for best practices on the safe deployment of the third party safeguards.
1236
-
1237
- **Multilinguality**: Llama 3.1 supports 7 languages in addition to English: French, German, Hindi, Italian, Portuguese, Spanish, and Thai. Llama may be able to output text in other languages than those that meet performance thresholds for safety and helpfulness. We strongly discourage developers from using this model to converse in non-supported languages without implementing finetuning and system controls in alignment with their policies and the best practices shared in the Responsible Use Guide.
1238
-
1239
-
1240
- ### Evaluations
1241
-
1242
- We evaluated Llama models for common use cases as well as specific capabilities. Common use cases evaluations measure safety risks of systems for most commonly built applications including chat bot, coding assistant, tool calls. We built dedicated, adversarial evaluation datasets and evaluated systems composed of Llama models and Llama Guard 3 to filter input prompt and output response. It is important to evaluate applications in context, and we recommend building dedicated evaluation dataset for your use case. Prompt Guard and Code Shield are also available if relevant to the application.
1243
-
1244
- Capability evaluations measure vulnerabilities of Llama models inherent to specific capabilities, for which were crafted dedicated benchmarks including long context, multilingual, tools calls, coding or memorization.
1245
-
1246
- **Red teaming**
1247
-
1248
- For both scenarios, we conducted recurring red teaming exercises with the goal of discovering risks via adversarial prompting and we used the learnings to improve our benchmarks and safety tuning datasets.
1249
-
1250
- We partnered early with subject-matter experts in critical risk areas to understand the nature of these real-world harms and how such models may lead to unintended harm for society. Based on these conversations, we derived a set of adversarial goals for the red team to attempt to achieve, such as extracting harmful information or reprogramming the model to act in a potentially harmful capacity. The red team consisted of experts in cybersecurity, adversarial machine learning, responsible AI, and integrity in addition to multilingual content specialists with background in integrity issues in specific geographic markets.
1251
-
1252
-
1253
- ### Critical and other risks
1254
-
1255
- We specifically focused our efforts on mitigating the following critical risk areas:
1256
-
1257
- **1- CBRNE (Chemical, Biological, Radiological, Nuclear, and Explosive materials) helpfulness**
1258
-
1259
- To assess risks related to proliferation of chemical and biological weapons, we performed uplift testing designed to assess whether use of Llama 3.1 models could meaningfully increase the capabilities of malicious actors to plan or carry out attacks using these types of weapons.
1260
-
1261
-
1262
- **2. Child Safety**
1263
-
1264
- Child Safety risk assessments were conducted using a team of experts, to assess the model’s capability to produce outputs that could result in Child Safety risks and inform on any necessary and appropriate risk mitigations via fine tuning. We leveraged those expert red teaming sessions to expand the coverage of our evaluation benchmarks through Llama 3 model development. For Llama 3, we conducted new in-depth sessions using objective based methodologies to assess the model risks along multiple attack vectors including the additional languages Llama 3 is trained on. We also partnered with content specialists to perform red teaming exercises assessing potentially violating content while taking account of market specific nuances or experiences.
1265
-
1266
- **3. Cyber attack enablement**
1267
-
1268
- Our cyber attack uplift study investigated whether LLMs can enhance human capabilities in hacking tasks, both in terms of skill level and speed.
1269
-
1270
- Our attack automation study focused on evaluating the capabilities of LLMs when used as autonomous agents in cyber offensive operations, specifically in the context of ransomware attacks. This evaluation was distinct from previous studies that considered LLMs as interactive assistants. The primary objective was to assess whether these models could effectively function as independent agents in executing complex cyber-attacks without human intervention.
1271
-
1272
- Our study of Llama-3.1-405B’s social engineering uplift for cyber attackers was conducted to assess the effectiveness of AI models in aiding cyber threat actors in spear phishing campaigns. Please read our Llama 3.1 Cyber security whitepaper to learn more.
1273
-
1274
-
1275
- ### Community
1276
-
1277
- Generative AI safety requires expertise and tooling, and we believe in the strength of the open community to accelerate its progress. We are active members of open consortiums, including the AI Alliance, Partnership on AI and MLCommons, actively contributing to safety standardization and transparency. We encourage the community to adopt taxonomies like the MLCommons Proof of Concept evaluation to facilitate collaboration and transparency on safety and content evaluations. Our Purple Llama tools are open sourced for the community to use and widely distributed across ecosystem partners including cloud service providers. We encourage community contributions to our [Github repository](https://github.com/meta-llama/PurpleLlama).
1278
-
1279
- We also set up the [Llama Impact Grants](https://llama.meta.com/llama-impact-grants/) program to identify and support the most compelling applications of Meta’s Llama model for societal benefit across three categories: education, climate and open innovation. The 20 finalists from the hundreds of applications can be found [here](https://llama.meta.com/llama-impact-grants/#finalists).
1280
 
1281
- Finally, we put in place a set of resources including an [output reporting mechanism](https://developers.facebook.com/llama_output_feedback) and [bug bounty program](https://www.facebook.com/whitehat) to continuously improve the Llama technology with the help of the community.
1282
 
 
1283
 
1284
- ## Ethical Considerations and Limitations
1285
 
1286
- The core values of Llama 3.1 are openness, inclusivity and helpfulness. It is meant to serve everyone, and to work for a wide range of use cases. It is thus designed to be accessible to people across many different backgrounds, experiences and perspectives. Llama 3.1 addresses users and their needs as they are, without insertion unnecessary judgment or normativity, while reflecting the understanding that even content that may appear problematic in some cases can serve valuable purposes in others. It respects the dignity and autonomy of all users, especially in terms of the values of free thought and expression that power innovation and progress.
1287
 
1288
- But Llama 3.1 is a new technology, and like any new technology, there are risks associated with its use. Testing conducted to date has not covered, nor could it cover, all scenarios. For these reasons, as with all LLMs, Llama 3.1’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 3.1 models, developers should perform safety testing and tuning tailored to their specific applications of the model. Please refer to available resources including our [Responsible Use Guide](https://llama.meta.com/responsible-use-guide), [Trust and Safety](https://llama.meta.com/trust-and-safety/) solutions, and other [resources](https://llama.meta.com/docs/get-started/) to learn more about responsible development.

---
license: llama3.1
datasets:
- nothingiisreal/Reddit-Dirty-And-WritingPrompts
- Nopm/Opus_WritingStruct
- kalomaze/Opus_Instruct_25k
- Gryphe/Sonnet3.5-SlimOrcaDedupCleaned
---

Gate lifted, yay! People liked the model even though it's an underfit test model. It still cost us 80 USD though, lmao. The FP8 version is [here](https://huggingface.co/nothingiisreal/L3.1-70B-Celeste-V0.1-FP8).

Please do give the V1.9 card a read [here](https://huggingface.co/nothingiisreal/MN-12B-Celeste-V1.9).

The recommended system prompt is the same as for V1.9.

The 70B seems to have a bit more GPT-ish terminology than the 12B, but also less slop. It is still less sloppy than other 70Bs.

Temperature 1.25 seems to improve the prose. Recommended sampler settings:

![image/png](https://cdn-uploads.huggingface.co/production/uploads/630cf5d14ca0a22768bbe10c/5BkFd5FromVfT8ZeTml_2.png)
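
If you want to apply these settings in plain `transformers`, here is a minimal sketch. Only the 1.25 temperature comes from this card; the repo id and the remaining generation parameters are my assumptions, so swap in the sampler values from the screenshot above (or the V1.9 card).

```python
# Minimal inference sketch (assumes transformers >= 4.43 and enough VRAM for a 70B in bf16).
# Only temperature=1.25 comes from this card; everything else here is a placeholder.
import torch
import transformers

model_id = "nothingiisreal/L3.1-70B-Celeste-V0.1"  # assumed repo id

pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)

messages = [
    {"role": "system", "content": "<V1.9 system prompt goes here>"},
    {"role": "user", "content": "Write the opening scene of a slow-burn mystery."},
]

outputs = pipeline(
    messages,
    max_new_tokens=512,
    do_sample=True,
    temperature=1.25,  # recommended above
)
print(outputs[0]["generated_text"][-1])
```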

It also seems to be much more coherent and aware of what is going on, as well as more intelligent.

The model gives out what you give in: a sloppy card or first message leads to more of the same, while it is quite good at taking a human-written card with things like conversational narration and continuing in that style.

It was trained on 4x H100 NVL for 6 hours using LoRA+. I still want to train it further, because it seems like the more data we put in, the better the model gets at writing and roleplaying.
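
A quick note on the LoRA+ part, since it only shows up as a single line in the Axolotl config further down: as I understand LoRA+, the adapter's B matrices are trained with a higher learning rate than the A matrices, and `loraplus_lr_ratio` is that multiplier. A rough sketch of the effective rates implied by this run's numbers (my reading of the config, not an official statement):

```python
# Rough sketch of the LoRA+ learning rates implied by the Axolotl config below.
# Assumption: loraplus_lr_ratio scales the base LR for the adapter's B matrices only.
base_lr = 8e-6            # learning_rate in the config
loraplus_lr_ratio = 8     # loraplus_lr_ratio in the config

lr_A = base_lr                      # LoRA A matrices (and other trainable params)
lr_B = base_lr * loraplus_lr_ratio  # LoRA B matrices get the boosted rate

print(f"lr_A = {lr_A:.1e}, lr_B = {lr_B:.1e}")  # lr_A = 8.0e-06, lr_B = 6.4e-05
```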

Test it and see, I guess.

My teammate and I are sick right now xD, and I am currently working with another teammate on some good stuff: we can finally break away from AI-generated datasets, at least for the most part. Once that dataset is done, the 8B, 12B and 70B will all be trained on it. I hope we succeed at this; it would make me so, so happy.

We are also experimenting with RLHF, mainly KTO and PPO.

When we do a proper release, it will come with a full writeup.

---

Datasets used:

Each entry is: file name, sample ratio, whether to force RP format, whether to apply the length limit (the sequence-length limit is always applied to the first message), an unknown boolean, minimum message count, system message.

- Reddit WP<br>
  `["reddit_writing_prompts.jsonl", 0.4, True, True, False, 2, "Write a story based on prompt provided by user below. Mode: SFW"]`
- Instruct<br>
  `["combined_25k_HOTFIX_declauded_englishonly_sysprompt_name_swap.jsonl", 0.1, False, True, False, 2, ""]`<br>
  `["slim-orca.json", 0.1, False, True, False, 2, ""]`
- Synth story<br>
  `["writing-struct-deslopped.json", 0.1, False, True, False, 2, ""]`
- Claude RP: sample ratio 0.8

Thank you Nopm, Gryphe (double thanks), kalomaze, and everyone else involved in making those datasets. r/DirtyWritingPrompts was dropped because it would induce undesirable features. No worries though, NSFW will be stronger than ever lmao.

We used 10,000 rows in total, so take those sample ratios, normalise them so they add up to 1, and that gives the split of the dataset (see the sketch below). You can find all the datasets by searching for them on Hugging Face; the Claude RP data is c2 logs, but we filtered it ourselves.
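
To make the ratio arithmetic concrete, here is a small sketch of that normalisation using the ratios listed above. The per-dataset row counts are my back-of-the-envelope estimates from those ratios, not official numbers.

```python
# Back-of-the-envelope split of the 10,000 training rows from the listed sample ratios.
ratios = {
    "reddit_writing_prompts": 0.4,
    "opus_instruct_25k": 0.1,
    "slim_orca": 0.1,
    "writing_struct": 0.1,
    "claude_rp_c2": 0.8,
}
total_rows = 10_000

total_ratio = sum(ratios.values())  # 1.5
rows = {name: round(total_rows * r / total_ratio) for name, r in ratios.items()}
print(rows)
# {'reddit_writing_prompts': 2667, 'opus_instruct_25k': 667, 'slim_orca': 667,
#  'writing_struct': 667, 'claude_rp_c2': 5333}
```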

---

Axolotl config:

```yaml
# Model
base_model: meta-llama/Meta-Llama-3.1-70B-Instruct
model_type: LlamaForCausalLM
tokenizer_type: AutoTokenizer
trust_remote_code: true

# Output and HuggingFace
output_dir: /workspace/data/train-results/trained_model
hub_model_id:
hf_use_auth_token: true
hub_strategy: "all_checkpoints"

# WandB
wandb_project: huggingface
wandb_entity:

# Data
chat_template: llama3
train_on_inputs: false
group_by_length: true
datasets:
  - path:
    type: sharegpt
    roles:
      input:
        - system
        - user
      output:
        - assistant

## Evaluation
val_set_size: 0.01
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128

# Technical aspects
sequence_len: 8192
save_safetensors: true
saves_per_epoch: 2
logging_steps: 1
special_tokens:
  pad_token: <|end_of_text|>

# Quantization
bf16: auto
fp16:
tf32: false
## For LoRA
load_in_8bit: false
load_in_4bit: true

# LoRA
adapter: qlora
lora_model_dir:
lora_r: 256
lora_alpha: 256
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:

loraplus_lr_ratio: 8
loraplus_lr_embedding:

# Training hyperparameters
# max_steps:
num_epochs: 1 # TODO Perhaps reduce this because LoRA+ only needs 1 epoch.

# Anti Overfit and Stability
weight_decay: 0.01
max_grad_norm: 1.0 # Might increase this to 15 or something.

## Learning Rate
warmup_ratio: 0.05
learning_rate: 0.000008
lr_scheduler: cosine_with_min_lr
lr_scheduler_kwargs:
  min_lr: 0.0000024
optimizer: paged_adamw_8bit # usually adamw_torch or paged_adamw_8bit

## Batch Size
gradient_accumulation_steps: 1
micro_batch_size: 1 # Batch size per GPU = micro_batch_size * gradient_accumulation_steps
eval_batch_size: 1

# Optimizations
pad_to_sequence_len: true
sample_packing: true
eval_sample_packing: true
flash_attention: true
xformers_attention:
gradient_checkpointing: "unsloth"
gradient_checkpointing_kwargs:
  use_reentrant: true
local_rank:
deepspeed: /workspace/axolotl/deepspeed_configs/zero3_bf16.json # Only use with multi GPU # _bf16_cpuoffload_all

# Misc
early_stopping_patience:
debug:
```
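
For anyone sizing a similar run, the effective batch size implied by this config is easy to work out. This assumes the 4x H100 NVL setup mentioned above, and note that with sample packing each sequence is a packed 8192-token block rather than a single conversation.

```python
# Effective batch size implied by the config above, assuming 4 GPUs (4x H100 NVL).
micro_batch_size = 1
gradient_accumulation_steps = 1
num_gpus = 4          # assumption: the 4x H100 NVL run described earlier
sequence_len = 8192

seqs_per_step = micro_batch_size * gradient_accumulation_steps * num_gpus
tokens_per_step = seqs_per_step * sequence_len  # upper bound; sample packing fills each block

print(seqs_per_step, tokens_per_step)  # 4 sequences, up to 32768 tokens per optimizer step
```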