Commit 64c1db1 by josh-sematic (0 parents)

Duplicate from airtrain-ai/fineweb-edu-fortified

This view is limited to 50 files because it contains too many changes.

Files changed (50):
  1. .gitattributes +55 -0
  2. README.md +609 -0
  3. data/CC-MAIN-2013-20/train-00000-of-00024.parquet +3 -0
  4. data/CC-MAIN-2013-20/train-00001-of-00024.parquet +3 -0
  5. data/CC-MAIN-2013-20/train-00002-of-00024.parquet +3 -0
  6. data/CC-MAIN-2013-20/train-00003-of-00024.parquet +3 -0
  7. data/CC-MAIN-2013-20/train-00004-of-00024.parquet +3 -0
  8. data/CC-MAIN-2013-20/train-00005-of-00024.parquet +3 -0
  9. data/CC-MAIN-2013-20/train-00006-of-00024.parquet +3 -0
  10. data/CC-MAIN-2013-20/train-00007-of-00024.parquet +3 -0
  11. data/CC-MAIN-2013-20/train-00008-of-00024.parquet +3 -0
  12. data/CC-MAIN-2013-20/train-00009-of-00024.parquet +3 -0
  13. data/CC-MAIN-2013-20/train-00010-of-00024.parquet +3 -0
  14. data/CC-MAIN-2013-20/train-00011-of-00024.parquet +3 -0
  15. data/CC-MAIN-2013-20/train-00012-of-00024.parquet +3 -0
  16. data/CC-MAIN-2013-20/train-00013-of-00024.parquet +3 -0
  17. data/CC-MAIN-2013-20/train-00014-of-00024.parquet +3 -0
  18. data/CC-MAIN-2013-20/train-00015-of-00024.parquet +3 -0
  19. data/CC-MAIN-2013-20/train-00016-of-00024.parquet +3 -0
  20. data/CC-MAIN-2013-20/train-00017-of-00024.parquet +3 -0
  21. data/CC-MAIN-2013-20/train-00018-of-00024.parquet +3 -0
  22. data/CC-MAIN-2013-20/train-00019-of-00024.parquet +3 -0
  23. data/CC-MAIN-2013-20/train-00020-of-00024.parquet +3 -0
  24. data/CC-MAIN-2013-20/train-00021-of-00024.parquet +3 -0
  25. data/CC-MAIN-2013-20/train-00022-of-00024.parquet +3 -0
  26. data/CC-MAIN-2013-20/train-00023-of-00024.parquet +3 -0
  27. data/CC-MAIN-2013-48/train-00000-of-00013.parquet +3 -0
  28. data/CC-MAIN-2013-48/train-00001-of-00013.parquet +3 -0
  29. data/CC-MAIN-2013-48/train-00002-of-00013.parquet +3 -0
  30. data/CC-MAIN-2013-48/train-00003-of-00013.parquet +3 -0
  31. data/CC-MAIN-2013-48/train-00004-of-00013.parquet +3 -0
  32. data/CC-MAIN-2013-48/train-00005-of-00013.parquet +3 -0
  33. data/CC-MAIN-2013-48/train-00006-of-00013.parquet +3 -0
  34. data/CC-MAIN-2013-48/train-00007-of-00013.parquet +3 -0
  35. data/CC-MAIN-2013-48/train-00008-of-00013.parquet +3 -0
  36. data/CC-MAIN-2013-48/train-00009-of-00013.parquet +3 -0
  37. data/CC-MAIN-2013-48/train-00010-of-00013.parquet +3 -0
  38. data/CC-MAIN-2013-48/train-00011-of-00013.parquet +3 -0
  39. data/CC-MAIN-2013-48/train-00012-of-00013.parquet +3 -0
  40. data/CC-MAIN-2014-10/train-00000-of-00009.parquet +3 -0
  41. data/CC-MAIN-2014-10/train-00001-of-00009.parquet +3 -0
  42. data/CC-MAIN-2014-10/train-00002-of-00009.parquet +3 -0
  43. data/CC-MAIN-2014-10/train-00003-of-00009.parquet +3 -0
  44. data/CC-MAIN-2014-10/train-00004-of-00009.parquet +3 -0
  45. data/CC-MAIN-2014-10/train-00005-of-00009.parquet +3 -0
  46. data/CC-MAIN-2014-10/train-00006-of-00009.parquet +3 -0
  47. data/CC-MAIN-2014-10/train-00007-of-00009.parquet +3 -0
  48. data/CC-MAIN-2014-10/train-00008-of-00009.parquet +3 -0
  49. data/CC-MAIN-2014-15/train-00000-of-00005.parquet +3 -0
  50. data/CC-MAIN-2014-15/train-00001-of-00005.parquet +3 -0
.gitattributes ADDED
@@ -0,0 +1,55 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Audio files - uncompressed
+ *.pcm filter=lfs diff=lfs merge=lfs -text
+ *.sam filter=lfs diff=lfs merge=lfs -text
+ *.raw filter=lfs diff=lfs merge=lfs -text
+ # Audio files - compressed
+ *.aac filter=lfs diff=lfs merge=lfs -text
+ *.flac filter=lfs diff=lfs merge=lfs -text
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
+ *.ogg filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
+ # Image files - uncompressed
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.tiff filter=lfs diff=lfs merge=lfs -text
+ # Image files - compressed
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,609 @@
+ ---
+ language:
+ - en
+ license: odc-by
+ dataset_info:
+ - config_name: CC-MAIN-2013-20
+   features:
+   - name: text
+     dtype: string
+   - name: id
+     dtype: string
+   - name: dump
+     dtype: string
+   - name: url
+     dtype: string
+   - name: file_path
+     dtype: string
+   - name: language
+     dtype: string
+   - name: language_score
+     dtype: float64
+   - name: token_count
+     dtype: int64
+   - name: score
+     dtype: float64
+   - name: int_score
+     dtype: int64
+   - name: embedding
+     sequence: float32
+   - name: count
+     dtype: int64
+   splits:
+   - name: train
+     num_bytes: 71683996286
+     num_examples: 10800000
+   download_size: 55571546426
+   dataset_size: 71683996286
+ - config_name: CC-MAIN-2013-48
+   features:
+   - name: text
+     dtype: string
+   - name: id
+     dtype: string
+   - name: dump
+     dtype: string
+   - name: url
+     dtype: string
+   - name: file_path
+     dtype: string
+   - name: language
+     dtype: string
+   - name: language_score
+     dtype: float64
+   - name: token_count
+     dtype: int64
+   - name: score
+     dtype: float64
+   - name: int_score
+     dtype: int64
+   - name: embedding
+     sequence: float32
+   - name: count
+     dtype: int64
+   splits:
+   - name: train
+     num_bytes: 38878994623
+     num_examples: 5800000
+   download_size: 30087644388
+   dataset_size: 38878994623
+ - config_name: CC-MAIN-2014-10
+   features:
+   - name: text
+     dtype: string
+   - name: id
+     dtype: string
+   - name: dump
+     dtype: string
+   - name: url
+     dtype: string
+   - name: file_path
+     dtype: string
+   - name: language
+     dtype: string
+   - name: language_score
+     dtype: float64
+   - name: token_count
+     dtype: int64
+   - name: score
+     dtype: float64
+   - name: int_score
+     dtype: int64
+   - name: embedding
+     sequence: float32
+   - name: count
+     dtype: int64
+   splits:
+   - name: train
+     num_bytes: 24971658588
+     num_examples: 3550000
+   download_size: 19058832929
+   dataset_size: 24971658588
+ - config_name: CC-MAIN-2014-15
+   features:
+   - name: text
+     dtype: string
+   - name: id
+     dtype: string
+   - name: dump
+     dtype: string
+   - name: url
+     dtype: string
+   - name: file_path
+     dtype: string
+   - name: language
+     dtype: string
+   - name: language_score
+     dtype: float64
+   - name: token_count
+     dtype: int64
+   - name: score
+     dtype: float64
+   - name: int_score
+     dtype: int64
+   - name: embedding
+     sequence: float32
+   - name: count
+     dtype: int64
+   splits:
+   - name: train
+     num_bytes: 13615746365
+     num_examples: 1850000
+   download_size: 10299687552
+   dataset_size: 13615746365
+ - config_name: CC-MAIN-2014-23
+   features:
+   - name: text
+     dtype: string
+   - name: id
+     dtype: string
+   - name: dump
+     dtype: string
+   - name: url
+     dtype: string
+   - name: file_path
+     dtype: string
+   - name: language
+     dtype: string
+   - name: language_score
+     dtype: float64
+   - name: token_count
+     dtype: int64
+   - name: score
+     dtype: float64
+   - name: int_score
+     dtype: int64
+   - name: embedding
+     sequence: float32
+   - name: count
+     dtype: int64
+   splits:
+   - name: train
+     num_bytes: 21798450754
+     num_examples: 3100000
+   download_size: 16663899441
+   dataset_size: 21798450754
+ - config_name: CC-MAIN-2014-35
+   features:
+   - name: text
+     dtype: string
+   - name: id
+     dtype: string
+   - name: dump
+     dtype: string
+   - name: url
+     dtype: string
+   - name: file_path
+     dtype: string
+   - name: language
+     dtype: string
+   - name: language_score
+     dtype: float64
+   - name: token_count
+     dtype: int64
+   - name: score
+     dtype: float64
+   - name: int_score
+     dtype: int64
+   - name: embedding
+     sequence: float32
+   - name: count
+     dtype: int64
+   splits:
+   - name: train
+     num_bytes: 10954201796
+     num_examples: 1500000
+   download_size: 8309419357
+   dataset_size: 10954201796
+ - config_name: CC-MAIN-2014-41
+   features:
+   - name: text
+     dtype: string
+   - name: id
+     dtype: string
+   - name: dump
+     dtype: string
+   - name: url
+     dtype: string
+   - name: file_path
+     dtype: string
+   - name: language
+     dtype: string
+   - name: language_score
+     dtype: float64
+   - name: token_count
+     dtype: int64
+   - name: score
+     dtype: float64
+   - name: int_score
+     dtype: int64
+   - name: embedding
+     sequence: float32
+   - name: count
+     dtype: int64
+   splits:
+   - name: train
+     num_bytes: 11392615401
+     num_examples: 1600000
+   download_size: 8694382261
+   dataset_size: 11392615401
+ - config_name: CC-MAIN-2014-42
+   features:
+   - name: text
+     dtype: string
+   - name: id
+     dtype: string
+   - name: dump
+     dtype: string
+   - name: url
+     dtype: string
+   - name: file_path
+     dtype: string
+   - name: language
+     dtype: string
+   - name: language_score
+     dtype: float64
+   - name: token_count
+     dtype: int64
+   - name: score
+     dtype: float64
+   - name: int_score
+     dtype: int64
+   - name: embedding
+     sequence: float32
+   - name: count
+     dtype: int64
+   splits:
+   - name: train
+     num_bytes: 8491740156
+     num_examples: 1150000
+   download_size: 6430841610
+   dataset_size: 8491740156
+ - config_name: CC-MAIN-2014-49
+   features:
+   - name: text
+     dtype: string
+   - name: id
+     dtype: string
+   - name: dump
+     dtype: string
+   - name: url
+     dtype: string
+   - name: file_path
+     dtype: string
+   - name: language
+     dtype: string
+   - name: language_score
+     dtype: float64
+   - name: token_count
+     dtype: int64
+   - name: score
+     dtype: float64
+   - name: int_score
+     dtype: int64
+   - name: embedding
+     sequence: float32
+   - name: count
+     dtype: int64
+   splits:
+   - name: train
+     num_bytes: 7754099049
+     num_examples: 1050000
+   download_size: 5866979308
+   dataset_size: 7754099049
+ - config_name: CC-MAIN-2014-52
+   features:
+   - name: text
+     dtype: string
+   - name: id
+     dtype: string
+   - name: dump
+     dtype: string
+   - name: url
+     dtype: string
+   - name: file_path
+     dtype: string
+   - name: language
+     dtype: string
+   - name: language_score
+     dtype: float64
+   - name: token_count
+     dtype: int64
+   - name: score
+     dtype: float64
+   - name: int_score
+     dtype: int64
+   - name: embedding
+     sequence: float32
+   - name: count
+     dtype: int64
+   splits:
+   - name: train
+     num_bytes: 9953666568
+     num_examples: 1350000
+   download_size: 7521103037
+   dataset_size: 9953666568
+ - config_name: CC-MAIN-2015-06
+   features:
+   - name: text
+     dtype: string
+   - name: id
+     dtype: string
+   - name: dump
+     dtype: string
+   - name: url
+     dtype: string
+   - name: file_path
+     dtype: string
+   - name: language
+     dtype: string
+   - name: language_score
+     dtype: float64
+   - name: token_count
+     dtype: int64
+   - name: score
+     dtype: float64
+   - name: int_score
+     dtype: int64
+   - name: embedding
+     sequence: float32
+   - name: count
+     dtype: int64
+   splits:
+   - name: train
+     num_bytes: 8988649992
+     num_examples: 1200000
+   download_size: 6771650647
+   dataset_size: 8988649992
+ - config_name: CC-MAIN-2015-11
+   features:
+   - name: text
+     dtype: string
+   - name: id
+     dtype: string
+   - name: dump
+     dtype: string
+   - name: url
+     dtype: string
+   - name: file_path
+     dtype: string
+   - name: language
+     dtype: string
+   - name: language_score
+     dtype: float64
+   - name: token_count
+     dtype: int64
+   - name: score
+     dtype: float64
+   - name: int_score
+     dtype: int64
+   - name: embedding
+     sequence: float32
+   - name: count
+     dtype: int64
+   splits:
+   - name: train
+     num_bytes: 9212466984
+     num_examples: 1200000
+   download_size: 6893305603
+   dataset_size: 9212466984
+ configs:
+ - config_name: CC-MAIN-2013-20
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2013-20/train-*
+ - config_name: CC-MAIN-2013-48
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2013-48/train-*
+ - config_name: CC-MAIN-2014-10
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2014-10/train-*
+ - config_name: CC-MAIN-2014-15
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2014-15/train-*
+ - config_name: CC-MAIN-2014-23
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2014-23/train-*
+ - config_name: CC-MAIN-2014-35
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2014-35/train-*
+ - config_name: CC-MAIN-2014-41
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2014-41/train-*
+ - config_name: CC-MAIN-2014-42
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2014-42/train-*
+ - config_name: CC-MAIN-2014-49
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2014-49/train-*
+ - config_name: CC-MAIN-2014-52
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2014-52/train-*
+ - config_name: CC-MAIN-2015-06
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2015-06/train-*
+ - config_name: CC-MAIN-2015-11
+   data_files:
+   - split: train
+     path: data/CC-MAIN-2015-11/train-*
+ ---
+
+ # Fineweb-Edu-Fortified !WORK IN PROGRESS!
+
+ <figure>
+ <img src="https://cdn-uploads.huggingface.co/production/uploads/646516d2200b583e1e50faf8/79yPdK79m9mA0cCz-3h4v.png" width="500" style="margin-left:auto; margin-right: auto"/>
+
+ <figcaption style="text-align: center; margin-left: auto; margin-right: auto; font-style: italic;">
+ The composition of fineweb-edu-fortified, produced by automatically clustering a 500k row sample in
+ <a href="https://app.airtrain.ai/dataset/c232b33f-4f4a-49a7-ba55-8167a5f433da/null/1/0">Airtrain</a>
+ </figcaption>
+ </figure>
+
+ ## What is it?
+
+ Fineweb-Edu-Fortified is a dataset derived from
+ [Fineweb-Edu](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu) by applying exact-match
+ deduplication across the whole dataset and producing an embedding for each row. The number of times
+ the text from each row appears is also included as a `count` column. The embeddings were produced
+ using [TaylorAI/bge-micro](https://huggingface.co/TaylorAI/bge-micro).
+
+ Fineweb and Fineweb-Edu were obtained by processing data from 95 crawls of
+ [Common Crawl](https://commoncrawl.org/), covering a time period from 2013 to 2024.
+ More information about the original datasets can be found by consulting:
+
+ - [Fineweb-edu dataset card](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu)
+ - [Fineweb dataset card](https://huggingface.co/datasets/HuggingFaceFW/fineweb)
+ - [Fineweb release blog post](https://huggingface.co/spaces/HuggingFaceFW/blogpost-fineweb-v1)
+ - [Fineweb paper](https://arxiv.org/abs/2406.17557)
+
+ The contents of a randomly selected 500k rows from this dataset can be interactively
+ explored in this
+ [Airtrain](https://app.airtrain.ai/dataset/c232b33f-4f4a-49a7-ba55-8167a5f433da/null/1/0)
+ dashboard.
+
+ ## Deduplication
+
+ ### Deduplication in original Fineweb and Fineweb-Edu
+
+ During creation of the original Fineweb dataset, a variety of deduplication strategies were
+ explored. The evaluation criterion used to assess deduplication strategies was to train ablation models
+ on randomly selected subsets of the data, using a subset of up to ~350 billion tokens.
+
+ Using this mechanism, the Fineweb authors selected a MinHash algorithm, with parameters chosen so that
+ documents with approximately 75% similarity or higher are considered duplicates. This deduplication was
+ performed *within* each Common Crawl crawl. For example, it would have removed all approximate duplicates
+ from the 20th crawl from 2013, but would have retained an identical record that showed up
+ in both the 2013-20 crawl and the 2013-48 crawl. The authors note that applying the
+ deduplication *across crawls* reduced the evaluation performance of the ablation models used
+ for assessment. The proposed reason for this performance degradation is that data
+ duplicated across crawls is more likely to be high-quality compared to data that is not,
+ so leaving in the duplicates effectively upsamples the higher-quality data.
+
+ Following deduplication in Fineweb, Fineweb-Edu was extracted using a model-based quality classifier
+ targeting educational content. It thus inherited Fineweb's strategy of deduplicating within, but not
+ across, crawls.
+
+ ### Deduplication in this dataset
+
+ #### Motivation
+
+ Given the findings that cross-crawl deduplication reduced ablation model performance, one might ask
+ what the motivation is for producing a dataset that uses it. Our motivation was threefold:
+
+ - Reduce the number of rows that needed to be embedded by avoiding embedding of exact-match content
+ - Enable easier filtering of the dataset for subsets of interest
+ - Provide a version of the dataset for users whose training goals include avoiding training on non-unique
+ tokens.
+
+ For use cases that would benefit from "re-hydrating" or filtering the rows based on how frequently
+ the text appeared in the original dataset, the new `count` column retains the number of appearances
+ of the associated text.
+
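+ As a minimal sketch of such filtering with the `datasets` library (illustrative, not part of the
+ original release; the threshold of 2 is arbitrary):
+
+ ```python
+ from datasets import load_dataset
+
+ # Stream one crawl's config and keep only rows whose text appeared
+ # more than once in the original Fineweb-Edu.
+ fw = load_dataset(
+     "airtrain-ai/fineweb-edu-fortified",
+     name="CC-MAIN-2013-20",
+     split="train",
+     streaming=True,
+ )
+ frequent = fw.filter(lambda row: row["count"] >= 2)
+
+ # "Re-hydrate": repeat each text according to its original frequency.
+ for row in frequent.take(5):
+     rehydrated_texts = [row["text"]] * row["count"]
+ ```
+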
+ #### Procedure
+
+ The overall procedure was to remove exact matches that appeared in multiple crawls (also referred to
+ as "dumps"). This was achieved by performing an md5 hash on the text column and removing rows with
+ duplicate hashes. To make this tractable at scale, we first grouped all rows by the first two hex
+ digits of their hashes, then looked for exact hash matches within each of the resulting 256
+ buckets of data. Note that unlike the intra-crawl deduplication, we only eliminated exact matches
+ across crawls. For duplicated rows, a strong preference was given to keeping the metadata
+ (e.g. dump, url) from the oldest crawl where the text appeared. Following deduplication and
+ embedding, the data were grouped by the "dump" column, mirroring the organization of the original
+ Fineweb-Edu dataset.
+
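+ The sketch below illustrates the idea in miniature (an in-memory simplification under stated
+ assumptions; the actual pipeline processed the 256 buckets separately to stay tractable at scale):
+
+ ```python
+ import hashlib
+ from collections import defaultdict
+
+ def dedup_across_crawls(rows):
+     """Keep one row per unique text, preferring the oldest crawl's metadata.
+
+     rows: iterable of dicts with at least "text" and "dump" keys.
+     """
+     buckets = defaultdict(dict)  # first two hex digits of md5 -> {digest: row}
+     for row in rows:
+         digest = hashlib.md5(row["text"].encode("utf-8")).hexdigest()
+         bucket = buckets[digest[:2]]  # one of 256 buckets
+         kept = bucket.get(digest)
+         if kept is None:
+             bucket[digest] = {**row, "count": 1}
+         elif row["dump"] < kept["dump"]:
+             # CC-MAIN-YYYY-WW names sort chronologically, so "smaller" is older:
+             # keep this row's metadata and carry the running count forward.
+             bucket[digest] = {**row, "count": kept["count"] + 1}
+         else:
+             kept["count"] += 1
+     for bucket in buckets.values():
+         yield from bucket.values()
+ ```
+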
+ ### Deduplication stats
+
+ Deduplication removed approximately 74.7% of rows from the original dataset
+ (from 1.279 billion rows in Fineweb-Edu to 0.324 billion rows in Fineweb-Edu-Fortified).
+ This indicates that a substantial amount of data in Fineweb-Edu is present across multiple crawls.
+
+ The total token count in the deduplicated dataset is approximately 375 billion, compared to the
+ 1,320 billion tokens in Fineweb-Edu.
+
+ <figure>
+ <img src="https://cdn-uploads.huggingface.co/production/uploads/646516d2200b583e1e50faf8/mUFyO1fUWJEXbYwiteR9e.png" width="750" style="margin-left:auto; margin-right: auto"/>
+
+ <figcaption style="text-align: center; margin-left: auto; margin-right: auto; font-style: italic;">
+ A histogram of the `count` column, generated from a 500k row sample after
+ performing global per-row text duplication counting.
+ </figcaption>
+ </figure>
+
+ ## Embeddings
+
+ To support use cases for Fineweb-Edu such as classification, clustering, semantic search, etc.,
+ we have produced an embedding vector for each row in the dataset. The embedding model
+ [TaylorAI/bge-micro](https://huggingface.co/TaylorAI/bge-micro)
+ was selected for its tradeoff of strong performance on [MTEB](https://huggingface.co/spaces/mteb/leaderboard)
+ benchmarks relative to its size (17 million parameters). The model's embedding space
+ has 384 dimensions. The context window of the model is 512 tokens (roughly several paragraphs of text);
+ each row is embedded using the first 512 tokens in its text field. Producing the embeddings took approximately
+ 412 GPU-hours on Nvidia T4 GPUs.
+
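+ As a rough sketch, comparable embeddings could be produced with the `sentence-transformers`
+ library; settings such as normalization are assumptions here, not necessarily our exact
+ production configuration:
+
+ ```python
+ from sentence_transformers import SentenceTransformer
+
+ # bge-micro: 17 million parameters, 384-dimensional embeddings, 512-token window.
+ model = SentenceTransformer("TaylorAI/bge-micro")
+
+ texts = [
+     "Photosynthesis converts light energy into chemical energy.",
+     "The water cycle moves water between the oceans, atmosphere, and land.",
+ ]
+ # Inputs longer than the model's window are truncated, matching the
+ # "first 512 tokens" behavior described above.
+ embeddings = model.encode(texts, normalize_embeddings=True)  # normalization is an assumption
+ print(embeddings.shape)  # (2, 384)
+ ```
+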
+ <figure>
+ <img src="https://cdn-uploads.huggingface.co/production/uploads/646516d2200b583e1e50faf8/ycybBqIAijIqYEBPLbvhe.png" width="500" style="margin-left:auto; margin-right: auto"/>
+
+ <figcaption style="text-align: center; margin-left: auto; margin-right: auto; font-style: italic;">
+ A PCA projection of 5k randomly sampled embeddings.
+ The embeddings of a 500k row sample from this dataset can be interactively explored in
+ <a href="https://app.airtrain.ai/dataset/c232b33f-4f4a-49a7-ba55-8167a5f433da/null/1/0">Airtrain</a>.
+ </figcaption>
+ </figure>
+
+ ## Using via `datasets`
+
+ ```python
+ from datasets import load_dataset
+ fw = load_dataset("airtrain-ai/fineweb-edu-fortified", name="CC-MAIN-2024-10", split="train", streaming=True)
+ ```
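+
+ Because `streaming=True` returns an `IterableDataset`, rows are fetched lazily. As a quick,
+ illustrative way to inspect a single row:
+
+ ```python
+ first = next(iter(fw))
+ print(first["url"], first["count"], len(first["embedding"]))  # embedding holds 384 floats
+ ```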
+
+ ## Considerations for Using the Data
+
+ This "Considerations" section is copied from the parent dataset:
+ [FineWeb-edu](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu).
+
+ ### Social Impact of Dataset
+
+ With the release of this dataset we aim to make model training more accessible to the machine learning community at large.
+
+ While multiple open-weights models with strong performance have been publicly released in the past, more often than not these releases are not accompanied by the corresponding training dataset. This is unfortunate, as the dataset's specificities and characteristics have been demonstrated to have a very large impact on the performance of the models. As the creation of a high-quality training dataset is a fundamental requirement for training an LLM capable of excelling at downstream tasks, with 🍷 FineWeb we (a) make the dataset creation process more transparent, by sharing our entire processing setup including the codebase used, and (b) help alleviate the costs of dataset curation, both in time and in compute, for model creators, by publicly releasing our dataset with the community.
+
+ ### Discussion of Biases
+
+ Efforts were made to minimize the amount of NSFW and toxic content present in the dataset by employing filtering at the URL level. However, there are still a significant number of documents present in the final dataset that could be considered toxic or contain harmful content. As 🍷 FineWeb was sourced from the web as a whole, any harmful biases typically present in it may be reproduced in our dataset.
+
+ We deliberately avoided using machine learning filtering methods that define text quality based on the similarity to a “gold” source such as Wikipedia, or toxicity classifiers, as these methods have been known to [disproportionately remove content in specific dialects](https://aclanthology.org/D16-1120/) and [overclassify as toxic text related to specific social identities](https://arxiv.org/pdf/2109.07445.pdf), respectively.
+
+ ### Other Known Limitations
+
+ As a consequence of some of the filtering steps applied, it is likely that code content is not prevalent in our dataset. If you are training a model that should also perform code tasks, we recommend you use 🍷 FineWeb with a code dataset, such as [The Stack v2](https://huggingface.co/datasets/bigcode/the-stack-v2). You should also probably consider complementing 🍷 FineWeb with specialized curated sources (such as Wikipedia, for example) as they will likely have better formatting than the Wikipedia content included in 🍷 FineWeb (we did not tailor the processing to individual websites).
+
+ ## Additional Information
+
+ ### Acknowledgements
+
+ Airtrain would like to thank the Fineweb/Fineweb-Edu team at Hugging Face for producing the original datasets,
+ as well as for their support during work on Fineweb-Edu-Fortified.
+
+ We'd also like to thank [@underspirit](https://huggingface.co/underspirit) for
+ [pointing out](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu/discussions/7)
+ the amount of reduction in dataset size that could be achieved via deduplication.
+
+ We owe gratitude to [TaylorAI](https://huggingface.co/TaylorAI) for the `bge-micro` embedding model.
+
+ Finally, thank you to the Hugging Face community for fostering a thriving ecosystem of models, datasets, and tools
+ to support open-source AI.
+
+ ### Licensing Information
+
+ The dataset is released under the **Open Data Commons Attribution License (ODC-By) v1.0** [license](https://opendatacommons.org/licenses/by/1-0/). The use of this dataset is also subject to [CommonCrawl's Terms of Use](https://commoncrawl.org/terms-of-use).
data/CC-MAIN-2013-20/train-00000-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a47440846d495bf3926ba8f9897518e1a3cae977eacb39a279ed9c9ad10ec11
+ size 2311611505
data/CC-MAIN-2013-20/train-00001-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5824ccac6aa8e0978e5e04de3757765c036ba2125b35f8aa44a36221ec3b0391
+ size 2314384236
data/CC-MAIN-2013-20/train-00002-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66e1e7d6eba24309844fea7979d80f646dd72df9ee9da18319b3be60075c4bcc
+ size 2313811886
data/CC-MAIN-2013-20/train-00003-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:822546852cda5be884d5b0ee41a201ef1ad36fdedd56804a52cd492145de18bc
+ size 2319781351
data/CC-MAIN-2013-20/train-00004-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ffd43fac6b6fa1bff3d1e575a800a197d9e38a93dc5ace184374cb1c07fa0fc
+ size 2313754629
data/CC-MAIN-2013-20/train-00005-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:57d4b0fe29fdaf70ce9a17c6d314f3c2d6d4c3de8b24f76dd1059f949b1938b3
+ size 2313051240
data/CC-MAIN-2013-20/train-00006-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d6973d8577b0bd127e0b7fd027ba083a5bd4761dff5b6d56b9f5dcef03c283a
+ size 2322559050
data/CC-MAIN-2013-20/train-00007-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e8fe22282f6492c3654ba6e69198f670ebf7942b68b2852479e5762252541d9
+ size 2320751053
data/CC-MAIN-2013-20/train-00008-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:307ad70edf246d11150a475d219730a78013485b8570022c96620af72e265e6e
+ size 2316409653
data/CC-MAIN-2013-20/train-00009-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd67301f056f8407a95350f7dc2251f41279fdb8fc760c5ac9d1a005de1c20ff
+ size 2316324727
data/CC-MAIN-2013-20/train-00010-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c44748d01f8ad270f08d7267d02e26644198c0994ddd4ad3999fe32bee8cd4e5
+ size 2319309888
data/CC-MAIN-2013-20/train-00011-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8b98f3659daf5a036b80ea6e500567f1bec8862b91d2d639f5a69e8aad938a1
+ size 2314003459
data/CC-MAIN-2013-20/train-00012-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:242b98308b5fbffa00bf3e535b16f118501b8b88f76b56ef3f5b2857a3b7b9f3
+ size 2311616015
data/CC-MAIN-2013-20/train-00013-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ffd0bcb10904f719879e3b695351bcdc8d3b1830995697e876429cb7ac19f457
+ size 2318511130
data/CC-MAIN-2013-20/train-00014-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:919f594bb0f6e5162a48f640ac690d2c9a7b0b714ba53d3a4c28f994512b50d3
+ size 2313884807
data/CC-MAIN-2013-20/train-00015-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73f97a4dfcc319be6b1fcc6f72b90d615cf48c2368cdea4d060fecb4465a516a
+ size 2318174523
data/CC-MAIN-2013-20/train-00016-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:855d33dcc85f8e7a195cb73bc243bd08274930f81985dcbb331b41ce356377bc
+ size 2317891327
data/CC-MAIN-2013-20/train-00017-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd42f5718efb2b55b70f16a9cbf1fc6eba015ba6a426b68e18a56849cc1fbc3a
+ size 2313309788
data/CC-MAIN-2013-20/train-00018-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a78354484b3d76de45cc577dcbc21182364494332441f83c487ffaa0985ed6d
+ size 2318492121
data/CC-MAIN-2013-20/train-00019-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4998793d0d7cf718ae1d9cf6760b9b1f3ecc55432b57f2e8e5e502e1947b81a9
+ size 2310098237
data/CC-MAIN-2013-20/train-00020-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cefd8c2279f8ddaac72ea71df3335e4bb8ca660be0841837c67e0780a205dafa
+ size 2312730336
data/CC-MAIN-2013-20/train-00021-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96b14ee5822872bb199285841cb0bd998ef469343dc1e3052066c94c20fbc3f2
+ size 2315645610
data/CC-MAIN-2013-20/train-00022-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2ca800b53e452a479ed8f7143dedd4d063962ec49adc036e94c3a32646d9173
+ size 2316823574
data/CC-MAIN-2013-20/train-00023-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5744946554ada978b583876248dd655b9975409dc4d1e76785aa06ff0d20c84
+ size 2308616281
data/CC-MAIN-2013-48/train-00000-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51a96a33731bd1dd6d4593e663512f25a8886409230e5e96a3666c904c881a9f
+ size 2316438765
data/CC-MAIN-2013-48/train-00001-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3cc01bacadc88271e42c7789590ccc71faa130dccb90a5b21003762982f84f9d
+ size 2316463084
data/CC-MAIN-2013-48/train-00002-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fbdeac7305afcf235ea7e9cbe3708308a01274a7090a724b44e4c71bc5ec7a61
+ size 2317567424
data/CC-MAIN-2013-48/train-00003-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05f159ba4061ae1c6be14fe628ab738125fc096450a3fb71ca3139ab112462d0
+ size 2311228330
data/CC-MAIN-2013-48/train-00004-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd4357b6e625326a056eab302910862b25a5210631c16e0bec47fbd480d77949
+ size 2312287905
data/CC-MAIN-2013-48/train-00005-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8963b54a74b7098ec104ba860ec08401d7f34c3dff2994220241934037fb59a
+ size 2317989879
data/CC-MAIN-2013-48/train-00006-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a50e2d95b8c61838f3acd94538ebf52af54015289c56bf4cf6574bda29c1951c
+ size 2311797244
data/CC-MAIN-2013-48/train-00007-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:859b56981537ddbd2a2cf12bc88d02ba1e8028ea2b0ce27e36481f9312f7bda3
+ size 2315529779
data/CC-MAIN-2013-48/train-00008-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe98cb6c6880d5a9b6b96dd6f7e01b3a85eb44b9f36c239f091aeb8d5c7dc6da
+ size 2315072894
data/CC-MAIN-2013-48/train-00009-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ce61c31ab094bc6cff0ea1a4a757b171203666f52f5422ee2385fa0b6f5ede5
+ size 2311541777
data/CC-MAIN-2013-48/train-00010-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d44610927d9062a221b8e12ce05b8732a3a2f1d0757fc7fdc8f019df15e15149
+ size 2311440986
data/CC-MAIN-2013-48/train-00011-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:652bbaaa1fe091a2cbcee8dd484a9463567510b8b5fce0b566094d1402228194
+ size 2310864878
data/CC-MAIN-2013-48/train-00012-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa6b9bbced65ccbacd0827cb65818af8e77a7f2eeea71ceec27f4f91bed5d748
+ size 2319421443
data/CC-MAIN-2014-10/train-00000-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ae021359ef0c8b0f439e50f5f99de384f22ed1b85818b927bf259d162ec9ad9
+ size 2115692034
data/CC-MAIN-2014-10/train-00001-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2fcf591a493e778e443fe4a76937c52386b5fe987c75c29a80035e3a4bfe9225
+ size 2119099957
data/CC-MAIN-2014-10/train-00002-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:322bb80c66da4f5cf6fe2789f938010cd029cdb881abbdb9eaec1efb31f6dbbf
+ size 2117492731
data/CC-MAIN-2014-10/train-00003-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b5e2548fe3a1c353d4713a75b484c0258f8b7e2051bdc7370c1f6b448f4aee15
+ size 2118349913
data/CC-MAIN-2014-10/train-00004-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:95770b22d671cac1987404826481a927072555e59b47dea0a57ad9db2b491b4f
+ size 2120006649
data/CC-MAIN-2014-10/train-00005-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86de5315e7bf6e8973a6f2003100f995b261c40e2375d312c5cab104abb4ade4
+ size 2119245705
data/CC-MAIN-2014-10/train-00006-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38a311e7ba977e45f2f7feedf2420c5d8f013bdb3b615c87b0ec3a951ad58779
+ size 2114816960
data/CC-MAIN-2014-10/train-00007-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc923354df671f355af18d3bcd2f5ce9a6b729bf73f352e32d626f41cedc0e64
+ size 2115622239
data/CC-MAIN-2014-10/train-00008-of-00009.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a53e2c8bd89bd62a04cec8a143b10f50b20ac50e7dcfe1f978f1a4b0c516f7a
+ size 2118506741
data/CC-MAIN-2014-15/train-00000-of-00005.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:27e8f01d0ed1c982291a432987b5a0d698307b990ad8221740625a7ee6395f0b
+ size 2057930743
data/CC-MAIN-2014-15/train-00001-of-00005.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1f92f2253e3dabc804514b7e24bc9eadc49274262737b6e172c7cd389455ed5
+ size 2060712108