import argparse
import json
import random

import ftfy
import ir_datasets
from tqdm.auto import tqdm

random.seed(1)
def load_examples(base_dataset):
    """Load (query, document) text pairs for every qrel in an ir_datasets dataset."""
    dataset = ir_datasets.load(base_dataset)

    # Map doc_id -> cleaned document text, prepending the title when the
    # dataset's document class has one.
    documents = {}
    for doc in tqdm(
        dataset.docs_iter(), total=dataset.docs_count(), desc="Loading documents"
    ):
        text = (
            f"{doc.title} {doc.text}"
            if "title" in dataset.docs_cls()._fields
            else doc.text
        )
        documents[doc.doc_id] = ftfy.fix_text(text)

    # Map query_id -> cleaned query text.
    queries = {}
    for query in dataset.queries_iter():
        queries[query.query_id] = ftfy.fix_text(query.text)

    # Emit one (query, document) pair per relevance judgment.
    return [
        (queries[qrel.query_id], documents[qrel.doc_id])
        for qrel in dataset.qrels_iter()
    ]
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # type=int so a value passed on the command line is parsed as an integer
    # rather than a string (which would break the slice below).
    parser.add_argument("--num_examples", type=int, default=500_000)
    parser.add_argument("--dataset", default="beir/msmarco/train")
    parser.add_argument("--output", required=True)
    args = parser.parse_args()

    examples = load_examples(args.dataset)
    random.shuffle(examples)

    # Write a shuffled sample of pairs as JSON Lines, one object per line.
    with open(args.output, "w") as f:
        for query, document in tqdm(
            examples[: args.num_examples], total=args.num_examples, desc="Writing"
        ):
            f.write(json.dumps({"query": query, "document": document}) + "\n")
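
# Example invocation (the script filename is illustrative; --output is required,
# the other flags fall back to the defaults above):
#   python prepare_data.py --dataset beir/msmarco/train \
#       --num_examples 500000 --output msmarco_pairs.jsonl
# Each line of the output file is a JSON object: {"query": ..., "document": ...}.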