import json
import os
import re
import sys
from functools import partial

import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from langdetect import detect, LangDetectException
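
# Builds language-specific Parquet training shards from a tree of .txt files.
# Only files inside leaf directories named "english" or "hebrew"
# (case-insensitive) are collected; each shard is named after the top-level
# directory under the root plus the language,
# e.g. data/train_<top_level>_english.parquet.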


def traverse_directory(root_path, callback):
    """Recursively walk root_path and call callback on every file path."""
    for dirpath, _, filenames in os.walk(root_path):
        for filename in filenames:
            file_path = os.path.join(dirpath, filename)
            callback(file_path)


def process_file(file_path, root_path):
    """Read a .txt file and route its text to the matching Parquet shard."""
    if not file_path.endswith(".txt"):
        return

    with open(file_path, "r", encoding="utf-8") as file:
        content = file.read()

    dirname = os.path.dirname(file_path)
    dir_name = os.path.basename(dirname)
    # First path component below the root; used to name the output shard.
    top_level_directory = os.path.relpath(dirname, root_path).split(os.sep)[0]

    if dir_name.lower() == "english":
        append_to_parquet(content, file_path, "en", top_level_directory)
    elif dir_name.lower() == "hebrew":
        append_to_parquet(content, file_path, "he", top_level_directory)
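    # e.g. <root>/mishnah/english/intro.txt -> data/train_mishnah_english.parquet
    # (directory names above are illustrative)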


def append_to_parquet(content, file_path, lang, top_level_directory, data_dir="data"):
    """Clean content and append it as one row to the shard for (top_level_directory, lang)."""
    os.makedirs(data_dir, exist_ok=True)

    if lang == "en":
        parquet_file = os.path.join(data_dir, f"train_{top_level_directory}_english.parquet")
    elif lang == "he":
        parquet_file = os.path.join(data_dir, f"train_{top_level_directory}_hebrew.parquet")
    else:
        return

    # Skip files whose names end in a bracketed two-letter code, e.g. "[fr].txt".
    if re.search(r'\[[a-zA-Z]{2}\]\.txt$', file_path):
        print(f"Warning: File '{file_path}' was skipped due to the detected [xy] pattern.")
        return

    if lang == "en":
        # Sample only the first 500 characters; enough for language detection
        # without scanning the whole document.
        sample_text = content[:500]
        try:
            detected_lang = detect(sample_text)
        except LangDetectException:
            print(f"Warning: Language detection failed for file '{file_path}'.")
            return
        # 'id' is tolerated alongside 'en', presumably to absorb langdetect
        # misclassifications on short samples.
        if detected_lang not in ("en", "id"):
            print(f"Warning: Non-English content detected in file '{file_path}'. Detected language: {detected_lang}")
            return

    # Strip a small set of inline HTML tags (open or close) and bare URLs.
    content = re.sub(r'<(?:span|b|big|small|strong|br|sup)[^>]*>|</(?:span|b|big|small|strong|sup)>', '', content)
    content = re.sub(r'https?://\S+', '', content)

    if lang == "he":
        # Strip Hebrew cantillation marks (U+0591-U+05AF); letters and vowel
        # points are left intact.
        content = re.sub(r'[\u0591-\u05AF]', '', content)

    # Drop standalone section headings such as "Chapter 12" on their own line.
    chapter_markers = ['Chapter', 'Halakhah', 'Siman', 'Mitzvah']
    for marker in chapter_markers:
        content = re.sub(rf'^{marker} +\d+$', '', content, flags=re.MULTILINE)

    metadata = {"file": file_path}
    meta_json = json.dumps(metadata)

    data = pd.DataFrame({"meta": [meta_json], "text": [content]})
    table = pa.Table.from_pandas(data)
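    # Each appended row then has the shape (values illustrative):
    #   meta: '{"file": "corpus/mishnah/english/intro.txt"}'
    #   text: the cleaned document body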

    if not os.path.exists(parquet_file) or os.path.getsize(parquet_file) == 0:
        with pq.ParquetWriter(parquet_file, table.schema, compression="snappy") as writer:
            writer.write_table(table)
    else:
        # Parquet files cannot be appended to in place, so read the existing
        # rows back, concatenate the new row, and rewrite the whole file.
        # This is quadratic over many appends but simple and robust.
        old_table = pq.ParquetFile(parquet_file).read()
        combined_table = pa.concat_tables([old_table, table])

        with pq.ParquetWriter(parquet_file, combined_table.schema, compression="snappy") as writer:
            writer.write_table(combined_table)

    print(f"Successfully saved: {file_path}")


if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: python script.py <root_directory_path>")
        sys.exit(1)

    root_directory = sys.argv[1]
    # Bind the scan root so process_file can resolve paths relative to it.
    traverse_directory(root_directory, partial(process_file, root_path=root_directory))
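
# Example invocation (path is illustrative):
#   python script.py /path/to/corpus
# Shards are written to ./data as train_<top_level>_{english,hebrew}.parquet.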