File size: 966 Bytes
219c6cc db3f254 3c57010 db3f254 219c6cc 3c57010 db3f254 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 |
import os

from huggingface_hub import hf_hub_download, snapshot_download
from transformers import AutoModelForPreTraining, AutoTokenizer
# Download the FULL model repository into a local directory, then load from it.
#
# NOTE(fix): the previous version downloaded only pytorch_model.bin and
# tokenizer_config.json. That is not enough for from_pretrained():
#   - AutoModelForPreTraining.from_pretrained() also needs config.json
#   - AutoTokenizer.from_pretrained() also needs the vocabulary file
#     (vocab.txt / tokenizer.json)
# so the load step failed at runtime. snapshot_download() fetches every file
# in the repo in one call. It also writes directly into the target directory,
# avoiding the old os.rename() from the HF cache into /tmp, which raises
# OSError when the cache and /tmp live on different filesystems.

# Define the path where the model and tokenizer will be loaded from.
model_directory = "/tmp/InLegalBERT"

# Create the directory if it doesn't exist (snapshot_download would create
# it too, but this keeps the intent explicit).
os.makedirs(model_directory, exist_ok=True)

# Download every file of the repo (config, weights, tokenizer assets)
# directly into model_directory.
snapshot_download("law-ai/InLegalBERT", local_dir=model_directory)

# Load the model and tokenizer from the now fully populated local directory.
tokenizer = AutoTokenizer.from_pretrained(model_directory)
model = AutoModelForPreTraining.from_pretrained(model_directory)
print("Model and tokenizer loaded successfully!")
|