import os
from dotenv import load_dotenv
from transformers import TFBertForSequenceClassification, BertTokenizerFast
import tensorflow as tf

# Load environment variables from a local .env file (if present) so the
# Hugging Face access token is not hardcoded in the source.
load_dotenv()

MODEL_NAME = "Erfan11/Neuracraft"
API_KEY = os.getenv("HF_API_KEY")  # personal Hugging Face access token

def load_model(model_name):
    # Load the TensorFlow model weights from the Hugging Face Hub
    model = TFBertForSequenceClassification.from_pretrained(model_name, use_auth_token=API_KEY)
    return model

def load_tokenizer(model_name):
    # Load the matching fast tokenizer for the model
    tokenizer = BertTokenizerFast.from_pretrained(model_name, use_auth_token=API_KEY)
    return tokenizer

def predict(text, model, tokenizer):
    inputs = tokenizer(text, return_tensors="tf")
    outputs = model(**inputs)
    return outputs
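
# A minimal sketch (not part of the original script): turning the raw model
# outputs into a predicted class. TFBertForSequenceClassification returns
# logits, so a softmax gives per-class probabilities and argmax picks the
# most likely class id. What each id means depends on how Erfan11/Neuracraft
# was fine-tuned.
def interpret(outputs):
    probs = tf.nn.softmax(outputs.logits, axis=-1)
    predicted_class = int(tf.argmax(probs, axis=-1)[0])
    return predicted_class, probs.numpy()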

def main():
    model_name = MODEL_NAME
    model = load_model(model_name)
    tokenizer = load_tokenizer(model_name)
    # Example usage
    text = "Sample input text"
    result = predict(text, model, tokenizer)
    print(result)

if __name__ == "__main__":
    main()