commit files to HF hub
- inference.py +10 -0
inference.py
ADDED
@@ -0,0 +1,10 @@
+from optimum.intel.openvino import OVModelForQuestionAnswering
+from transformers import AutoTokenizer, pipeline
+
+# model_id should be set to either a local directory or a model available on the HuggingFace hub.
+model_id = "helenai/csarron-bert-base-uncased-squad-v1-ov-fp32"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = OVModelForQuestionAnswering.from_pretrained(model_id)
+pipe = pipeline("question-answering", model=model, tokenizer=tokenizer)
+result = pipe("What is OpenVINO?", "OpenVINO is a framework that accelerates deep learning inferencing")
+print(result)
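
The comment in inference.py notes that model_id can also point to a local directory. Below is a minimal sketch of that option, not part of the commit: it saves the OpenVINO model and tokenizer to a local folder (the directory name "ov_model_local" is an arbitrary example) and reloads them by path before running the same question-answering pipeline.

from optimum.intel.openvino import OVModelForQuestionAnswering
from transformers import AutoTokenizer, pipeline

# Download from the hub once, as in inference.py.
model_id = "helenai/csarron-bert-base-uncased-squad-v1-ov-fp32"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = OVModelForQuestionAnswering.from_pretrained(model_id)

# Save the OpenVINO model and tokenizer to a local directory ("ov_model_local" is just an example name) ...
model.save_pretrained("ov_model_local")
tokenizer.save_pretrained("ov_model_local")

# ... then reload them by path instead of the hub id and run the same pipeline.
local_model = OVModelForQuestionAnswering.from_pretrained("ov_model_local")
local_tokenizer = AutoTokenizer.from_pretrained("ov_model_local")
pipe = pipeline("question-answering", model=local_model, tokenizer=local_tokenizer)
print(pipe(question="What is OpenVINO?", context="OpenVINO is a framework that accelerates deep learning inferencing"))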