Update app.py
app.py CHANGED
@@ -1,93 +1,19 @@
+from transformers import AutoModel, AutoTokenizer
 from flask import Flask, request, jsonify
-import numpy as np
-import tensorflow as tf
-import torch
-from transformers import pipeline
-import cv2
-import os
 
 app = Flask(__name__)
 
-#
-
-
+# Load model and tokenizer from Hugging Face Hub
+tokenizer = AutoTokenizer.from_pretrained("Erfan11/Neuracraft", use_auth_token="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")
+model = AutoModel.from_pretrained("Erfan11/Neuracraft", use_auth_token="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")
 
-
-
-
-
-
-
-
-@app.route('/')
-def index():
-    return "Welcome to the AI app! Endpoints are ready."
-
-# Endpoint to make predictions using TensorFlow model
-@app.route('/predict_tf', methods=['POST'])
-def predict_tf():
-    data = request.json
-    input_data = np.array(data['input'])
-    prediction = tf_model.predict(input_data)
-    return jsonify({"prediction": prediction.tolist()})
-
-# Endpoint to make predictions using PyTorch model
-@app.route('/predict_torch', methods=['POST'])
-def predict_torch():
-    data = request.json
-    input_data = torch.tensor(data['input'])
-    prediction = torch_model(input_data)
-    return jsonify({"prediction": prediction.detach().numpy().tolist()})
-
-# Text generation using Hugging Face Transformers
-@app.route('/generate_text', methods=['POST'])
-def generate_text():
-    data = request.json
-    prompt = data['prompt']
-    result = text_gen_pipeline(prompt, max_length=100, num_return_sequences=1)
-    return jsonify({"generated_text": result[0]['generated_text']})
-
-# Endpoint to process an image (using OpenCV)
-@app.route('/process_image', methods=['POST'])
-def process_image():
-    if 'file' not in request.files:
-        return "No file found", 400
-    file = request.files['file']
-    img = cv2.imdecode(np.frombuffer(file.read(), np.uint8), cv2.IMREAD_COLOR)
-    # Example processing: convert image to grayscale
-    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-    # Save the processed image
-    processed_path = 'processed_image.jpg'
-    cv2.imwrite(processed_path, gray_img)
-    return jsonify({"message": "Image processed", "file_path": processed_path})
-
-# Future abilities and additional features can be added here
+@app.route('/predict', methods=['POST'])
+def predict():
+    data = request.get_json()
+    inputs = tokenizer(data["text"], return_tensors="pt")
+    outputs = model(**inputs)
+    # Process your model's output as needed
+    return jsonify(outputs)
 
 if __name__ == '__main__':
-
-    app.run(host='0.0.0.0', port=5000, debug=True)
-import os
-os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
-import tensorflow as tf
-print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
-import os
-import tensorflow as tf
-
-# Set environment variable
-os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
-
-# Check GPU availability
-print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
-
-# Your main code goes here
-def main():
-    # Your application code
-    pass
-
-if __name__ == "__main__":
-    main()
-from transformers import AutoTokenizer, TFAutoModelForCausalLM
-
-# Load the tokenizer and model from Hugging Face using your model name
-tokenizer = AutoTokenizer.from_pretrained("Erfan11/Neuracraft")
-model = TFAutoModelForCausalLM.from_pretrained("Erfan11/Neuracraft")
+    app.run(debug=True)
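Review note on the new version: as committed, POSTing to /predict will raise a TypeError, because model(**inputs) returns a transformers model-output object holding torch.Tensor values, which jsonify cannot serialize. (The removed version had a related latent bug: tf_model, torch_model, and text_gen_pipeline were referenced but never defined.) Below is a minimal runnable sketch of the same endpoint, assuming the Erfan11/Neuracraft checkpoint loads under AutoModel with PyTorch weights; HF_TOKEN is a stand-in environment variable introduced here, since hardcoding a real hf_... token in a public Space exposes it to every reader of the file.

import os

import torch
from flask import Flask, request, jsonify
from transformers import AutoModel, AutoTokenizer

app = Flask(__name__)

# Assumption: the access token comes from the environment (e.g. a Space
# secret) rather than being hardcoded; recent transformers releases accept
# token= in place of the deprecated use_auth_token=.
HF_TOKEN = os.environ.get("HF_TOKEN")

tokenizer = AutoTokenizer.from_pretrained("Erfan11/Neuracraft", token=HF_TOKEN)
model = AutoModel.from_pretrained("Erfan11/Neuracraft", token=HF_TOKEN)
model.eval()  # inference mode: disables dropout and similar train-time behavior

@app.route('/predict', methods=['POST'])
def predict():
    data = request.get_json()
    inputs = tokenizer(data["text"], return_tensors="pt")
    with torch.no_grad():  # serving only; no gradients needed
        outputs = model(**inputs)
    # Tensors are not JSON-serializable, so convert to plain nested lists.
    hidden = outputs.last_hidden_state.tolist()
    return jsonify({"last_hidden_state": hidden})

if __name__ == '__main__':
    app.run(debug=True)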
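A quick smoke test for the sketch above, assuming the server is running locally on Flask's default port 5000:

import requests

resp = requests.post(
    "http://127.0.0.1:5000/predict",
    json={"text": "Hello, Neuracraft!"},
)
resp.raise_for_status()
print(list(resp.json().keys()))  # expected: ['last_hidden_state']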