Textwizai / app.py

import os

# Must be set before TensorFlow is imported to take effect.
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"

from flask import Flask, request, jsonify
import numpy as np
import tensorflow as tf
import torch
from transformers import pipeline
import cv2

# Check GPU availability
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))

app = Flask(__name__)

# Example model loading (you can replace these with your actual models)

# TensorFlow model
tf_model = tf.keras.models.load_model('path_to_your_tf_model')

# PyTorch model
torch_model = torch.load('path_to_your_torch_model')
torch_model.eval()
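# Note: torch.load() on a whole pickled model only works if the model's class
# is importable here. If you saved a state_dict instead, the usual pattern is
# (MyModel below is a placeholder for your own architecture):
#   torch_model = MyModel()
#   torch_model.load_state_dict(torch.load('path_to_your_torch_model'))
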
# Hugging Face Transformers pipeline (e.g., for text generation)
text_gen_pipeline = pipeline("text-generation", model="gpt2")
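# The "gpt2" weights are downloaded from the Hugging Face Hub on first use;
# pass device=0 to pipeline() above if generation should run on a GPU.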


@app.route('/')
def index():
    return "Welcome to the AI app! Endpoints are ready."


# Endpoint to make predictions using the TensorFlow model
@app.route('/predict_tf', methods=['POST'])
def predict_tf():
    data = request.json
    input_data = np.array(data['input'])
    prediction = tf_model.predict(input_data)
    return jsonify({"prediction": prediction.tolist()})
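# Example request (the expected shape of "input" depends on your model):
#   curl -X POST http://localhost:5000/predict_tf \
#        -H "Content-Type: application/json" \
#        -d '{"input": [[0.1, 0.2, 0.3]]}'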


# Endpoint to make predictions using the PyTorch model
@app.route('/predict_torch', methods=['POST'])
def predict_torch():
    data = request.json
    input_data = torch.tensor(data['input'])
    prediction = torch_model(input_data)
    return jsonify({"prediction": prediction.detach().numpy().tolist()})
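# Example request: same JSON shape as /predict_tf, e.g. '{"input": [[0.1, 0.2, 0.3]]}'.
# Wrapping the forward pass in torch.no_grad() would also skip graph building;
# the .detach() call already keeps gradient state out of the JSON response.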


# Text generation using Hugging Face Transformers
@app.route('/generate_text', methods=['POST'])
def generate_text():
    data = request.json
    prompt = data['prompt']
    result = text_gen_pipeline(prompt, max_length=100, num_return_sequences=1)
    return jsonify({"generated_text": result[0]['generated_text']})
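# Example request:
#   curl -X POST http://localhost:5000/generate_text \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "Once upon a time"}'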


# Endpoint to process an image (using OpenCV)
@app.route('/process_image', methods=['POST'])
def process_image():
    if 'file' not in request.files:
        return "No file found", 400
    file = request.files['file']
    img = cv2.imdecode(np.frombuffer(file.read(), np.uint8), cv2.IMREAD_COLOR)
    if img is None:
        return "Could not decode image", 400
    # Example processing: convert image to grayscale
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Save the processed image
    processed_path = 'processed_image.jpg'
    cv2.imwrite(processed_path, gray_img)
    return jsonify({"message": "Image processed", "file_path": processed_path})
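# Example request (multipart file upload):
#   curl -X POST http://localhost:5000/process_image -F "file=@input.jpg"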


# Future abilities and additional features can be added here

if __name__ == '__main__':
    # Run the app
    app.run(host='0.0.0.0', port=5000, debug=True)
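# For anything beyond local testing, a WSGI server is the usual choice, e.g.:
#   gunicorn -w 1 -b 0.0.0.0:5000 app:app
# (a single worker keeps only one copy of the models in memory)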