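"""Minimal Flask app exposing TensorFlow, PyTorch, Transformers and OpenCV endpoints.

Example usage once the server is running (a sketch, assuming the default
host/port configured below and the placeholder model paths replaced with
real models):

    curl -X POST http://localhost:5000/predict_tf \
         -H "Content-Type: application/json" \
         -d '{"input": [[1.0, 2.0, 3.0]]}'

    curl -X POST http://localhost:5000/generate_text \
         -H "Content-Type: application/json" \
         -d '{"prompt": "Once upon a time"}'

    curl -X POST http://localhost:5000/process_image \
         -F "file=@example.jpg"
"""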
import os

# Disable oneDNN optimizations; this has to be set before TensorFlow is
# imported for the environment variable to take effect.
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"

from flask import Flask, request, jsonify
import numpy as np
import tensorflow as tf
import torch
from transformers import pipeline
import cv2

app = Flask(__name__)

# Example model loading (replace the placeholder paths with your actual models)
# TensorFlow / Keras model
tf_model = tf.keras.models.load_model('path_to_your_tf_model')

# PyTorch model; this assumes the whole model object was saved with
# torch.save(model, ...). A saved state_dict would instead require the model
# class to be instantiated first and loaded via load_state_dict().
torch_model = torch.load('path_to_your_torch_model')
torch_model.eval()

# Hugging Face Transformers pipeline (e.g., for text generation with GPT-2;
# the model is downloaded on first use)
text_gen_pipeline = pipeline("text-generation", model="gpt2")
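# The placeholder paths above are only illustrative; they are expected to point
# at models exported roughly like this (a sketch, adjust to however your models
# were actually saved):
#     your_keras_model.save('path_to_your_tf_model')
#     torch.save(your_torch_model, 'path_to_your_torch_model')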

@app.route('/')
def index():
    return "Welcome to the AI app! Endpoints are ready."

# Endpoint to make predictions using the TensorFlow model
@app.route('/predict_tf', methods=['POST'])
def predict_tf():
    data = request.json
    # The JSON payload is expected to already include a batch dimension,
    # e.g. {"input": [[1.0, 2.0, 3.0]]}.
    input_data = np.array(data['input'])
    prediction = tf_model.predict(input_data)
    return jsonify({"prediction": prediction.tolist()})

# Endpoint to make predictions using the PyTorch model
@app.route('/predict_torch', methods=['POST'])
def predict_torch():
    data = request.json
    # Assumes a model that takes float inputs.
    input_data = torch.tensor(data['input'], dtype=torch.float32)
    # Disable gradient tracking for inference.
    with torch.no_grad():
        prediction = torch_model(input_data)
    return jsonify({"prediction": prediction.numpy().tolist()})

# Text generation using Hugging Face Transformers
@app.route('/generate_text', methods=['POST'])
def generate_text():
    data = request.json
    prompt = data['prompt']
    result = text_gen_pipeline(prompt, max_length=100, num_return_sequences=1)
    return jsonify({"generated_text": result[0]['generated_text']})

# Endpoint to process an image (using OpenCV)
@app.route('/process_image', methods=['POST'])
def process_image():
    if 'file' not in request.files:
        return jsonify({"error": "No file found"}), 400
    file = request.files['file']
    img = cv2.imdecode(np.frombuffer(file.read(), np.uint8), cv2.IMREAD_COLOR)
    if img is None:
        return jsonify({"error": "Could not decode image"}), 400
    # Example processing: convert the image to grayscale
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Save the processed image to the working directory
    processed_path = 'processed_image.jpg'
    cv2.imwrite(processed_path, gray_img)
    return jsonify({"message": "Image processed", "file_path": processed_path})

# Future abilities and additional features can be added here

if __name__ == '__main__':
    # Report GPU availability so it is visible in the startup log.
    print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
    # Run the app (debug=True is intended for development only)
    app.run(host='0.0.0.0', port=5000, debug=True)