Erfan11 committed on
Commit c2c7c25
1 Parent(s): ee8b512

Create app.py

Files changed (1)
  1. app.py +68 -19
app.py CHANGED
@@ -1,19 +1,68 @@
- import gradio as gr
-
- # Define your AI model or function here
- def my_ai_model(input_text):
-     # Your AI logic here (for now it just repeats the input)
-     result = f"You said: {input_text}"
-     return result
-
- # Create a Gradio interface
- interface = gr.Interface(
-     fn=my_ai_model,
-     inputs="text",
-     outputs="text",
-     title="My Simple AI",
-     description="This AI responds to your input!"
- )
-
- # Launch the interface
- interface.launch()
+ from flask import Flask, request, jsonify
+ import numpy as np
+ import tensorflow as tf
+ import torch
+ from transformers import pipeline
+ import cv2
+ import os
+
+ app = Flask(__name__)
+
+ # Example model loading (you can replace this with your actual models)
+ # TensorFlow model
+ tf_model = tf.keras.models.load_model('path_to_your_tf_model')
+
+ # PyTorch model
+ torch_model = torch.load('path_to_your_torch_model')
+ torch_model.eval()
+
+ # Hugging Face Transformers pipeline (e.g., for text generation)
+ text_gen_pipeline = pipeline("text-generation", model="gpt2")
+
+ @app.route('/')
+ def index():
+     return "Welcome to the AI app! Endpoints are ready."
+
+ # Endpoint to make predictions using the TensorFlow model
+ @app.route('/predict_tf', methods=['POST'])
+ def predict_tf():
+     data = request.json
+     input_data = np.array(data['input'])
+     prediction = tf_model.predict(input_data)
+     return jsonify({"prediction": prediction.tolist()})
+
+ # Endpoint to make predictions using the PyTorch model
+ @app.route('/predict_torch', methods=['POST'])
+ def predict_torch():
+     data = request.json
+     input_data = torch.tensor(data['input'])
+     prediction = torch_model(input_data)
+     return jsonify({"prediction": prediction.detach().numpy().tolist()})
+
+ # Text generation using Hugging Face Transformers
+ @app.route('/generate_text', methods=['POST'])
+ def generate_text():
+     data = request.json
+     prompt = data['prompt']
+     result = text_gen_pipeline(prompt, max_length=100, num_return_sequences=1)
+     return jsonify({"generated_text": result[0]['generated_text']})
+
+ # Endpoint to process an image (using OpenCV)
+ @app.route('/process_image', methods=['POST'])
+ def process_image():
+     if 'file' not in request.files:
+         return "No file found", 400
+     file = request.files['file']
+     img = cv2.imdecode(np.frombuffer(file.read(), np.uint8), cv2.IMREAD_COLOR)
+     # Example processing: convert image to grayscale
+     gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+     # Save the processed image
+     processed_path = 'processed_image.jpg'
+     cv2.imwrite(processed_path, gray_img)
+     return jsonify({"message": "Image processed", "file_path": processed_path})
+
+ # Future abilities and additional features can be added here
+
+ if __name__ == '__main__':
+     # Run the app
+     app.run(host='0.0.0.0', port=5000, debug=True)
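
The new imports imply roughly the following dependency set. This is only a sketch of a possible requirements.txt for the Space, not part of the commit itself; versions are left unpinned as an assumption, and gradio is no longer needed after this change.

flask
numpy
tensorflow
torch
transformers
opencv-python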
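
Below is a minimal client sketch for exercising the new endpoints, assuming the app is running locally on port 5000 and the placeholder model paths above have been replaced with real models. The payload shapes, the BASE_URL value, and the sample.jpg file are illustrative assumptions, not part of the commit.

# client_example.py -- hypothetical helper for testing the Flask endpoints above.
# Assumes the server is running at http://localhost:5000 and the example
# model paths in app.py point at real TensorFlow/PyTorch models.
import requests

BASE_URL = "http://localhost:5000"  # assumed local dev address

# TensorFlow prediction: JSON body with an "input" array (shape depends on your model)
r = requests.post(f"{BASE_URL}/predict_tf", json={"input": [[0.1, 0.2, 0.3]]})
print("TF prediction:", r.json())

# PyTorch prediction: same JSON shape, handled by /predict_torch
r = requests.post(f"{BASE_URL}/predict_torch", json={"input": [[0.1, 0.2, 0.3]]})
print("Torch prediction:", r.json())

# Text generation with the GPT-2 pipeline
r = requests.post(f"{BASE_URL}/generate_text", json={"prompt": "Hello, world"})
print("Generated text:", r.json())

# Image processing: upload a file as multipart/form-data under the "file" key
with open("sample.jpg", "rb") as f:  # sample.jpg is a placeholder image
    r = requests.post(f"{BASE_URL}/process_image", files={"file": f})
print("Image processing:", r.json())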