Deploying an AI model on the web with Flask, part 2 (testing a local model)
Convert the model to ONNX format, then test the service with a local image.
# Main server code
import io
import numpy as np
import onnxruntime
from PIL import Image
from flask import Flask, request, jsonify
from gevent.pywsgi import WSGIServer
app = Flask(__name__)


@app.route('/')
def hello():
    """Root endpoint: a simple liveness check for the deployment."""
    return "Deploy"
@app.route('/predict', methods=["GET", "POST"])
def predict():
    """Run ONNX inference on an uploaded image using an uploaded model.

    Expects a multipart POST with two file fields:
      * 'model' -- serialized ONNX model bytes
      * 'image' -- an image file readable by PIL

    Returns JSON: {"success": bool, "predictions": int}, where
    'predictions' is the argmax class index of the model's first output.
    A GET request (or a POST missing either file) returns success=False.
    """
    # Dictionary returned to the caller; flipped to success only when
    # both inputs arrive and inference completes.
    data = {"success": False}
    if request.method == 'POST':
        model_bytes = None
        image_bytes = None
        if request.files.get('model'):
            # SECURITY NOTE(review): deserializing and running a model
            # uploaded by the client executes arbitrary graph ops --
            # only expose this endpoint on a trusted network.
            model_bytes = request.files['model'].read()
        if request.files.get('image'):
            image_bytes = request.files['image'].read()
        if model_bytes is not None and image_bytes is not None:
            # Force 3-channel RGB so grayscale or RGBA uploads do not
            # break the channel-first transpose below.
            img = Image.open(io.BytesIO(image_bytes)).convert('RGB')
            # NOTE(review): (2, 1, 0) produces C x W x H; the usual NCHW
            # layout uses (2, 0, 1) -- confirm against the exported
            # model's expected input shape.
            input_data_array = np.transpose(np.array(img).astype(np.float32), (2, 1, 0))
            # Add the batch dimension: (1, C, W, H).
            input_data_array = np.expand_dims(input_data_array, axis=0)
            # InferenceSession accepts the raw model bytes directly.
            session = onnxruntime.InferenceSession(model_bytes)
            input_name = session.get_inputs()[0].name
            output_name = session.get_outputs()[0].name
            predicted_patch = session.run([output_name], {input_name: input_data_array})[0][0]
            # tolist() converts the numpy scalar to a JSON-serializable int.
            data['predictions'] = np.argmax(predicted_patch).tolist()
            data['success'] = True
    return jsonify(data)
if __name__ == '__main__':
    # Serve with gevent's production WSGI server rather than Flask's
    # built-in development server (app.run).
    server = WSGIServer(('0.0.0.0', 5003), app)
    server.serve_forever()
# Local smoke test: POST a test image and an ONNX model to the running server.
import requests

# BUG FIX: the loopback address is 127.0.0.1 -- the original 172.0.0.1 is a
# routable public-range address and the request would hang or fail to connect.
url = "http://127.0.0.1:5003/predict"
image_path = "./test.png"
model_path = "./resnet18.onnx"

# Open both files in binary mode; the 'with' block guarantees they are closed
# even if the request raises.
with open(image_path, 'rb') as img, open(model_path, 'rb') as mod:
    response = requests.post(url, files={'image': img, 'model': mod})
print(response.text)

浙公网安备 33010602011771号