Python: Deploying a Keras model with Flask, POST requests always give the same answer


I'm building a Keras model that classifies pictures by coral species. The model works well, but when I build a Flask API to access it, I always get the same answer. The model currently distinguishes three different coral species and performs well when tested outside the API.

The dominant color is returned correctly. I tried retraining the model, but that didn't help. When I switched to a different, binary model that detects cats vs. dogs, it seemed to work fine.

I tested the same image in Jupyter and by sending a POST request, and got very different answers. Here is the server code:

import os
from flask import Flask, flash, request, redirect, url_for, jsonify
from werkzeug.utils import secure_filename
import cv2
import numpy as np
import keras
from keras.models import load_model
from keras import backend as K


UPLOAD_FOLDER = './uploads/'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
DEBUG = True
app = Flask(__name__)
app.config.from_object(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

def allowed_file(filename):
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS

@app.route('/', methods=['GET', 'POST'])
def upload_file():
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # if the user does not select a file, the browser also
        # submits an empty part without a filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            image = cv2.imread(os.path.dirname(os.path.realpath(__file__))+"/uploads/"+filename)
            color_result = getDominantColor(image)
            coral = coraltype(image)
            #return redirect(url_for('upload_file',filename=filename)), jsonify({"key":
            return jsonify({"MainColor": color_result, "species": coral} )
    return '''
    <!doctype html>
    <title>API</title>
    <h1>API Running Successfully</h1>'''

def coraltype(image):
    '''Predicts the coral species shown in the image.'''

    model = load_model('./models/coral_classifier_cnn_1.h5')
    image = cv2.resize(image, (40,40), interpolation = cv2.INTER_AREA)
    image = image.reshape(1,40,40,3) 
    res = model.predict_classes(image, 1, verbose = 0)
    print(res)
    print(type(res))
    res = str(res)
    K.clear_session()
    return res

def getDominantColor(image):
    '''Returns the dominant color among Blue, Green and Red in the image.'''
    B, G, R = cv2.split(image)
    B, G, R = np.sum(B), np.sum(G), np.sum(R)
    color_sums = [B,G,R]
    color_values = {"0": "Blue", "1":"Green", "2": "Red"}
    return color_values[str(np.argmax(color_sums))]


if __name__ == "__main__":
    app.run()
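To help narrow down where the Jupyter and POST results diverge, here is a minimal standalone sketch that reproduces the route's exact preprocessing outside Flask (same cv2 load, resize, and reshape); the image path is a placeholder:

# Standalone sketch: reproduce the route's preprocessing outside Flask for comparison.
import cv2
from keras.models import load_model

model = load_model('./models/coral_classifier_cnn_1.h5')

image = cv2.imread('./uploads/sample_coral.jpg')                  # placeholder test image
image = cv2.resize(image, (40, 40), interpolation=cv2.INTER_AREA)
image = image.reshape(1, 40, 40, 3)

print(model.predict(image))                        # raw per-class probabilities
print(model.predict_classes(image, 1, verbose=0))  # predicted class index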

Comment: Don't just copy-paste the entire code; try debugging it to narrow the problem down. If you're confident the model itself is correct, debug the Flask code. Is the dominant color value always the same as well? Have a look at pdb for debugging Python.
Reply: I should have provided more information, my mistake. The dominant color value does change. I've tried other ways of loading the model and retrained it, but the prediction still doesn't change.
Comment: Does your model work in other situations? What are the performance metrics?
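Following the pdb suggestion, a minimal sketch of the coraltype helper from the server code above with a breakpoint added before the prediction, so the input array can be inspected per request:

import pdb

def coraltype(image):
    '''Same as coraltype above, with a breakpoint before the prediction.'''
    model = load_model('./models/coral_classifier_cnn_1.h5')
    image = cv2.resize(image, (40, 40), interpolation=cv2.INTER_AREA)
    image = image.reshape(1, 40, 40, 3)
    pdb.set_trace()   # inspect image.shape, image.dtype, image.min(), image.max()
    res = model.predict_classes(image, 1, verbose=0)
    K.clear_session()
    return str(res)

The training code is below.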
from __future__ import print_function
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import os

num_classes = 3
img_rows, img_cols = 40, 40
batch_size = 16

train_data_dir = './corals/train'
validation_data_dir = './corals/validation'

train_datagen = ImageDataGenerator(
      rescale=1./255,
      rotation_range=30,
      width_shift_range=0.3,
      height_shift_range=0.3,
      horizontal_flip=True,
      fill_mode='nearest')

validation_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_rows, img_cols),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=True)

validation_generator = validation_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_rows, img_cols),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=False)

model = Sequential()

# Padding = 'same'  results in padding the input such that
# the output has the same length as the original input
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape= (img_rows, img_cols, 3)))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))

from keras.optimizers import RMSprop, SGD
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau


checkpoint = ModelCheckpoint("/home/deeplearningcv/DeepLearningCV/Trained Models/coral_classifier_cnn_1.h5",
                             monitor="val_loss",
                             mode="min",
                             save_best_only = True,
                             verbose=1)

earlystop = EarlyStopping(monitor = 'val_loss', 
                          min_delta = 0, 
                          patience = 3,
                          verbose = 1,
                          restore_best_weights = True)

reduce_lr = ReduceLROnPlateau(monitor = 'val_loss',
                              factor = 0.2,
                              patience = 3,
                              verbose = 1,
                              min_delta = 0.0001)

# we put our callbacks into a callback list
callbacks = [earlystop, checkpoint, reduce_lr]

# Compile the model with RMSprop (lr = 0.001)
model.compile(loss = 'categorical_crossentropy',
              optimizer = RMSprop(lr = 0.001),
              metrics = ['accuracy'])

nb_train_samples = 793
nb_validation_samples = 42
epochs = 10

history = model.fit_generator(
    train_generator,
    steps_per_epoch = nb_train_samples,
    epochs = epochs,
    callbacks = callbacks,
    validation_data = validation_generator,
    validation_steps = nb_validation_samples)
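As a final check with the training script above: predict_classes returns an integer index, and flow_from_directory assigns those indices alphabetically from the class subfolder names. A minimal sketch of mapping predictions back to names, using the generators and model defined above (the example folder names are placeholders):

# Map predicted class indices back to the species (subfolder) names.
labels = {v: k for k, v in train_generator.class_indices.items()}
# e.g. {0: 'acropora', 1: 'montipora', 2: 'porites'}  (placeholder names)

x_val, y_val = next(validation_generator)           # one batch of validation images
pred = model.predict_classes(x_val, verbose=0)      # integer class indices
print([labels[int(i)] for i in pred])               # human-readable species names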