Python Pickle serialization: module '__main__' has no attribute 'tokenize'

Tags: python, flask, scikit-learn, pickle

I have a script that trains a machine learning model and saves it via pickle.

When I try to load the model in my website, I get the error message: "module '__main__' has no attribute 'tokenize'". I have already tried importing the function tokenize, and also copying it directly into the loading script, but nothing works.

The full training script:

import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import re
import seaborn as sns
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, fbeta_score, make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.externals import joblib

import pickle

nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')


def load_data(database_filepath):
    """takes path to db as input and loads data. Return X, Y and target_names"""
    engine = create_engine('sqlite:///{}'.format(database_filepath))
    df = pd.read_sql('disaster_data', engine)
    X = df.message.values
    Y = df.drop(['message', 'id', 'original', 'genre'], axis=1).values
    target_names = df.drop(['message', 'id', 'original', 'genre'], axis=1).columns

    return X, Y, target_names

def tokenize(text):
    """Takes a text as input and returns a list of tokenized words"""
    stop_words = stopwords.words("english")
    text = re.sub(r"[^a-zA-Z0-9]", " ", text).lower().strip()
    words = word_tokenize(text)
    tokens = [WordNetLemmatizer().lemmatize(w) for w in words if w not in stop_words]
    clean_tokens = [PorterStemmer().stem(w) for w in tokens]

    return clean_tokens

def build_model():
    """Builds a model. Returns a GridSearchCV object"""
    pipeline = Pipeline([
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(RandomForestClassifier(), n_jobs=1)),
        ])
    parameters = {'clf__estimator__max_depth': [30],
                  'clf__estimator__min_samples_leaf': [5],
                  'clf__estimator__min_samples_split': [5],
                  'clf__estimator__n_estimators': [100]}

    return GridSearchCV(estimator=pipeline, param_grid=parameters, verbose=10, n_jobs=1)


def evaluate_model(model, X_test, Y_test, category_names):
    """Takes model, X_test, Y_test and category names as input and evaluates the model"""
    y_pred = model.predict(X_test)
    print("Accuracy of the model:", (y_pred == Y_test).mean())
    print(classification_report(Y_test, y_pred, target_names=category_names))


def save_model(model, model_filepath):
    """Takes model and path for saving as input and saves the model"""
    pickle.dump(model, open(model_filepath, 'wb'))
    # Uncomment for joblib saving
    # joblib.dump(model, model_filepath)

def main():
    """Main function"""
    if len(sys.argv) == 3:
        database_filepath, model_filepath = sys.argv[1:]
        print('Loading data...\n    DATABASE: {}'.format(database_filepath))
        X, Y, category_names = load_data(database_filepath)
        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)

        print('Building model...')
        model = build_model()

        print('Training model...')
        model.fit(X_train, Y_train)

        print('Evaluating model...')
        evaluate_model(model, X_test, Y_test, category_names)

        print('Saving model...\n    MODEL: {}'.format(model_filepath))
        save_model(model, model_filepath)

        print('Trained model saved!')

    else:
        print('Please provide the filepath of the disaster messages database '\
              'as the first argument and the filepath of the pickle file to '\
              'save the model to as the second argument. \n\nExample: python '\
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')


if __name__ == '__main__':
    main()
The loading script:

import json
import re

import plotly
import pandas as pd
import nltk
import pickle

from flask import Flask, render_template, request, jsonify
from plotly.graph_objs import Bar
from sklearn.externals import joblib
from sklearn.feature_extraction.text import CountVectorizer
from sqlalchemy import create_engine
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer

from wrangling_scripts.wrangle_data import return_figures
from train_classifier_for_web import tokenize


nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')

app = Flask(__name__)


def tokenize(text):
    """Takes a text as input and returns a list of tokenized words"""
    stop_words = stopwords.words("english")
    text = re.sub(r"[^a-zA-Z0-9]", " ", text).lower().strip()
    words = word_tokenize(text)
    tokens = [WordNetLemmatizer().lemmatize(w) for w in words if w not in stop_words]
    return [PorterStemmer().stem(w) for w in tokens]

@app.before_first_request
def main():
    try:
        engine = create_engine('sqlite:///DisasterResponse.db')
        df = pd.read_sql_table('disaster_data', engine)
    except Exception as e:
        print("path error to sql db", e)
    try:
        # joblib.load takes only the path; its second argument is mmap_mode, not a file mode
        model = joblib.load('web_model.sav')
    except Exception as e:
        print("can't load model", e)
SOLVED:

I saved the tokenize function in a separate module and imported it in the same way into both the training script and the loading script.
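
For reference, a minimal sketch of that fix. The module name tokenizer_utils.py is illustrative, not from the original post:

# tokenizer_utils.py -- imported by both the training script and the Flask app
import re
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer

nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')


def tokenize(text):
    """Takes a text as input and returns a list of tokenized words"""
    stop_words = stopwords.words("english")
    text = re.sub(r"[^a-zA-Z0-9]", " ", text).lower().strip()
    words = word_tokenize(text)
    tokens = [WordNetLemmatizer().lemmatize(w) for w in words if w not in stop_words]
    return [PorterStemmer().stem(w) for w in tokens]

Both scripts then use "from tokenizer_utils import tokenize", so the pickle records the function as tokenizer_utils.tokenize rather than __main__.tokenize, and the reference resolves the same way in the Flask process.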

Comments:

- Please show the full training script. There is a function "tokenize" that should also be available at load time.
- It is shown now. The function tokenize is also available in the loading script.
- Try moving the tokenize(text) method into the main() method.
- I tried moving it inside, but it doesn't work. I also tried without a main function, just loading, and still nothing.
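
For context on why moving the function around does not help: pickle serializes a function only as a reference, i.e. module name plus attribute name. Because training ran as "python train_classifier.py", the CountVectorizer's tokenizer was recorded as __main__.tokenize; when the Flask app loads the pickle, its own __main__ (the WSGI entry point) has no such attribute, hence the error. Besides the separate-module fix above, a commonly used workaround (a sketch, not from the original post) is to rebind the function onto __main__ before loading:

# sketch: make the pickled reference '__main__.tokenize' resolvable in this process
import __main__
from train_classifier_for_web import tokenize  # any module that defines tokenize

__main__.tokenize = tokenize          # pickle can now look up __main__.tokenize
model = joblib.load('web_model.sav')  # same applies to pickle.load(open(..., 'rb'))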