Python 3.x curve fit does not fit the data correctly


I have some experimental data that needs to be fitted so that we can determine the x value corresponding to a given y value.

import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
#from xlrd import open_workbook


points = np.array([(0, -0.0142294), (20, 0.0308458785714286), (50, 0.1091054),
                   (100, 0.2379176875), (200, 0.404354166666667)])
x = points[:,0]
y = points[:,1]
def func(x, p1,p2):
  return p1*(1-np.e**(-p2*x))

popt, pcov = curve_fit(func, x, y)
p1 = popt[0]
p2 = popt[1]

curvex=np.linspace(0,200,1000)
fit = func(curvex, p1, p2)
plt.plot(x, y, 'yo', label='data')

# interpolate the fitted curve in reverse (y -> x) to look up x for a given y
f = interp1d(fit, curvex, kind = 'nearest')
print(f(100))
plt.plot(curvex,fit,'r', linewidth=1)

plt.plot(x,y,'x',label = 'Xsaved')

plt.show()

The data is not being fitted correctly. Any help would be greatly appreciated.

Here is an example graphical fitter using your data and equation, with scipy's differential_evolution genetic algorithm used to supply the initial parameter estimates. The scipy implementation of differential evolution uses the Latin Hypercube algorithm to ensure a thorough search of parameter space, and this requires bounds within which to search. In this example, those bounds are taken from the data maximum and minimum values, which appears to work here. Note that it is much easier to find ranges within which to search than to find specific initial values.

import numpy, scipy, matplotlib
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.optimize import differential_evolution
import warnings


points = numpy.array([(0, -0.0142294), (20, 0.0308458785714286), (50, 0.1091054), (100 ,0.2379176875), (200, 0.404354166666667)])
x = points[:,0]
y = points[:,1]

# rename to match previous example code below
xData = x
yData = y


def func(x, p1,p2):
  return p1*(1-numpy.exp(-p2*x))


# function for genetic algorithm to minimize (sum of squared error)
def sumOfSquaredError(parameterTuple):
    warnings.filterwarnings("ignore") # do not print warnings by genetic algorithm
    val = func(xData, *parameterTuple)
    return numpy.sum((yData - val) ** 2.0)


def generate_Initial_Parameters():
    # min and max used for bounds
    maxX = max(xData)
    minX = min(xData)
    maxY = max(yData)
    minY = min(yData)

    minAllData = min(minX, minY)
    maxAllData = max(maxX, maxY)

    parameterBounds = []
    parameterBounds.append([minAllData, maxAllData]) # search bounds for p1
    parameterBounds.append([minAllData, maxAllData]) # search bounds for p2

    # "seed" the numpy random number generator for repeatable results
    result = differential_evolution(sumOfSquaredError, parameterBounds, seed=3)
    return result.x

# by default (polish=True), differential_evolution finishes by polishing its best
# result with scipy.optimize.minimize (L-BFGS-B), still within the parameter bounds
geneticParameters = generate_Initial_Parameters()

# now call curve_fit using the genetic algorithm result as the initial guess,
# without passing bounds, in case the best-fit parameters lie outside those bounds
fittedParameters, pcov = curve_fit(func, xData, yData, geneticParameters)
print('Fitted parameters:', fittedParameters)
print()

modelPredictions = func(xData, *fittedParameters) 

absError = modelPredictions - yData

SE = numpy.square(absError) # squared errors
MSE = numpy.mean(SE) # mean squared errors
RMSE = numpy.sqrt(MSE) # Root Mean Squared Error, RMSE
Rsquared = 1.0 - (numpy.var(absError) / numpy.var(yData))

print()
print('RMSE:', RMSE)
print('R-squared:', Rsquared)

print()


##########################################################
# graphics output section
def ModelAndScatterPlot(graphWidth, graphHeight):
    f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    axes = f.add_subplot(111)

    # first the raw data as a scatter plot
    axes.plot(xData, yData,  'D')

    # create data for the fitted equation plot
    xModel = numpy.linspace(min(xData), max(xData))
    yModel = func(xModel, *fittedParameters)

    # now the model as a line plot
    axes.plot(xModel, yModel)

    axes.set_xlabel('X Data') # X axis data label
    axes.set_ylabel('Y Data') # Y axis data label

    plt.show()
    plt.close('all') # clean up after using pyplot

graphWidth = 800
graphHeight = 600
ModelAndScatterPlot(graphWidth, graphHeight)
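
Since the stated goal was to read off the x value for a given y, the fitted equation y = p1*(1 - exp(-p2*x)) can also be inverted algebraically once the parameters are known, instead of interpolating a sampled curve as in the question. Below is a minimal sketch that builds on the fittedParameters from the code above; the names invert_model and targetY are illustrative and not part of the original answer.

# solve y = p1*(1 - exp(-p2*x)) for x:  x = -ln(1 - y/p1) / p2
def invert_model(yValue, p1, p2):
    ratio = 1.0 - yValue / p1
    if ratio <= 0.0:
        raise ValueError('y value is outside the range the fitted model can reach')
    return -numpy.log(ratio) / p2

p1, p2 = fittedParameters
targetY = 0.2  # example target y value, chosen inside the range of the data
print('x for y = %.3f:' % targetY, invert_model(targetY, p1, p2))

Note that this inversion is only defined for y values below p1, since the model saturates at p1 for large x.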


Your starting point is way too far off (curve_fit defaults to all ones, IIRC). Add the keyword p0=[1, 0.01] when calling curve_fit.

Can you elaborate on how you got those p0 values?

I just tried different numbers until it worked.

My friend and I are still a bit confused; what do those values actually do?

p0 is the starting point that is updated on each iteration until curve_fit converges to a good enough fit; essentially, curve_fit turns p0 into popt. Read the documentation for more information.
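
For completeness, the fix suggested in these comments amounts to a one-line change to the code in the question: give curve_fit an explicit starting guess via p0. Below is a minimal self-contained sketch using the data from the question; the p0 values are the ones suggested in the comments, not values derived independently here.

import numpy as np
from scipy.optimize import curve_fit

points = np.array([(0, -0.0142294), (20, 0.0308458785714286), (50, 0.1091054),
                   (100, 0.2379176875), (200, 0.404354166666667)])
x = points[:, 0]
y = points[:, 1]

def func(x, p1, p2):
    return p1 * (1 - np.exp(-p2 * x))

# without p0, curve_fit starts from all ones, which is too far off for this data;
# the starting guess below lets the optimizer converge to a sensible fit
popt, pcov = curve_fit(func, x, y, p0=[1, 0.01])
print('p1 = %g, p2 = %g' % (popt[0], popt[1]))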