Warning: file_get_contents(/data/phpspider/zhask/data//catemap/4/algorithm/11.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Algorithm 八度:逻辑回归:fmincg和fminunc之间的差异_Algorithm_Machine Learning_Neural Network_Octave - Fatal编程技术网

Algorithm 八度:逻辑回归:fmincg和fminunc之间的差异

Algorithm 八度:逻辑回归:fmincg和fminunc之间的差异,algorithm,machine-learning,neural-network,octave,Algorithm,Machine Learning,Neural Network,Octave,对于逻辑回归问题,我经常使用fminunc。我在网上读到过使用fmincg而不是fminunc,参数相同的文章。结果是不同的,通常fmincg更精确,但不太多。(我正在将fmincg函数fminunc的结果与相同的数据进行比较) 所以,我的问题是:这两个函数之间有什么区别?每个函数都实现了什么算法?(现在,我只是使用这些函数,而不知道它们是如何工作的) 谢谢:)您必须查看fmincg的代码,因为它不是倍频程的一部分。经过搜索,我发现这是Coursera机器学习课程作为家庭作业的一部分提供的函数文

对于逻辑回归问题,我经常使用
fminunc
。我在网上读到过使用
fmincg
而不是
fminunc
,参数相同的文章。结果是不同的,通常
fmincg
更精确,但相差不大。(我正在用相同的数据比较 fmincg 与 fminunc 的结果。)

所以,我的问题是:这两个函数之间有什么区别?每个函数都实现了什么算法?(现在,我只是使用这些函数,而不知道它们是如何工作的)


谢谢:)

您必须查看
fmincg
的代码,因为它不是倍频程的一部分。经过搜索,我发现这是Coursera机器学习课程作为家庭作业的一部分提供的函数文件。阅读上的评论和答案,了解有关算法的讨论

fmincg为什么起作用

这是一份源代码的副本,带有解释所用各种算法的注释。这是一种愚蠢的行为,就像孩子的大脑在学习区分狗和椅子时做的一样

这是fmincg.m的倍频程源

function [X, fX, i] = fmincg(f, X, options, P1, P2, P3, P4, P5)
% Minimize a continuous differentiable multivariate function. Starting point
% is given by "X" (D by 1), and the function named in the string "f", must
% return a function value and a vector of partial derivatives. The Polack-
% Ribiere flavour of conjugate gradients is used to compute search directions,
% and a line search using quadratic and cubic polynomial approximations and the
% Wolfe-Powell stopping criteria is used together with the slope ratio method
% for guessing initial step sizes. Additionally a bunch of checks are made to
% make sure that exploration is taking place and that extrapolation will not
% be unboundedly large. The "length" gives the length of the run: if it is
% positive, it gives the maximum number of line searches, if negative its
% absolute gives the maximum allowed number of function evaluations. You can
% (optionally) give "length" a second component, which will indicate the
% reduction in function value to be expected in the first line-search (defaults
% to 1.0). The function returns when either its length is up, or if no further
% progress can be made (ie, we are at a minimum, or so close that due to
% numerical problems, we cannot get any closer). If the function terminates
% within a few iterations, it could be an indication that the function value
% and derivatives are not consistent (ie, there may be a bug in the
% implementation of your "f" function). The function returns the found
% solution "X", a vector of function values "fX" indicating the progress made
% and "i" the number of iterations (line searches or function evaluations,
% depending on the sign of "length") used.
%
% Usage: [X, fX, i] = fmincg(f, X, options, P1, P2, P3, P4, P5)
%
% See also: checkgrad
%
% Copyright (C) 2001 and 2002 by Carl Edward Rasmussen. Date 2002-02-13
%
%
% (C) Copyright 1999, 2000 & 2001, Carl Edward Rasmussen
%
% Permission is granted for anyone to copy, use, or modify these
% programs and accompanying documents for purposes of research or
% education, provided this copyright notice is retained, and note is
% made of any changes that have been made.
%
% These programs and documents are distributed without any warranty,
% express or implied.  As the programs were written for research
% purposes only, they have not been tested to the degree that would be
% advisable in any important application.  All use of these programs is
% entirely at the user's own risk.
%
% [ml-class] Changes Made:
% 1) Function name and argument specifications
% 2) Output display
%

% Read options: only MaxIter is honored; its value becomes "length" below.
% NOTE(review): the local variable "length" shadows Octave's builtin length()
% for the rest of this function — which is why max(size(length)) is used
% further down instead of calling length().
if exist('options', 'var') && ~isempty(options) && isfield(options, 'MaxIter')
    length = options.MaxIter;
else
    length = 100;                     % default: at most 100 line searches
end

RHO = 0.01;                            % a bunch of constants for line searches
SIG = 0.5;       % RHO and SIG are the constants in the Wolfe-Powell conditions
INT = 0.1;    % don't reevaluate within 0.1 of the limit of the current bracket
EXT = 3.0;                    % extrapolate maximum 3 times the current bracket
MAX = 20;                         % max 20 function evaluations per line search
RATIO = 100;                                      % maximum allowed slope ratio

% Build the call string "feval(f, X, P1, ..., Pk)" for however many extra
% parameters were actually supplied; it is executed below via eval().
argstr = ['feval(f, X'];                      % compose string used to call function
for i = 1:(nargin - 3)
  argstr = [argstr, ',P', int2str(i)];
end
argstr = [argstr, ')'];

% "length" may optionally carry a second component: the expected function-value
% reduction in the first line search ("red", defaults to 1).
if max(size(length)) == 2, red=length(2); length=length(1); else red=1; end
S=['Iteration '];                             % label used in progress printout

i = 0;                                            % zero the run length counter
ls_failed = 0;                             % no previous line search has failed
fX = [];                           % cost history returned to the caller
[f1 df1] = eval(argstr);                      % get function value and gradient
i = i + (length<0);                                            % count epochs?!
s = -df1;                                        % search direction is steepest
d1 = -s'*s;                                                 % this is the slope
z1 = red/(1-d1);                 % initial step is red/(|s|^2+1), as d1 = -|s|^2

while i < abs(length)                                      % while not finished
  i = i + (length>0);                                      % count iterations?!

  X0 = X; f0 = f1; df0 = df1;                   % make a copy of current values
  X = X + z1*s;                                             % begin line search
  [f2 df2] = eval(argstr);
  i = i + (length<0);                                          % count epochs?!
  d2 = df2'*s;
  f3 = f1; d3 = d1; z3 = -z1;             % initialize point 3 equal to point 1
  if length>0, M = MAX; else M = min(MAX, -length-i); end
  success = 0; limit = -1;                     % initialize quantities
  while 1                                              % line-search inner loop
    while ((f2 > f1+z1*RHO*d1) | (d2 > -SIG*d1)) & (M > 0)
      limit = z1;                                         % tighten the bracket
      if f2 > f1
        z2 = z3 - (0.5*d3*z3*z3)/(d3*z3+f2-f3);                 % quadratic fit
      else
        A = 6*(f2-f3)/z3+3*(d2+d3);                                 % cubic fit
        B = 3*(f3-f2)-z3*(d3+2*d2);
        z2 = (sqrt(B*B-A*d2*z3*z3)-B)/A;       % numerical error possible - ok!
      end
      if isnan(z2) | isinf(z2)
        z2 = z3/2;                  % if we had a numerical problem then bisect
      end
      z2 = max(min(z2, INT*z3),(1-INT)*z3);  % don't accept too close to limits
      z1 = z1 + z2;                                           % update the step
      X = X + z2*s;
      [f2 df2] = eval(argstr);
      M = M - 1; i = i + (length<0);                           % count epochs?!
      d2 = df2'*s;
      z3 = z3-z2;                    % z3 is now relative to the location of z2
    end
    if f2 > f1+z1*RHO*d1 | d2 > -SIG*d1
      break;                                                % this is a failure
    elseif d2 > SIG*d1
      success = 1; break;                                             % success
    elseif M == 0
      break;                                                          % failure
    end
    A = 6*(f2-f3)/z3+3*(d2+d3);                      % make cubic extrapolation
    B = 3*(f3-f2)-z3*(d3+2*d2);
    z2 = -d2*z3*z3/(B+sqrt(B*B-A*d2*z3*z3));        % num. error possible - ok!
    if ~isreal(z2) | isnan(z2) | isinf(z2) | z2 < 0   % num prob or wrong sign?
      if limit < -0.5                               % if we have no upper limit
        z2 = z1 * (EXT-1);                % then extrapolate the maximum amount
      else
        z2 = (limit-z1)/2;                                   % otherwise bisect
      end
    elseif (limit > -0.5) & (z2+z1 > limit)        % extrapolation beyond max?
      z2 = (limit-z1)/2;                                               % bisect
    elseif (limit < -0.5) & (z2+z1 > z1*EXT)       % extrapolation beyond limit
      z2 = z1*(EXT-1.0);                           % set to extrapolation limit
    elseif z2 < -z3*INT
      z2 = -z3*INT;
    elseif (limit > -0.5) & (z2 < (limit-z1)*(1.0-INT))   % too close to limit?
      z2 = (limit-z1)*(1.0-INT);
    end
    f3 = f2; d3 = d2; z3 = -z2;                  % set point 3 equal to point 2
    z1 = z1 + z2; X = X + z2*s;                      % update current estimates
    [f2 df2] = eval(argstr);
    M = M - 1; i = i + (length<0);                             % count epochs?!
    d2 = df2'*s;
  end                                                      % end of line search

  if success                                         % if line search succeeded
    f1 = f2; fX = [fX' f1]';
    fprintf('%s %4i | Cost: %4.6e\r', S, i, f1);
    s = (df2'*df2-df1'*df2)/(df1'*df1)*s - df2;      % Polack-Ribiere direction
    tmp = df1; df1 = df2; df2 = tmp;                         % swap derivatives
    d2 = df1'*s;
    if d2 > 0                                      % new slope must be negative
      s = -df1;                              % otherwise use steepest direction
      d2 = -s'*s;
    end
    z1 = z1 * min(RATIO, d1/(d2-realmin));          % slope ratio but max RATIO
    d1 = d2;
    ls_failed = 0;                              % this line search did not fail
  else
    X = X0; f1 = f0; df1 = df0;  % restore point from before failed line search
    if ls_failed | i > abs(length)          % line search failed twice in a row
      break;                             % or we ran out of time, so we give up
    end
    tmp = df1; df1 = df2; df2 = tmp;                         % swap derivatives
    s = -df1;                                                    % try steepest
    d1 = -s'*s;
    z1 = 1/(1-d1);
    ls_failed = 1;                                    % this line search failed
  end
  if exist('OCTAVE_VERSION')          % flush progress output when under Octave
    fflush(stdout);
  end
end
fprintf('\n');
function[X,fX,i]=fmincg(f,X,options,P1,P2,P3,P4,P5)
%最小化连续可微分多元函数。起点
%由“X”(D乘以1)给出,且字符串“f”中命名的函数必须
%返回函数值和偏导数向量。波兰人-
%共轭梯度的Ribiere味用于计算搜索方向,
%以及使用二次和三次多项式近似的线搜索和
%Wolfe-Powell停止准则与斜率法一起使用
%用于猜测初始步长。此外,还对以下各项进行了一系列检查:
%确保正在进行勘探,且不会进行外推
%无限大。“长度”给出了运行的长度:如果是
%如果为正值,则给出最大行搜索数,如果为负值,则给出其最大行搜索数
%绝对值给出了允许的最大函数求值次数。你可以
%(可选)为“长度”指定第二个组件,该组件将指示
%第一行搜索中预期的函数值减少(默认值
%至1.0)。当函数的长度增加或不再增加时,该函数返回
%我们可以取得进展(即,我们至少可以取得进展,或者由于
%数值问题,我们不能再接近了)。如果函数终止
%在几次迭代中,它可能表示函数值
%和衍生工具不一致(即,在
%实现您的“f”功能)。函数返回找到的值
%解决方案“X”,表示所取得进展的函数值“fX”向量
%和“i”迭代次数(行搜索或函数求值,
%取决于使用的“长度”符号。
%
%用法:[X,fX,i]=fmincg(f,X,options,P1,P2,P3,P4,P5)
%
%另见:checkgrad
%
%Carl Edward Rasmussen 2001和2002版权所有。日期2002-02-13
%
%
%(C)版权所有1999年、2000年和2001年,卡尔·爱德华·拉斯穆森
%
%允许任何人复制、使用或修改这些文件
%用于研究或试验目的的计划和随附文件
%教育,前提是保留本版权声明,且注释
%已进行的任何更改。
%
%这些程序和文件的分发没有任何保证,
%明示的或暗示的。因为这些程序是为研究而编写的
%仅出于目的,它们还没有被测试到需要的程度
%适用于任何重要应用。所有这些程序的使用都是免费的
%完全由用户自行承担风险。
%
%[ml类]所做的更改:
%1)函数名和参数规范
%2)输出显示
%
%阅读选项
如果存在('options','var')&&&~isempty(options)&&isfield(options,'MaxIter'))
长度=options.MaxIter;
其他的
长度=100;
结束
RHO=0.01;%用于行搜索的一组常量
SIG=0.5;%RHO和SIG是Wolfe-Powell条件下的常数
INT=0.1;%不要在当前括号限制的0.1范围内重新评估
EXT=3.0;%外推最大值为当前括号的3倍
最大值=20;%每行搜索最多20次功能评估
比率=100;%最大允许坡率
argstr=['feval(f,X'];%compose用于调用函数的字符串
对于i=1:(纳金-3)
argstr=[argstr',P',int2str(i)];
结束
argstr=[argstr',)'];
如果最大值(尺寸(长度))=2,则红色=长度(2);长度=长度(1);红色=1;结束
S=[‘迭代’];
i=0;%将游程计数器归零
ls_失败=0;%前一行搜索没有失败
外汇=[];
[f1 df1]=eval(argstr);%获取函数值和梯度
i=i+(长度<0);%计数epochs?!
s=-df1;%搜索方向为最速下降方向
d1=-s'*s;%这是斜率
z1=red/(1-d1);%初始步长为red/(|s|+1)
而i<abs(长度)%尚未结束时循环
i=i+(长度>0);%计数迭代?!
X0=X;f0=f1;df0=df1;%保存当前值的副本
X=X+z1*s;%开始线搜索
[f2 df2]=eval(argstr);
i=i+(长度<0);%计数epochs?!
d2=df2'*s;
f3=f1;d3=d1;z3=-z1;%将点3初始化为点1
如果长度>0,M=MAX;否则M=min(MAX,-length-i);结束
成功=0;限制=-1;%初始化量子
而1
而((f2>f1+z1*RHO*d1)|(d2>-SIG*d1))和(M>0)
极限=z1;%拧紧支架
如果f2>f1
z2=z3-(0.5*d3*z3*z3)/(d3*z3+f2-f3);%二次拟合
其他的
A=6*(f2-f3)/z3+3*(d2+d3);%立方拟合
B=3*(f3-f2)-z3*(d3+2*d2);
z2=(sqrt(B*B-A*d2*z3*z3)-B)/A;%可能的数字错误-确定!
结束
如果isnan(z2)| isinf(z2)
z2=z3/2;%如果我们有一个数值问题,那么对分
结束
z2=最大值(最小值(z2,INT*z3),(1-INT)*z3);%不接受太接近极限
z1=z1+z2;%更新步骤
X=X+z2*s;
[f2 df2]=eval(argstr);
M=M-1;i=i+(长度<0);%计数epochs?!
d2=df2'*s;
z3=z3-z2;%z3现在是相对于z2位置的
结束
如果f2>f1+z1*RHO*d1|d2>-SIG*d1
break;%这是一次失败
elseif d2>SIG*d1
成功=1;break;%成功
elseif M==0
break;%失败
结束
A=6*(f2-f3)/z3+3*(d2+d3);%进行三次外推
B=3*(f3-f2)-z3*(d3+2*d2);
z2=-d2*z3*z3/(B+sqrt(B*B-A*d2*z3*z3));%可能出现数值误差-没关系!
如果~isreal(z2)|isnan(z2)|isinf(z2)|z2<0%数值问题或符号错误?
如果limit<-0.5%如果我们没有上限
z2=z1*(EXT-1);%则外推最大允许量
其他的
z2=(limit-z1)/2;%否则对分
结束
elseif(limit>-0.5)&(z2+z1>limit)%外推超过最大值?
z2=(limit-z1)/2;%对分
elseif(limit<-0.5)&(z2+z1>z1*EXT)%外推超过限度
z2=z1*(EXT-1.0);%设为外推上限
elseif z2<-z3*INT
z2=-z3*INT;
elseif(limit>-0.5)&(z2<(limit-z1)*(1.0-INT))%太接近限度?
z2=(limit-z1)*(1.0-INT);
结束
f3=f2;d3=d2;z3=-z2;%将点3设为点2
z1=z1+z2;X=X+z2*s;%更新当前估计
[f2 df2]=eval(argstr);
M=M-1;i=i+(长度<0);%计数epochs?!
d2=df2'*s;
结束%线搜索结束
如果成功%如果线搜索成功
f1=f2;fX=[fX' f1]';
fprintf('%s %4i | Cost: %4.6e\r',S,i,f1);
s=(df2'*df2-df1'*df2)/(df1'*df1)*s-df2;%Polack-Ribiere方向
tmp=df1;df1=df2;df2=tmp;%交换导数
d2=df1'*s;
如果d2>0%新斜率必须为负
s=-df1;%否则使用最速下降方向
d2=-s'*s;
结束
z1=z1*min(RATIO,d1/(d2-realmin));%斜率比,但最大为RATIO
d1=d2;
ls_失败=0;%这次线搜索没有失败
其他的
X=X0;f1=f0;df1=df0;%恢复到失败线搜索之前的点
如果ls_失败|i>abs(长度)%线搜索连续失败两次
break;%或者时间用完了,所以我们放弃
结束
tmp=df1;df1=df2;df2=tmp;%交换导数
s=-df1;%尝试最速下降方向
d1=-s'*s;
z1=1/(1-d1);
ls_失败=1;%这次线搜索失败
结束
如果exist('OCTAVE_VERSION')
fflush(stdout);
结束
结束
fprintf('\n');
% Training with fmincg: of the optimset fields, only MaxIter is read by
% fmincg's option handling (see the function listing above).
options = optimset('MaxIter', 50);
[nn_params, cost] = fmincg(costFunction, initial_nn_params, options);
% Training with fminunc: 'GradObj','on' tells fminunc that costFunction
% also returns the gradient as its second output.
options = optimset('GradObj', 'on', 'MaxIter', 50);
[nn_params, cost, exit_flag] = fminunc(costFunction, initial_nn_params, options);
Testing lrCostFunction() with regularization
Cost: 2.534819
Expected cost: 2.534819
Gradients:
 0.146561
 -0.548558
 0.724722
 1.398003
Expected gradients:
 0.146561
 -0.548558
 0.724722
 1.398003
Program paused. Press enter to continue.

Training One-vs-All Logistic Regression...
id = 1512324857357
Elapsed time is 11.3794 seconds.
Program paused. Press enter to continue.

Training Set Accuracy: 93.100000
Testing lrCostFunction() with regularization
Cost: 2.534819
Expected cost: 2.534819
Gradients:
 0.146561
 -0.548558
 0.724722
 1.398003
Expected gradients:
 0.146561
 -0.548558
 0.724722
 1.398003
Program paused. Press enter to continue.

Training One-vs-All Logistic Regression...
id = 1512325280047
Elapsed time is 11.7978 seconds.

Training Set Accuracy: 95.120000