LevenbergMarquardtOptimizer.java

  1. /*
  2.  * Licensed to the Apache Software Foundation (ASF) under one or more
  3.  * contributor license agreements.  See the NOTICE file distributed with
  4.  * this work for additional information regarding copyright ownership.
  5.  * The ASF licenses this file to You under the Apache License, Version 2.0
  6.  * (the "License"); you may not use this file except in compliance with
  7.  * the License.  You may obtain a copy of the License at
  8.  *
  9.  *      http://www.apache.org/licenses/LICENSE-2.0
  10.  *
  11.  * Unless required by applicable law or agreed to in writing, software
  12.  * distributed under the License is distributed on an "AS IS" BASIS,
  13.  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14.  * See the License for the specific language governing permissions and
  15.  * limitations under the License.
  16.  */
  17. package org.apache.commons.math3.optimization.general;

  18. import java.util.Arrays;

  19. import org.apache.commons.math3.exception.ConvergenceException;
  20. import org.apache.commons.math3.exception.util.LocalizedFormats;
  21. import org.apache.commons.math3.optimization.PointVectorValuePair;
  22. import org.apache.commons.math3.optimization.ConvergenceChecker;
  23. import org.apache.commons.math3.linear.RealMatrix;
  24. import org.apache.commons.math3.util.Precision;
  25. import org.apache.commons.math3.util.FastMath;


  26. /**
  27.  * This class solves a least squares problem using the Levenberg-Marquardt algorithm.
  28.  *
  29.  * <p>This implementation <em>should</em> work even for over-determined systems
  30.  * (i.e. systems having more point coordinates than equations). Over-determined systems
  31.  * are solved by ignoring the point coordinates which have the smallest impact according
  32.  * to their Jacobian column norm. Only the rank of the matrix and some loop bounds
  33.  * are changed to implement this.</p>
  34.  *
  35.  * <p>The resolution engine is a simple translation of the MINPACK <a
  36.  * href="http://www.netlib.org/minpack/lmder.f">lmder</a> routine with minor
  37.  * changes. The changes include the over-determined resolution, the use of
  38.  * inherited convergence checker and the Q.R. decomposition which has been
  39.  * rewritten following the algorithm described in the
  40.  * P. Lascaux and R. Theodor book <i>Analyse num&eacute;rique matricielle
  41.  * appliqu&eacute;e &agrave; l'art de l'ing&eacute;nieur</i>, Masson 1986.</p>
  42.  * <p>The authors of the original fortran version are:
  43.  * <ul>
  44.  * <li>Argonne National Laboratory. MINPACK project. March 1980</li>
  45.  * <li>Burton S. Garbow</li>
  46.  * <li>Kenneth E. Hillstrom</li>
  47.  * <li>Jorge J. More</li>
  48.  * </ul>
  49.  * The redistribution policy for MINPACK is available <a
  50.  * href="http://www.netlib.org/minpack/disclaimer">here</a>; for convenience, it
  51.  * is reproduced below.</p>
  52.  *
  53.  * <table border="0" width="80%" cellpadding="10" align="center" bgcolor="#E0E0E0">
  54.  * <tr><td>
  55.  *    Minpack Copyright Notice (1999) University of Chicago.
  56.  *    All rights reserved
  57.  * </td></tr>
  58.  * <tr><td>
  59.  * Redistribution and use in source and binary forms, with or without
  60.  * modification, are permitted provided that the following conditions
  61.  * are met:
  62.  * <ol>
  63.  *  <li>Redistributions of source code must retain the above copyright
  64.  *      notice, this list of conditions and the following disclaimer.</li>
  65.  * <li>Redistributions in binary form must reproduce the above
  66.  *     copyright notice, this list of conditions and the following
  67.  *     disclaimer in the documentation and/or other materials provided
  68.  *     with the distribution.</li>
  69.  * <li>The end-user documentation included with the redistribution, if any,
  70.  *     must include the following acknowledgment:
  71.  *     <code>This product includes software developed by the University of
  72.  *           Chicago, as Operator of Argonne National Laboratory.</code>
  73.  *     Alternately, this acknowledgment may appear in the software itself,
  74.  *     if and wherever such third-party acknowledgments normally appear.</li>
  75.  * <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
  76.  *     WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
  77.  *     UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
  78.  *     THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
  79.  *     IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
  80.  *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
  81.  *     OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
  82.  *     OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
  83.  *     USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
  84.  *     THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
  85.  *     DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
  86.  *     UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
  87.  *     BE CORRECTED.</strong></li>
  88.  * <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
  89.  *     HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
  90.  *     ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
  91.  *     INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
  92.  *     ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
  93.  *     PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
  94.  *     SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
  95.  *     (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
  96.  *     EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
  97.  *     POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
  98.  * </ol></td></tr>
  99.  * </table>
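 * <p>A minimal usage sketch is given below. The model function {@code myModel} is a
 * placeholder for a user supplied {@code DifferentiableMultivariateVectorFunction};
 * the call relies on the generic
 * {@code optimize(maxEval, function, target, weights, startPoint)} entry point
 * inherited from the base least squares optimizer:</p>
 * <pre>
 *   LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
 *   double[] target  = { 1.0, 2.0, 3.0 };  // observed values
 *   double[] weights = { 1.0, 1.0, 1.0 };  // observation weights
 *   double[] start   = { 0.0, 0.0 };       // initial guess for the parameters
 *   PointVectorValuePair optimum =
 *       optimizer.optimize(100, myModel, target, weights, start);
 *   double[] fittedParameters = optimum.getPoint();
 * </pre>
 *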
  100.  * @deprecated As of 3.1 (to be removed in 4.0).
  101.  * @since 2.0
  102.  *
  103.  */
  104. @Deprecated
  105. public class LevenbergMarquardtOptimizer extends AbstractLeastSquaresOptimizer {
  106.     /** Number of solved points. */
  107.     private int solvedCols;
  108.     /** Diagonal elements of the R matrix in the Q.R. decomposition. */
  109.     private double[] diagR;
  110.     /** Norms of the columns of the jacobian matrix. */
  111.     private double[] jacNorm;
  112.     /** Coefficients of the Householder transforms vectors. */
  113.     private double[] beta;
  114.     /** Columns permutation array. */
  115.     private int[] permutation;
  116.     /** Rank of the jacobian matrix. */
  117.     private int rank;
  118.     /** Levenberg-Marquardt parameter. */
  119.     private double lmPar;
  120.     /** Parameters evolution direction associated with lmPar. */
  121.     private double[] lmDir;
  122.     /** Positive input variable used in determining the initial step bound. */
  123.     private final double initialStepBoundFactor;
  124.     /** Desired relative error in the sum of squares. */
  125.     private final double costRelativeTolerance;
  126.     /**  Desired relative error in the approximate solution parameters. */
  127.     private final double parRelativeTolerance;
  128.     /** Desired max cosine on the orthogonality between the function vector
  129.      * and the columns of the jacobian. */
  130.     private final double orthoTolerance;
  131.     /** Threshold for QR ranking. */
  132.     private final double qrRankingThreshold;
  133.     /** Weighted residuals. */
  134.     private double[] weightedResidual;
  135.     /** Weighted Jacobian. */
  136.     private double[][] weightedJacobian;

  137.     /**
  138.      * Build an optimizer for least squares problems with default values
  139.      * for all the tuning parameters (see the {@link
  140.      * #LevenbergMarquardtOptimizer(double,double,double,double,double)
  141.      * other constructor}).
  142.      * The default values for the algorithm settings are:
  143.      * <ul>
  144.      *  <li>Initial step bound factor: 100</li>
  145.      *  <li>Cost relative tolerance: 1e-10</li>
  146.      *  <li>Parameters relative tolerance: 1e-10</li>
  147.      *  <li>Orthogonality tolerance: 1e-10</li>
  148.      *  <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
  149.      * </ul>
  150.      */
  151.     public LevenbergMarquardtOptimizer() {
  152.         this(100, 1e-10, 1e-10, 1e-10, Precision.SAFE_MIN);
  153.     }

  154.     /**
  155.      * Constructor that allows the specification of a custom convergence
  156.      * checker.
  157.      * Note that all the usual convergence checks will be <em>disabled</em>.
  158.      * The default values for the algorithm settings are:
  159.      * <ul>
  160.      *  <li>Initial step bound factor: 100</li>
  161.      *  <li>Cost relative tolerance: 1e-10</li>
  162.      *  <li>Parameters relative tolerance: 1e-10</li>
  163.      *  <li>Orthogonality tolerance: 1e-10</li>
  164.      *  <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
  165.      * </ul>
  166.      *
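     * <p>A minimal sketch, assuming the {@link
     * org.apache.commons.math3.optimization.SimpleVectorValueChecker} implementation
     * shipped with this package (the thresholds below are illustrative only):</p>
     * <pre>
     *   ConvergenceChecker&lt;PointVectorValuePair&gt; checker =
     *       new SimpleVectorValueChecker(1e-6, 1e-6); // relative, absolute thresholds
     *   LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer(checker);
     * </pre>
     *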
  167.      * @param checker Convergence checker.
  168.      */
  169.     public LevenbergMarquardtOptimizer(ConvergenceChecker<PointVectorValuePair> checker) {
  170.         this(100, checker, 1e-10, 1e-10, 1e-10, Precision.SAFE_MIN);
  171.     }

  172.     /**
  173.      * Constructor that allows the specification of a custom convergence
  174.      * checker, in addition to the standard ones.
  175.      *
  176.      * @param initialStepBoundFactor Positive input variable used in
  177.      * determining the initial step bound. This bound is set to the
  178.      * product of initialStepBoundFactor and the euclidean norm of
  179.      * {@code diag * x} if non-zero, or else to {@code initialStepBoundFactor}
  180.      * itself. In most cases the factor should lie in the interval
  181.      * {@code (0.1, 100.0)}. {@code 100} is a generally recommended value.
  182.      * @param checker Convergence checker.
  183.      * @param costRelativeTolerance Desired relative error in the sum of
  184.      * squares.
  185.      * @param parRelativeTolerance Desired relative error in the approximate
  186.      * solution parameters.
  187.      * @param orthoTolerance Desired max cosine on the orthogonality between
  188.      * the function vector and the columns of the Jacobian.
  189.      * @param threshold Desired threshold for QR ranking. If the squared norm
  190.      * of a column vector is smaller or equal to this threshold during QR
  191.      * decomposition, it is considered to be a zero vector and hence the rank
  192.      * of the matrix is reduced.
  193.      */
  194.     public LevenbergMarquardtOptimizer(double initialStepBoundFactor,
  195.                                        ConvergenceChecker<PointVectorValuePair> checker,
  196.                                        double costRelativeTolerance,
  197.                                        double parRelativeTolerance,
  198.                                        double orthoTolerance,
  199.                                        double threshold) {
  200.         super(checker);
  201.         this.initialStepBoundFactor = initialStepBoundFactor;
  202.         this.costRelativeTolerance = costRelativeTolerance;
  203.         this.parRelativeTolerance = parRelativeTolerance;
  204.         this.orthoTolerance = orthoTolerance;
  205.         this.qrRankingThreshold = threshold;
  206.     }

  207.     /**
  208.      * Build an optimizer for least squares problems with default values
  209.      * for some of the tuning parameters (see the {@link
  210.      * #LevenbergMarquardtOptimizer(double,double,double,double,double)
  211.      * other constructor}).
  212.      * The default values for the algorithm settings are:
  213.      * <ul>
  214.      *  <li>Initial step bound factor: 100</li>
  215.      *  <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
  216.      * </ul>
  217.      *
  218.      * @param costRelativeTolerance Desired relative error in the sum of
  219.      * squares.
  220.      * @param parRelativeTolerance Desired relative error in the approximate
  221.      * solution parameters.
  222.      * @param orthoTolerance Desired max cosine on the orthogonality between
  223.      * the function vector and the columns of the Jacobian.
  224.      */
  225.     public LevenbergMarquardtOptimizer(double costRelativeTolerance,
  226.                                        double parRelativeTolerance,
  227.                                        double orthoTolerance) {
  228.         this(100,
  229.              costRelativeTolerance, parRelativeTolerance, orthoTolerance,
  230.              Precision.SAFE_MIN);
  231.     }

  232.     /**
  233.      * Build an optimizer for least squares problems. The arguments control the
  234.      * behaviour of the default convergence checking procedure.
  235.      * Additional criteria can be defined through the setting of a {@link
  236.      * ConvergenceChecker}.
  237.      *
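     * <p>For example, a sketch that tightens all three tolerances while keeping the
     * recommended step bound factor and the default ranking threshold (the values are
     * illustrative only):</p>
     * <pre>
     *   LevenbergMarquardtOptimizer optimizer =
     *       new LevenbergMarquardtOptimizer(100, 1e-12, 1e-12, 1e-12, Precision.SAFE_MIN);
     * </pre>
     *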
  238.      * @param initialStepBoundFactor Positive input variable used in
  239.      * determining the initial step bound. This bound is set to the
  240.      * product of initialStepBoundFactor and the euclidean norm of
  241.      * {@code diag * x} if non-zero, or else to {@code initialStepBoundFactor}
  242.      * itself. In most cases the factor should lie in the interval
  243.      * {@code (0.1, 100.0)}. {@code 100} is a generally recommended value.
  244.      * @param costRelativeTolerance Desired relative error in the sum of
  245.      * squares.
  246.      * @param parRelativeTolerance Desired relative error in the approximate
  247.      * solution parameters.
  248.      * @param orthoTolerance Desired max cosine on the orthogonality between
  249.      * the function vector and the columns of the Jacobian.
  250.      * @param threshold Desired threshold for QR ranking. If the squared norm
  251.      * of a column vector is smaller or equal to this threshold during QR
  252.      * decomposition, it is considered to be a zero vector and hence the rank
  253.      * of the matrix is reduced.
  254.      */
  255.     public LevenbergMarquardtOptimizer(double initialStepBoundFactor,
  256.                                        double costRelativeTolerance,
  257.                                        double parRelativeTolerance,
  258.                                        double orthoTolerance,
  259.                                        double threshold) {
  260.         super(null); // No custom convergence criterion.
  261.         this.initialStepBoundFactor = initialStepBoundFactor;
  262.         this.costRelativeTolerance = costRelativeTolerance;
  263.         this.parRelativeTolerance = parRelativeTolerance;
  264.         this.orthoTolerance = orthoTolerance;
  265.         this.qrRankingThreshold = threshold;
  266.     }

  267.     /** {@inheritDoc} */
  268.     @Override
  269.     protected PointVectorValuePair doOptimize() {
  270.         final int nR = getTarget().length; // Number of observed data.
  271.         final double[] currentPoint = getStartPoint();
  272.         final int nC = currentPoint.length; // Number of parameters.

  273.         // arrays shared with the other private methods
  274.         solvedCols  = FastMath.min(nR, nC);
  275.         diagR       = new double[nC];
  276.         jacNorm     = new double[nC];
  277.         beta        = new double[nC];
  278.         permutation = new int[nC];
  279.         lmDir       = new double[nC];

  280.         // local point
  281.         double   delta   = 0;
  282.         double   xNorm   = 0;
  283.         double[] diag    = new double[nC];
  284.         double[] oldX    = new double[nC];
  285.         double[] oldRes  = new double[nR];
  286.         double[] oldObj  = new double[nR];
  287.         double[] qtf     = new double[nR];
  288.         double[] work1   = new double[nC];
  289.         double[] work2   = new double[nC];
  290.         double[] work3   = new double[nC];

  291.         final RealMatrix weightMatrixSqrt = getWeightSquareRoot();

  292.         // Evaluate the function at the starting point and calculate its norm.
  293.         double[] currentObjective = computeObjectiveValue(currentPoint);
  294.         double[] currentResiduals = computeResiduals(currentObjective);
  295.         PointVectorValuePair current = new PointVectorValuePair(currentPoint, currentObjective);
  296.         double currentCost = computeCost(currentResiduals);

  297.         // Outer loop.
  298.         lmPar = 0;
  299.         boolean firstIteration = true;
  300.         int iter = 0;
  301.         final ConvergenceChecker<PointVectorValuePair> checker = getConvergenceChecker();
  302.         while (true) {
  303.             ++iter;
  304.             final PointVectorValuePair previous = current;

  305.             // QR decomposition of the jacobian matrix
  306.             qrDecomposition(computeWeightedJacobian(currentPoint));

  307.             weightedResidual = weightMatrixSqrt.operate(currentResiduals);
  308.             for (int i = 0; i < nR; i++) {
  309.                 qtf[i] = weightedResidual[i];
  310.             }

  311.             // compute Qt.res
  312.             qTy(qtf);

  313.             // now we don't need Q anymore,
  314.             // so let jacobian contain the R matrix with its diagonal elements
  315.             for (int k = 0; k < solvedCols; ++k) {
  316.                 int pk = permutation[k];
  317.                 weightedJacobian[k][pk] = diagR[pk];
  318.             }

  319.             if (firstIteration) {
  320.                 // scale the point according to the norms of the columns
  321.                 // of the initial jacobian
  322.                 xNorm = 0;
  323.                 for (int k = 0; k < nC; ++k) {
  324.                     double dk = jacNorm[k];
  325.                     if (dk == 0) {
  326.                         dk = 1.0;
  327.                     }
  328.                     double xk = dk * currentPoint[k];
  329.                     xNorm  += xk * xk;
  330.                     diag[k] = dk;
  331.                 }
  332.                 xNorm = FastMath.sqrt(xNorm);

  333.                 // initialize the step bound delta
  334.                 delta = (xNorm == 0) ? initialStepBoundFactor : (initialStepBoundFactor * xNorm);
  335.             }

  336.             // check orthogonality between function vector and jacobian columns
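            // (each term below is |column . residuals| / (||column|| ||residuals||), i.e. the
            // cosine of the angle between a weighted Jacobian column and the weighted residuals;
            // when the largest cosine is below orthoTolerance, the gradient Jt.W.r is considered
            // negligible and the current point is accepted as a stationary point)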
  337.             double maxCosine = 0;
  338.             if (currentCost != 0) {
  339.                 for (int j = 0; j < solvedCols; ++j) {
  340.                     int    pj = permutation[j];
  341.                     double s  = jacNorm[pj];
  342.                     if (s != 0) {
  343.                         double sum = 0;
  344.                         for (int i = 0; i <= j; ++i) {
  345.                             sum += weightedJacobian[i][pj] * qtf[i];
  346.                         }
  347.                         maxCosine = FastMath.max(maxCosine, FastMath.abs(sum) / (s * currentCost));
  348.                     }
  349.                 }
  350.             }
  351.             if (maxCosine <= orthoTolerance) {
  352.                 // Convergence has been reached.
  353.                 setCost(currentCost);
  354.                 // Update (deprecated) "point" field.
  355.                 point = current.getPoint();
  356.                 return current;
  357.             }

  358.             // rescale if necessary
  359.             for (int j = 0; j < nC; ++j) {
  360.                 diag[j] = FastMath.max(diag[j], jacNorm[j]);
  361.             }

  362.             // Inner loop.
  363.             for (double ratio = 0; ratio < 1.0e-4;) {

  364.                 // save the state
  365.                 for (int j = 0; j < solvedCols; ++j) {
  366.                     int pj = permutation[j];
  367.                     oldX[pj] = currentPoint[pj];
  368.                 }
  369.                 final double previousCost = currentCost;
  370.                 double[] tmpVec = weightedResidual;
  371.                 weightedResidual = oldRes;
  372.                 oldRes    = tmpVec;
  373.                 tmpVec    = currentObjective;
  374.                 currentObjective = oldObj;
  375.                 oldObj    = tmpVec;

  376.                 // determine the Levenberg-Marquardt parameter
  377.                 determineLMParameter(qtf, delta, diag, work1, work2, work3);

  378.                 // compute the new point and the norm of the evolution direction
  379.                 double lmNorm = 0;
  380.                 for (int j = 0; j < solvedCols; ++j) {
  381.                     int pj = permutation[j];
  382.                     lmDir[pj] = -lmDir[pj];
  383.                     currentPoint[pj] = oldX[pj] + lmDir[pj];
  384.                     double s = diag[pj] * lmDir[pj];
  385.                     lmNorm  += s * s;
  386.                 }
  387.                 lmNorm = FastMath.sqrt(lmNorm);
  388.                 // on the first iteration, adjust the initial step bound.
  389.                 if (firstIteration) {
  390.                     delta = FastMath.min(delta, lmNorm);
  391.                 }

  392.                 // Evaluate the function at x + p and calculate its norm.
  393.                 currentObjective = computeObjectiveValue(currentPoint);
  394.                 currentResiduals = computeResiduals(currentObjective);
  395.                 current = new PointVectorValuePair(currentPoint, currentObjective);
  396.                 currentCost = computeCost(currentResiduals);

  397.                 // compute the scaled actual reduction
  398.                 double actRed = -1.0;
  399.                 if (0.1 * currentCost < previousCost) {
  400.                     double r = currentCost / previousCost;
  401.                     actRed = 1.0 - r * r;
  402.                 }

  403.                 // compute the scaled predicted reduction
  404.                 // and the scaled directional derivative
  405.                 for (int j = 0; j < solvedCols; ++j) {
  406.                     int pj = permutation[j];
  407.                     double dirJ = lmDir[pj];
  408.                     work1[j] = 0;
  409.                     for (int i = 0; i <= j; ++i) {
  410.                         work1[i] += weightedJacobian[i][pj] * dirJ;
  411.                     }
  412.                 }
  413.                 double coeff1 = 0;
  414.                 for (int j = 0; j < solvedCols; ++j) {
  415.                     coeff1 += work1[j] * work1[j];
  416.                 }
  417.                 double pc2 = previousCost * previousCost;
  418.                 coeff1 /= pc2;
  419.                 double coeff2 = lmPar * lmNorm * lmNorm / pc2;
  420.                 double preRed = coeff1 + 2 * coeff2;
  421.                 double dirDer = -(coeff1 + coeff2);

  422.                 // ratio of the actual to the predicted reduction
  423.                 ratio = (preRed == 0) ? 0 : (actRed / preRed);

  424.                 // update the step bound
  425.                 if (ratio <= 0.25) {
  426.                     double tmp =
  427.                         (actRed < 0) ? (0.5 * dirDer / (dirDer + 0.5 * actRed)) : 0.5;
  428.                     if ((0.1 * currentCost >= previousCost) || (tmp < 0.1)) {
  429.                         tmp = 0.1;
  430.                     }
  431.                     delta = tmp * FastMath.min(delta, 10.0 * lmNorm);
  432.                     lmPar /= tmp;
  433.                 } else if ((lmPar == 0) || (ratio >= 0.75)) {
  434.                     delta = 2 * lmNorm;
  435.                     lmPar *= 0.5;
  436.                 }

  437.                 // test for successful iteration.
  438.                 if (ratio >= 1.0e-4) {
  439.                     // successful iteration, update the norm
  440.                     firstIteration = false;
  441.                     xNorm = 0;
  442.                     for (int k = 0; k < nC; ++k) {
  443.                         double xK = diag[k] * currentPoint[k];
  444.                         xNorm += xK * xK;
  445.                     }
  446.                     xNorm = FastMath.sqrt(xNorm);

  447.                     // tests for convergence.
  448.                     if (checker != null && checker.converged(iter, previous, current)) {
  449.                         setCost(currentCost);
  450.                         // Update (deprecated) "point" field.
  451.                         point = current.getPoint();
  452.                         return current;
  453.                     }
  454.                 } else {
  455.                     // failed iteration, reset the previous values
  456.                     currentCost = previousCost;
  457.                     for (int j = 0; j < solvedCols; ++j) {
  458.                         int pj = permutation[j];
  459.                         currentPoint[pj] = oldX[pj];
  460.                     }
  461.                     tmpVec    = weightedResidual;
  462.                     weightedResidual = oldRes;
  463.                     oldRes    = tmpVec;
  464.                     tmpVec    = currentObjective;
  465.                     currentObjective = oldObj;
  466.                     oldObj    = tmpVec;
  467.                     // Reset "current" to previous values.
  468.                     current = new PointVectorValuePair(currentPoint, currentObjective);
  469.                 }

  470.                 // Default convergence criteria.
  471.                 if ((FastMath.abs(actRed) <= costRelativeTolerance &&
  472.                      preRed <= costRelativeTolerance &&
  473.                      ratio <= 2.0) ||
  474.                     delta <= parRelativeTolerance * xNorm) {
  475.                     setCost(currentCost);
  476.                     // Update (deprecated) "point" field.
  477.                     point = current.getPoint();
  478.                     return current;
  479.                 }

  480.                 // tests for termination and stringent tolerances
  481.                 // (2.2204e-16 is the machine epsilon for IEEE754)
  482.                 if ((FastMath.abs(actRed) <= 2.2204e-16) && (preRed <= 2.2204e-16) && (ratio <= 2.0)) {
  483.                     throw new ConvergenceException(LocalizedFormats.TOO_SMALL_COST_RELATIVE_TOLERANCE,
  484.                                                    costRelativeTolerance);
  485.                 } else if (delta <= 2.2204e-16 * xNorm) {
  486.                     throw new ConvergenceException(LocalizedFormats.TOO_SMALL_PARAMETERS_RELATIVE_TOLERANCE,
  487.                                                    parRelativeTolerance);
  488.                 } else if (maxCosine <= 2.2204e-16)  {
  489.                     throw new ConvergenceException(LocalizedFormats.TOO_SMALL_ORTHOGONALITY_TOLERANCE,
  490.                                                    orthoTolerance);
  491.                 }
  492.             }
  493.         }
  494.     }

  495.     /**
  496.      * Determine the Levenberg-Marquardt parameter.
  497.      * <p>This implementation is a translation in Java of the MINPACK
  498.      * <a href="http://www.netlib.org/minpack/lmpar.f">lmpar</a>
  499.      * routine.</p>
  500.      * <p>This method sets the lmPar and lmDir attributes.</p>
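     * <p>In the terms of the original MINPACK documentation (restated here for convenience,
     * not as an additional contract): given an m&times;n matrix A, an n&times;n nonsingular
     * diagonal matrix D, an m-vector b and a positive number delta, determine a value for
     * the parameter par such that if x solves the system</p>
     * <pre>
     *   A.x = b   and   sqrt(par).D.x = 0   (in the least squares sense)
     * </pre>
     * <p>then either par is zero and ||D.x|| - delta &le; 0.1 delta, or par is positive
     * and |(||D.x|| - delta)| &le; 0.1 delta.</p>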
  501.      * <p>The authors of the original fortran function are:</p>
  502.      * <ul>
  503.      *   <li>Argonne National Laboratory. MINPACK project. March 1980</li>
  504.      *   <li>Burton  S. Garbow</li>
  505.      *   <li>Kenneth E. Hillstrom</li>
  506.      *   <li>Jorge   J. More</li>
  507.      * </ul>
  508.      * <p>Luc Maisonobe did the Java translation.</p>
  509.      *
  510.      * @param qy array containing qTy
  511.      * @param delta upper bound on the euclidean norm of diagR * lmDir
  512.      * @param diag diagonal matrix
  513.      * @param work1 work array
  514.      * @param work2 work array
  515.      * @param work3 work array
  516.      */
  517.     private void determineLMParameter(double[] qy, double delta, double[] diag,
  518.                                       double[] work1, double[] work2, double[] work3) {
  519.         final int nC = weightedJacobian[0].length;

  520.         // compute and store in x the gauss-newton direction, if the
  521.         // jacobian is rank-deficient, obtain a least squares solution
  522.         for (int j = 0; j < rank; ++j) {
  523.             lmDir[permutation[j]] = qy[j];
  524.         }
  525.         for (int j = rank; j < nC; ++j) {
  526.             lmDir[permutation[j]] = 0;
  527.         }
  528.         for (int k = rank - 1; k >= 0; --k) {
  529.             int pk = permutation[k];
  530.             double ypk = lmDir[pk] / diagR[pk];
  531.             for (int i = 0; i < k; ++i) {
  532.                 lmDir[permutation[i]] -= ypk * weightedJacobian[i][pk];
  533.             }
  534.             lmDir[pk] = ypk;
  535.         }

  536.         // evaluate the function at the origin, and test
  537.         // for acceptance of the Gauss-Newton direction
  538.         double dxNorm = 0;
  539.         for (int j = 0; j < solvedCols; ++j) {
  540.             int pj = permutation[j];
  541.             double s = diag[pj] * lmDir[pj];
  542.             work1[pj] = s;
  543.             dxNorm += s * s;
  544.         }
  545.         dxNorm = FastMath.sqrt(dxNorm);
  546.         double fp = dxNorm - delta;
  547.         if (fp <= 0.1 * delta) {
  548.             lmPar = 0;
  549.             return;
  550.         }

  551.         // if the jacobian is not rank deficient, the Newton step provides
  552.         // a lower bound, parl, for the zero of the function,
  553.         // otherwise set this bound to zero
  554.         double sum2;
  555.         double parl = 0;
  556.         if (rank == solvedCols) {
  557.             for (int j = 0; j < solvedCols; ++j) {
  558.                 int pj = permutation[j];
  559.                 work1[pj] *= diag[pj] / dxNorm;
  560.             }
  561.             sum2 = 0;
  562.             for (int j = 0; j < solvedCols; ++j) {
  563.                 int pj = permutation[j];
  564.                 double sum = 0;
  565.                 for (int i = 0; i < j; ++i) {
  566.                     sum += weightedJacobian[i][pj] * work1[permutation[i]];
  567.                 }
  568.                 double s = (work1[pj] - sum) / diagR[pj];
  569.                 work1[pj] = s;
  570.                 sum2 += s * s;
  571.             }
  572.             parl = fp / (delta * sum2);
  573.         }

  574.         // calculate an upper bound, paru, for the zero of the function
  575.         sum2 = 0;
  576.         for (int j = 0; j < solvedCols; ++j) {
  577.             int pj = permutation[j];
  578.             double sum = 0;
  579.             for (int i = 0; i <= j; ++i) {
  580.                 sum += weightedJacobian[i][pj] * qy[i];
  581.             }
  582.             sum /= diag[pj];
  583.             sum2 += sum * sum;
  584.         }
  585.         double gNorm = FastMath.sqrt(sum2);
  586.         double paru = gNorm / delta;
  587.         if (paru == 0) {
  588.             // 2.2251e-308 is the smallest positive normalized double for IEEE754
  589.             paru = 2.2251e-308 / FastMath.min(delta, 0.1);
  590.         }

  591.         // if the input par lies outside of the interval (parl,paru),
  592.         // set par to the closer endpoint
  593.         lmPar = FastMath.min(paru, FastMath.max(lmPar, parl));
  594.         if (lmPar == 0) {
  595.             lmPar = gNorm / dxNorm;
  596.         }

  597.         for (int countdown = 10; countdown >= 0; --countdown) {

  598.             // evaluate the function at the current value of lmPar
  599.             if (lmPar == 0) {
  600.                 lmPar = FastMath.max(2.2251e-308, 0.001 * paru);
  601.             }
  602.             double sPar = FastMath.sqrt(lmPar);
  603.             for (int j = 0; j < solvedCols; ++j) {
  604.                 int pj = permutation[j];
  605.                 work1[pj] = sPar * diag[pj];
  606.             }
  607.             determineLMDirection(qy, work1, work2, work3);

  608.             dxNorm = 0;
  609.             for (int j = 0; j < solvedCols; ++j) {
  610.                 int pj = permutation[j];
  611.                 double s = diag[pj] * lmDir[pj];
  612.                 work3[pj] = s;
  613.                 dxNorm += s * s;
  614.             }
  615.             dxNorm = FastMath.sqrt(dxNorm);
  616.             double previousFP = fp;
  617.             fp = dxNorm - delta;

  618.             // if the function is small enough, accept the current value
  619.             // of lmPar, also test for the exceptional cases where parl is zero
  620.             if ((FastMath.abs(fp) <= 0.1 * delta) ||
  621.                     ((parl == 0) && (fp <= previousFP) && (previousFP < 0))) {
  622.                 return;
  623.             }

  624.             // compute the Newton correction
  625.             for (int j = 0; j < solvedCols; ++j) {
  626.                 int pj = permutation[j];
  627.                 work1[pj] = work3[pj] * diag[pj] / dxNorm;
  628.             }
  629.             for (int j = 0; j < solvedCols; ++j) {
  630.                 int pj = permutation[j];
  631.                 work1[pj] /= work2[j];
  632.                 double tmp = work1[pj];
  633.                 for (int i = j + 1; i < solvedCols; ++i) {
  634.                     work1[permutation[i]] -= weightedJacobian[i][pj] * tmp;
  635.                 }
  636.             }
  637.             sum2 = 0;
  638.             for (int j = 0; j < solvedCols; ++j) {
  639.                 double s = work1[permutation[j]];
  640.                 sum2 += s * s;
  641.             }
  642.             double correction = fp / (delta * sum2);

  643.             // depending on the sign of the function, update parl or paru.
  644.             if (fp > 0) {
  645.                 parl = FastMath.max(parl, lmPar);
  646.             } else if (fp < 0) {
  647.                 paru = FastMath.min(paru, lmPar);
  648.             }

  649.             // compute an improved estimate for lmPar
  650.             lmPar = FastMath.max(parl, lmPar + correction);

  651.         }
  652.     }

  653.     /**
  654.      * Solve a*x = b and d*x = 0 in the least squares sense.
  655.      * <p>This implementation is a translation in Java of the MINPACK
  656.      * <a href="http://www.netlib.org/minpack/qrsolv.f">qrsolv</a>
  657.      * routine.</p>
  658.      * <p>This method sets the lmDir and lmDiag attributes.</p>
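     * <p>Equivalently (a restatement of the original qrsolv documentation rather than an
     * additional contract), given the Q.R. factorization with column pivoting A.P = Q.R,
     * the method completes the least squares solution of the augmented system</p>
     * <pre>
     *   [ A ]       [ b ]
     *   [   ] . x = [   ]
     *   [ D ]       [ 0 ]
     * </pre>
     * <p>whose solution also satisfies the normal equations
     * (A<sup>t</sup>.A + D<sup>t</sup>.D).x = A<sup>t</sup>.b, the system that arises at
     * each Levenberg-Marquardt step.</p>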
  659.      * <p>The authors of the original fortran function are:</p>
  660.      * <ul>
  661.      *   <li>Argonne National Laboratory. MINPACK project. March 1980</li>
  662.      *   <li>Burton  S. Garbow</li>
  663.      *   <li>Kenneth E. Hillstrom</li>
  664.      *   <li>Jorge   J. More</li>
  665.      * </ul>
  666.      * <p>Luc Maisonobe did the Java translation.</p>
  667.      *
  668.      * @param qy array containing qTy
  669.      * @param diag diagonal matrix
  670.      * @param lmDiag diagonal elements associated with lmDir
  671.      * @param work work array
  672.      */
  673.     private void determineLMDirection(double[] qy, double[] diag,
  674.                                       double[] lmDiag, double[] work) {

  675.         // copy R and Qty to preserve input and initialize s
  676.         //  in particular, save the diagonal elements of R in lmDir
  677.         for (int j = 0; j < solvedCols; ++j) {
  678.             int pj = permutation[j];
  679.             for (int i = j + 1; i < solvedCols; ++i) {
  680.                 weightedJacobian[i][pj] = weightedJacobian[j][permutation[i]];
  681.             }
  682.             lmDir[j] = diagR[pj];
  683.             work[j]  = qy[j];
  684.         }

  685.         // eliminate the diagonal matrix d using a Givens rotation
  686.         for (int j = 0; j < solvedCols; ++j) {

  687.             // prepare the row of d to be eliminated, locating the
  688.             // diagonal element using p from the Q.R. factorization
  689.             int pj = permutation[j];
  690.             double dpj = diag[pj];
  691.             if (dpj != 0) {
  692.                 Arrays.fill(lmDiag, j + 1, lmDiag.length, 0);
  693.             }
  694.             lmDiag[j] = dpj;

  695.             //  the transformations to eliminate the row of d
  696.             // modify only a single element of Qty
  697.             // beyond the first n, which is initially zero.
  698.             double qtbpj = 0;
  699.             for (int k = j; k < solvedCols; ++k) {
  700.                 int pk = permutation[k];

  701.                 // determine a Givens rotation which eliminates the
  702.                 // appropriate element in the current row of d
  703.                 if (lmDiag[k] != 0) {

  704.                     final double sin;
  705.                     final double cos;
  706.                     double rkk = weightedJacobian[k][pk];
  707.                     if (FastMath.abs(rkk) < FastMath.abs(lmDiag[k])) {
  708.                         final double cotan = rkk / lmDiag[k];
  709.                         sin   = 1.0 / FastMath.sqrt(1.0 + cotan * cotan);
  710.                         cos   = sin * cotan;
  711.                     } else {
  712.                         final double tan = lmDiag[k] / rkk;
  713.                         cos = 1.0 / FastMath.sqrt(1.0 + tan * tan);
  714.                         sin = cos * tan;
  715.                     }

  716.                     // compute the modified diagonal element of R and
  717.                     // the modified element of (Qty,0)
  718.                     weightedJacobian[k][pk] = cos * rkk + sin * lmDiag[k];
  719.                     final double temp = cos * work[k] + sin * qtbpj;
  720.                     qtbpj = -sin * work[k] + cos * qtbpj;
  721.                     work[k] = temp;

  722.                     // accumulate the transformation in the row of s
  723.                     for (int i = k + 1; i < solvedCols; ++i) {
  724.                         double rik = weightedJacobian[i][pk];
  725.                         final double temp2 = cos * rik + sin * lmDiag[i];
  726.                         lmDiag[i] = -sin * rik + cos * lmDiag[i];
  727.                         weightedJacobian[i][pk] = temp2;
  728.                     }
  729.                 }
  730.             }

  731.             // store the diagonal element of s and restore
  732.             // the corresponding diagonal element of R
  733.             lmDiag[j] = weightedJacobian[j][permutation[j]];
  734.             weightedJacobian[j][permutation[j]] = lmDir[j];
  735.         }

  736.         // solve the triangular system for z, if the system is
  737.         // singular, then obtain a least squares solution
  738.         int nSing = solvedCols;
  739.         for (int j = 0; j < solvedCols; ++j) {
  740.             if ((lmDiag[j] == 0) && (nSing == solvedCols)) {
  741.                 nSing = j;
  742.             }
  743.             if (nSing < solvedCols) {
  744.                 work[j] = 0;
  745.             }
  746.         }
  747.         if (nSing > 0) {
  748.             for (int j = nSing - 1; j >= 0; --j) {
  749.                 int pj = permutation[j];
  750.                 double sum = 0;
  751.                 for (int i = j + 1; i < nSing; ++i) {
  752.                     sum += weightedJacobian[i][pj] * work[i];
  753.                 }
  754.                 work[j] = (work[j] - sum) / lmDiag[j];
  755.             }
  756.         }

  757.         // permute the components of z back to components of lmDir
  758.         for (int j = 0; j < lmDir.length; ++j) {
  759.             lmDir[permutation[j]] = work[j];
  760.         }
  761.     }

  762.     /**
  763.      * Decompose a matrix A as A.P = Q.R using Householder transforms.
  764.      * <p>As suggested in the P. Lascaux and R. Theodor book
  765.      * <i>Analyse num&eacute;rique matricielle appliqu&eacute;e &agrave;
  766.      * l'art de l'ing&eacute;nieur</i> (Masson, 1986), instead of representing
  767.      * the Householder transforms with u<sub>k</sub> unit vectors such that:
  768.      * <pre>
  769.      * H<sub>k</sub> = I - 2u<sub>k</sub>.u<sub>k</sub><sup>t</sup>
  770.      * </pre>
  771.      * we use v<sub>k</sub> non-unit vectors such that:
  772.      * <pre>
  773.      * H<sub>k</sub> = I - beta<sub>k</sub>v<sub>k</sub>.v<sub>k</sub><sup>t</sup>
  774.      * </pre>
  775.      * where v<sub>k</sub> = a<sub>k</sub> - alpha<sub>k</sub> e<sub>k</sub>.
  776.      * The beta<sub>k</sub> coefficients are provided upon exit as recomputing
  777.      * them from the v<sub>k</sub> vectors would be costly.</p>
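     * <p>With this representation, applying H<sub>k</sub> to a vector w only requires the
     * stored v<sub>k</sub> and beta<sub>k</sub>, as done in the {@code qTy} method:</p>
     * <pre>
     * H<sub>k</sub>.w = w - beta<sub>k</sub> (v<sub>k</sub><sup>t</sup>.w) v<sub>k</sub>
     * </pre>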
  778.      * <p>This decomposition handles rank deficient cases since the transformations
  779.      * are performed in non-increasing column norms order thanks to column
  780.      * pivoting. The diagonal elements of the R matrix are therefore also in
  781.      * non-increasing absolute values order.</p>
  782.      *
  783.      * @param jacobian Weighted Jacobian matrix at the current point.
  784.      * @exception ConvergenceException if the decomposition cannot be performed
  785.      */
  786.     private void qrDecomposition(RealMatrix jacobian) throws ConvergenceException {
  787.         // Code in this class assumes that the weighted Jacobian is -(W^(1/2) J),
  788.         // hence the multiplication by -1.
  789.         weightedJacobian = jacobian.scalarMultiply(-1).getData();

  790.         final int nR = weightedJacobian.length;
  791.         final int nC = weightedJacobian[0].length;

  792.         // initializations
  793.         for (int k = 0; k < nC; ++k) {
  794.             permutation[k] = k;
  795.             double norm2 = 0;
  796.             for (int i = 0; i < nR; ++i) {
  797.                 double akk = weightedJacobian[i][k];
  798.                 norm2 += akk * akk;
  799.             }
  800.             jacNorm[k] = FastMath.sqrt(norm2);
  801.         }

  802.         // transform the matrix column after column
  803.         for (int k = 0; k < nC; ++k) {

  804.             // select the column with the greatest norm on active components
  805.             int nextColumn = -1;
  806.             double ak2 = Double.NEGATIVE_INFINITY;
  807.             for (int i = k; i < nC; ++i) {
  808.                 double norm2 = 0;
  809.                 for (int j = k; j < nR; ++j) {
  810.                     double aki = weightedJacobian[j][permutation[i]];
  811.                     norm2 += aki * aki;
  812.                 }
  813.                 if (Double.isInfinite(norm2) || Double.isNaN(norm2)) {
  814.                     throw new ConvergenceException(LocalizedFormats.UNABLE_TO_PERFORM_QR_DECOMPOSITION_ON_JACOBIAN,
  815.                                                    nR, nC);
  816.                 }
  817.                 if (norm2 > ak2) {
  818.                     nextColumn = i;
  819.                     ak2        = norm2;
  820.                 }
  821.             }
  822.             if (ak2 <= qrRankingThreshold) {
  823.                 rank = k;
  824.                 return;
  825.             }
  826.             int pk                  = permutation[nextColumn];
  827.             permutation[nextColumn] = permutation[k];
  828.             permutation[k]          = pk;

  829.             // choose alpha such that Hk.u = alpha ek
  830.             double akk   = weightedJacobian[k][pk];
  831.             double alpha = (akk > 0) ? -FastMath.sqrt(ak2) : FastMath.sqrt(ak2);
  832.             double betak = 1.0 / (ak2 - akk * alpha);
  833.             beta[pk]     = betak;

  834.             // transform the current column
  835.             diagR[pk]        = alpha;
  836.             weightedJacobian[k][pk] -= alpha;

  837.             // transform the remaining columns
  838.             for (int dk = nC - 1 - k; dk > 0; --dk) {
  839.                 double gamma = 0;
  840.                 for (int j = k; j < nR; ++j) {
  841.                     gamma += weightedJacobian[j][pk] * weightedJacobian[j][permutation[k + dk]];
  842.                 }
  843.                 gamma *= betak;
  844.                 for (int j = k; j < nR; ++j) {
  845.                     weightedJacobian[j][permutation[k + dk]] -= gamma * weightedJacobian[j][pk];
  846.                 }
  847.             }
  848.         }
  849.         rank = solvedCols;
  850.     }

  851.     /**
  852.      * Compute the product Qt.y for some Q.R. decomposition.
  853.      *
  854.      * @param y vector to multiply (will be overwritten with the result)
  855.      */
  856.     private void qTy(double[] y) {
  857.         final int nR = weightedJacobian.length;
  858.         final int nC = weightedJacobian[0].length;

  859.         for (int k = 0; k < nC; ++k) {
  860.             int pk = permutation[k];
  861.             double gamma = 0;
  862.             for (int i = k; i < nR; ++i) {
  863.                 gamma += weightedJacobian[i][pk] * y[i];
  864.             }
  865.             gamma *= beta[pk];
  866.             for (int i = k; i < nR; ++i) {
  867.                 y[i] -= gamma * weightedJacobian[i][pk];
  868.             }
  869.         }
  870.     }
  871. }