SimpleRegression.java

  1. /*
  2.  * Licensed to the Apache Software Foundation (ASF) under one or more
  3.  * contributor license agreements.  See the NOTICE file distributed with
  4.  * this work for additional information regarding copyright ownership.
  5.  * The ASF licenses this file to You under the Apache License, Version 2.0
  6.  * (the "License"); you may not use this file except in compliance with
  7.  * the License.  You may obtain a copy of the License at
  8.  *
  9.  *      http://www.apache.org/licenses/LICENSE-2.0
  10.  *
  11.  * Unless required by applicable law or agreed to in writing, software
  12.  * distributed under the License is distributed on an "AS IS" BASIS,
  13.  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14.  * See the License for the specific language governing permissions and
  15.  * limitations under the License.
  16.  */

  17. package org.apache.commons.math3.stat.regression;
  18. import java.io.Serializable;

  19. import org.apache.commons.math3.distribution.TDistribution;
  20. import org.apache.commons.math3.exception.MathIllegalArgumentException;
  21. import org.apache.commons.math3.exception.NoDataException;
  22. import org.apache.commons.math3.exception.OutOfRangeException;
  23. import org.apache.commons.math3.exception.util.LocalizedFormats;
  24. import org.apache.commons.math3.util.FastMath;
  25. import org.apache.commons.math3.util.Precision;

  26. /**
  27.  * Estimates an ordinary least squares regression model
  28.  * with one independent variable.
  29.  * <p>
  30.  * <code> y = intercept + slope * x  </code></p>
  31.  * <p>
  32.  * Standard errors for <code>intercept</code> and <code>slope</code> are
  33.  * available as well as ANOVA, r-square and Pearson's r statistics.</p>
  34.  * <p>
  35.  * Observations (x,y pairs) can be added to the model one at a time or they
  36.  * can be provided in a 2-dimensional array.  The observations are not stored
  37.  * in memory, so there is no limit to the number of observations that can be
  38.  * added to the model.</p>
  39.  * <p>
  40.  * <strong>Usage Notes</strong>: <ul>
  41.  * <li> When there are fewer than two observations in the model, or when
  42.  * there is no variation in the x values (i.e. all x values are the same),
  43.  * all statistics return <code>NaN</code>. At least two observations with
  44.  * different x coordinates are required to estimate a bivariate regression
  45.  * model.
  46.  * </li>
  47.  * <li> Getters for the statistics always compute values based on the current
  48.  * set of observations -- i.e., you can get statistics, then add more data
  49.  * and get updated statistics without using a new instance.  There is no
  50.  * "compute" method that updates all statistics.  Each of the getters performs
  51.  * the necessary computations to return the requested statistic.
  52.  * </li>
  53.  * <li> The intercept term may be suppressed by passing {@code false} to
  54.  * the {@link #SimpleRegression(boolean)} constructor.  When the
  55.  * {@code hasIntercept} property is false, the model is estimated without a
  56.  * constant term and {@link #getIntercept()} returns {@code 0}.</li>
  57.  * </ul></p>
  58.  *
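 * <p>
 * <strong>Example</strong> (an illustrative sketch; the data values below are
 * made up and the quoted estimates are approximate):</p>
 * <pre>
 * SimpleRegression regression = new SimpleRegression();
 * regression.addData(1d, 2d);
 * regression.addData(3d, 3d);
 * regression.addData(4d, 6d);
 *
 * double slope     = regression.getSlope();      // ~1.214
 * double intercept = regression.getIntercept();  // ~0.429
 * double predicted = regression.predict(5d);     // intercept + slope * 5 = 6.5
 * </pre>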
  59.  */
  60. public class SimpleRegression implements Serializable, UpdatingMultipleLinearRegression {

  61.     /** Serializable version identifier */
  62.     private static final long serialVersionUID = -3004689053607543335L;

  63.     /** sum of x values */
  64.     private double sumX = 0d;

  65.     /** total variation in x (sum of squared deviations from xbar) */
  66.     private double sumXX = 0d;

  67.     /** sum of y values */
  68.     private double sumY = 0d;

  69.     /** total variation in y (sum of squared deviations from ybar) */
  70.     private double sumYY = 0d;

  71.     /** sum of products */
  72.     private double sumXY = 0d;

  73.     /** number of observations */
  74.     private long n = 0;

  75.     /** mean of accumulated x values, used in updating formulas */
  76.     private double xbar = 0;

  77.     /** mean of accumulated y values, used in updating formulas */
  78.     private double ybar = 0;

  79.     /** include an intercept or not */
  80.     private final boolean hasIntercept;
  81.     // ---------------------Public methods--------------------------------------

  82.     /**
  83.      * Create an empty SimpleRegression instance
  84.      */
  85.     public SimpleRegression() {
  86.         this(true);
  87.     }
  88.     /**
  89.     * Create a SimpleRegression instance, specifying whether or not to estimate
  90.     * an intercept.
  91.     *
  92.     * <p>Use {@code false} to estimate a model with no intercept.  When the
  93.     * {@code hasIntercept} property is false, the model is estimated without a
  94.     * constant term and {@link #getIntercept()} returns {@code 0}.</p>
  95.     *
  96.     * @param includeIntercept whether or not to include an intercept term in
  97.     * the regression model
  98.     */
  99.     public SimpleRegression(boolean includeIntercept) {
  100.         super();
  101.         hasIntercept = includeIntercept;
  102.     }
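
    // Illustrative sketch of a no-intercept fit (the data values are made up):
    // the line is forced through the origin and getIntercept() reports 0.
    //
    //     SimpleRegression throughOrigin = new SimpleRegression(false);
    //     throughOrigin.addData(1d, 2d);
    //     throughOrigin.addData(2d, 4.1);
    //     double slope     = throughOrigin.getSlope();     // ~2.04 (sum(x*y) / sum(x*x))
    //     double intercept = throughOrigin.getIntercept(); // 0 for a no-intercept model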

  103.     /**
  104.      * Adds the observation (x,y) to the regression data set.
  105.      * <p>
  106.      * Uses updating formulas for means and sums of squares defined in
  107.      * "Algorithms for Computing the Sample Variance: Analysis and
  108.      * Recommendations", Chan, T.F., Golub, G.H., and LeVeque, R.J.
  109.      * 1983, American Statistician, vol. 37, pp. 242-247, referenced in
  110.      * Weisberg, S. "Applied Linear Regression". 2nd Ed. 1985.</p>
  111.      *
  112.      *
  113.      * @param x independent variable value
  114.      * @param y dependent variable value
  115.      */
  116.     public void addData(final double x,final double y) {
  117.         if (n == 0) {
  118.             xbar = x;
  119.             ybar = y;
  120.         } else {
  121.             if( hasIntercept ){
  122.                 final double fact1 = 1.0 + n;
  123.                 final double fact2 = n / (1.0 + n);
  124.                 final double dx = x - xbar;
  125.                 final double dy = y - ybar;
  126.                 sumXX += dx * dx * fact2;
  127.                 sumYY += dy * dy * fact2;
  128.                 sumXY += dx * dy * fact2;
  129.                 xbar += dx / fact1;
  130.                 ybar += dy / fact1;
  131.             }
  132.          }
  133.         if( !hasIntercept ){
  134.             sumXX += x * x ;
  135.             sumYY += y * y ;
  136.             sumXY += x * y ;
  137.         }
  138.         sumX += x;
  139.         sumY += y;
  140.         n++;
  141.     }
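
    // Numerical sketch of the updating formulas above (values are illustrative):
    // after addData(1, 2), addData(3, 3) and addData(4, 6) the accumulated state is
    //     xbar = 8/3, ybar = 11/3,
    //     sumXX = 14/3, sumYY = 26/3, sumXY = 17/3,
    // matching the centered sums a two-pass computation would produce from the
    // same three observations.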

  142.     /**
  143.      * Appends data from another regression calculation to this one.
  144.      *
  145.      * <p>The mean update formulae are based on a paper written by Philippe
  146.      * P&eacute;bay:
  147.      * <a
  148.      * href="http://prod.sandia.gov/techlib/access-control.cgi/2008/086212.pdf">
  149.      * Formulas for Robust, One-Pass Parallel Computation of Covariances and
  150.      * Arbitrary-Order Statistical Moments</a>, 2008, Technical Report
  151.      * SAND2008-6212, Sandia National Laboratories.</p>
  152.      *
  153.      * @param reg model to append data from
  154.      * @since 3.3
  155.      */
  156.     public void append(SimpleRegression reg) {
  157.         if (n == 0) {
  158.             xbar = reg.xbar;
  159.             ybar = reg.ybar;
  160.             sumXX = reg.sumXX;
  161.             sumYY = reg.sumYY;
  162.             sumXY = reg.sumXY;
  163.         } else {
  164.             if (hasIntercept) {
  165.                 final double fact1 = reg.n / (double) (reg.n + n);
  166.                 final double fact2 = n * reg.n / (double) (reg.n + n);
  167.                 final double dx = reg.xbar - xbar;
  168.                 final double dy = reg.ybar - ybar;
  169.                 sumXX += reg.sumXX + dx * dx * fact2;
  170.                 sumYY += reg.sumYY + dy * dy * fact2;
  171.                 sumXY += reg.sumXY + dx * dy * fact2;
  172.                 xbar += dx * fact1;
  173.                 ybar += dy * fact1;
  174.             }else{
  175.                 sumXX += reg.sumXX;
  176.                 sumYY += reg.sumYY;
  177.                 sumXY += reg.sumXY;
  178.             }
  179.         }
  180.         sumX += reg.sumX;
  181.         sumY += reg.sumY;
  182.         n += reg.n;
  183.     }
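
    // Merge sketch (the data values are made up): two instances built from
    // disjoint parts of a data set can be combined with append(), giving the
    // same fit as a single pass over all of the observations.
    //
    //     SimpleRegression first = new SimpleRegression();
    //     first.addData(1d, 2d);
    //     first.addData(3d, 3d);
    //     SimpleRegression second = new SimpleRegression();
    //     second.addData(4d, 6d);
    //     first.append(second);
    //     double slope = first.getSlope();   // ~1.214, as if all three points were added to one instance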

  184.     /**
  185.      * Removes the observation (x,y) from the regression data set.
  186.      * <p>
  187.      * Mirrors the addData method.  This method permits the use of
  188.      * SimpleRegression instances in streaming mode where the regression
  189.      * is applied to a sliding "window" of observations, however the caller is
  190.      * responsible for maintaining the set of observations in the window.</p>
  191.      *
  192.      * The method has no effect if there are no points of data (i.e. n=0)
  193.      *
  194.      * @param x independent variable value
  195.      * @param y dependent variable value
  196.      */
  197.     public void removeData(final double x,final double y) {
  198.         if (n > 0) {
  199.             if (hasIntercept) {
  200.                 final double fact1 = n - 1.0;
  201.                 final double fact2 = n / (n - 1.0);
  202.                 final double dx = x - xbar;
  203.                 final double dy = y - ybar;
  204.                 sumXX -= dx * dx * fact2;
  205.                 sumYY -= dy * dy * fact2;
  206.                 sumXY -= dx * dy * fact2;
  207.                 xbar -= dx / fact1;
  208.                 ybar -= dy / fact1;
  209.             } else {
  210.                 final double fact1 = n - 1.0;
  211.                 sumXX -= x * x;
  212.                 sumYY -= y * y;
  213.                 sumXY -= x * y;
  214.                 xbar -= x / fact1;
  215.                 ybar -= y / fact1;
  216.             }
  217.              sumX -= x;
  218.              sumY -= y;
  219.              n--;
  220.         }
  221.     }
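
    // Sliding-window sketch (the arrays x, y and the window length are
    // hypothetical; the caller is responsible for remembering which observation
    // leaves the window):
    //
    //     for (int i = 0; i < x.length; i++) {
    //         regression.addData(x[i], y[i]);
    //         if (i >= windowSize) {
    //             regression.removeData(x[i - windowSize], y[i - windowSize]);
    //         }
    //         // the model now reflects at most the last windowSize observations
    //     }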

  222.     /**
  223.      * Adds the observations represented by the elements in
  224.      * <code>data</code>.
  225.      * <p>
  226.      * <code>(data[0][0],data[0][1])</code> will be the first observation, then
  227.      * <code>(data[1][0],data[1][1])</code>, etc.</p>
  228.      * <p>
  229.      * This method does not replace data that has already been added.  The
  230.      * observations represented by <code>data</code> are added to the existing
  231.      * dataset.</p>
  232.      * <p>
  233.      * To replace all data, use <code>clear()</code> before adding the new
  234.      * data.</p>
  235.      *
  236.      * @param data array of observations to be added
  237.      * @throws ModelSpecificationException if the length of {@code data[i]} is not
  238.      * greater than or equal to 2
  239.      */
  240.     public void addData(final double[][] data) throws ModelSpecificationException {
  241.         for (int i = 0; i < data.length; i++) {
  242.             if( data[i].length < 2 ){
  243.                throw new ModelSpecificationException(LocalizedFormats.INVALID_REGRESSION_OBSERVATION,
  244.                     data[i].length, 2);
  245.             }
  246.             addData(data[i][0], data[i][1]);
  247.         }
  248.     }
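
    // Batch sketch: the same three made-up observations used in the class-level
    // example, supplied as one 2-dimensional array.
    //
    //     regression.addData(new double[][] { { 1d, 2d }, { 3d, 3d }, { 4d, 6d } });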

  249.     /**
  250.      * Adds one observation to the regression model.
  251.      *
  252.      * @param x the independent variables which form the design matrix
  253.      * @param y the dependent or response variable
  254.      * @throws ModelSpecificationException if the length of {@code x} does not equal
  255.      * the number of independent variables in the model
  256.      */
  257.     public void addObservation(final double[] x,final double y)
  258.     throws ModelSpecificationException {
  259.         if( x == null || x.length == 0 ){
  260.             throw new ModelSpecificationException(LocalizedFormats.INVALID_REGRESSION_OBSERVATION,x!=null?x.length:0, 1);
  261.         }
  262.         addData( x[0], y );
  263.     }

  264.     /**
  265.      * Adds a series of observations to the regression model. The lengths of
  266.      * x and y must be the same and x must be rectangular.
  267.      *
  268.      * @param x a series of observations on the independent variables
  269.      * @param y a series of observations on the dependent variable;
  270.      * the length of {@code x} and {@code y} must be the same
  271.      * @throws ModelSpecificationException if {@code x} is not rectangular, does not match
  272.      * the length of {@code y} or does not contain sufficient data to estimate the model
  273.      */
  274.     public void addObservations(final double[][] x,final double[] y) throws ModelSpecificationException {
  275.         if ((x == null) || (y == null) || (x.length != y.length)) {
  276.             throw new ModelSpecificationException(
  277.                   LocalizedFormats.DIMENSIONS_MISMATCH_SIMPLE,
  278.                   (x == null) ? 0 : x.length,
  279.                   (y == null) ? 0 : y.length);
  280.         }
  281.         boolean obsOk=true;
  282.         for( int i = 0 ; i < x.length; i++){
  283.             if( x[i] == null || x[i].length == 0 ){
  284.                 obsOk = false;
  285.             }
  286.         }
  287.         if( !obsOk ){
  288.             throw new ModelSpecificationException(
  289.                   LocalizedFormats.NOT_ENOUGH_DATA_FOR_NUMBER_OF_PREDICTORS,
  290.                   0, 1);
  291.         }
  292.         for( int i = 0 ; i < x.length ; i++){
  293.             addData( x[i][0], y[i] );
  294.         }
  295.     }

  296.     /**
  297.      * Removes observations represented by the elements in <code>data</code>.
  298.      * <p>
  299.      * If the array is larger than the current n, only the first n elements are
  300.      * processed.  This method permits the use of SimpleRegression instances in
  301.      * streaming mode where the regression is applied to a sliding "window" of
  302.      * observations, however the caller is responsible for maintaining the set
  303.      * of observations in the window.</p>
  304.      * <p>
  305.      * To remove all data, use <code>clear()</code>.</p>
  306.      *
  307.      * @param data array of observations to be removed
  308.      */
  309.     public void removeData(double[][] data) {
  310.         for (int i = 0; i < data.length && n > 0; i++) {
  311.             removeData(data[i][0], data[i][1]);
  312.         }
  313.     }

  314.     /**
  315.      * Clears all data from the model.
  316.      */
  317.     public void clear() {
  318.         sumX = 0d;
  319.         sumXX = 0d;
  320.         sumY = 0d;
  321.         sumYY = 0d;
  322.         sumXY = 0d;
  323.         n = 0;
  324.     }

  325.     /**
  326.      * Returns the number of observations that have been added to the model.
  327.      *
  328.      * @return the number of observations that have been added.
  329.      */
  330.     public long getN() {
  331.         return n;
  332.     }

  333.     /**
  334.      * Returns the "predicted" <code>y</code> value associated with the
  335.      * supplied <code>x</code> value, based on the data that has been
  336.      * added to the model when this method is invoked.
  337.      * <p>
  338.      * <code> predict(x) = intercept + slope * x </code></p>
  339.      * <p>
  340.      * <strong>Preconditions</strong>: <ul>
  341.      * <li>At least two observations (with at least two different x values)
  342.      * must have been added before invoking this method. If this method is
  343.      * invoked before a model can be estimated, <code>Double.NaN</code> is
  344.      * returned.
  345.      * </li></ul></p>
  346.      *
  347.      * @param x input <code>x</code> value
  348.      * @return predicted <code>y</code> value
  349.      */
  350.     public double predict(final double x) {
  351.         final double b1 = getSlope();
  352.         if (hasIntercept) {
  353.             return getIntercept(b1) + b1 * x;
  354.         }
  355.         return b1 * x;
  356.     }

  357.     /**
  358.      * Returns the intercept of the estimated regression line, if
  359.      * {@link #hasIntercept()} is true; otherwise 0.
  360.      * <p>
  361.      * The least squares estimate of the intercept is computed using the
  362.      * <a href="http://www.xycoon.com/estimation4.htm">normal equations</a>.
  363.      * The intercept is sometimes denoted b0.</p>
  364.      * <p>
  365.      * <strong>Preconditions</strong>: <ul>
  366.      * <li>At least two observations (with at least two different x values)
  367.      * must have been added before invoking this method. If this method is
  368.      * invoked before a model can be estimated, <code>Double.NaN</code> is
  369.      * returned.
  370.      * </li></ul></p>
  371.      *
  372.      * @return the intercept of the regression line if the model includes an
  373.      * intercept; 0 otherwise
  374.      * @see #SimpleRegression(boolean)
  375.      */
  376.     public double getIntercept() {
  377.         return hasIntercept ? getIntercept(getSlope()) : 0.0;
  378.     }

  379.     /**
  380.      * Returns true if the model includes an intercept term.
  381.      *
  382.      * @return true if the regression includes an intercept; false otherwise
  383.      * @see #SimpleRegression(boolean)
  384.      */
  385.     public boolean hasIntercept() {
  386.         return hasIntercept;
  387.     }

  388.     /**
  389.     * Returns the slope of the estimated regression line.
  390.     * <p>
  391.     * The least squares estimate of the slope is computed using the
  392.     * <a href="http://www.xycoon.com/estimation4.htm">normal equations</a>.
  393.     * The slope is sometimes denoted b1.</p>
  394.     * <p>
  395.     * <strong>Preconditions</strong>: <ul>
  396.     * <li>At least two observations (with at least two different x values)
  397.     * must have been added before invoking this method. If this method is
  398.     * invoked before a model can be estimated, <code>Double.NaN</code> is
  399.     * returned.
  400.     * </li></ul></p>
  401.     *
  402.     * @return the slope of the regression line
  403.     */
  404.     public double getSlope() {
  405.         if (n < 2) {
  406.             return Double.NaN; //not enough data
  407.         }
  408.         if (FastMath.abs(sumXX) < 10 * Double.MIN_VALUE) {
  409.             return Double.NaN; //not enough variation in x
  410.         }
  411.         return sumXY / sumXX;
  412.     }

  413.     /**
  414.      * Returns the <a href="http://www.xycoon.com/SumOfSquares.htm">
  415.      * sum of squared errors</a> (SSE) associated with the regression
  416.      * model.
  417.      * <p>
  418.      * The sum is computed using the computational formula</p>
  419.      * <p>
  420.      * <code>SSE = SYY - (SXY * SXY / SXX)</code></p>
  421.      * <p>
  422.      * where <code>SYY</code> is the sum of the squared deviations of the y
  423.      * values about their mean, <code>SXX</code> is similarly defined and
  424.      * <code>SXY</code> is the sum of the products of x and y mean deviations.
  425.      * </p><p>
  426.      * The sums are accumulated using the updating algorithm referenced in
  427.      * {@link #addData}.</p>
  428.      * <p>
  429.      * The return value is constrained to be non-negative - i.e., if due to
  430.      * rounding errors the computational formula returns a negative result,
  431.      * 0 is returned.</p>
  432.      * <p>
  433.      * <strong>Preconditions</strong>: <ul>
  434.      * <li>At least two observations (with at least two different x values)
  435.      * must have been added before invoking this method. If this method is
  436.      * invoked before a model can be estimated, <code>Double.NaN</code> is
  437.      * returned.
  438.      * </li></ul></p>
  439.      *
  440.      * @return sum of squared errors associated with the regression model
  441.      */
  442.     public double getSumSquaredErrors() {
  443.         return FastMath.max(0d, sumYY - sumXY * sumXY / sumXX);
  444.     }
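
    // Worked sketch of the computational formula (illustrative values): for the
    // observations (1,2), (3,3), (4,6),
    //     SYY = 26/3, SXX = 14/3, SXY = 17/3,
    //     SSE = SYY - SXY * SXY / SXX = 26/3 - 289/42 = 25/14 (about 1.786).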

  445.     /**
  446.      * Returns the sum of squared deviations of the y values about their mean.
  447.      * <p>
  448.      * This is defined as SSTO
  449.      * <a href="http://www.xycoon.com/SumOfSquares.htm">here</a>.</p>
  450.      * <p>
  451.      * If <code>n < 2</code>, this returns <code>Double.NaN</code>.</p>
  452.      *
  453.      * @return sum of squared deviations of y values
  454.      */
  455.     public double getTotalSumSquares() {
  456.         if (n < 2) {
  457.             return Double.NaN;
  458.         }
  459.         return sumYY;
  460.     }

  461.     /**
  462.      * Returns the sum of squared deviations of the x values about their mean.
  463.      *
  464.      * <p>If <code>n < 2</code>, this returns <code>Double.NaN</code>.</p>
  465.      *
  466.      * @return sum of squared deviations of x values
  467.      */
  468.     public double getXSumSquares() {
  469.         if (n < 2) {
  470.             return Double.NaN;
  471.         }
  472.         return sumXX;
  473.     }

  474.     /**
  475.      * Returns the sum of crossproducts, x<sub>i</sub>*y<sub>i</sub>.
  476.      *
  477.      * @return sum of cross products
  478.      */
  479.     public double getSumOfCrossProducts() {
  480.         return sumXY;
  481.     }

  482.     /**
  483.      * Returns the sum of squared deviations of the predicted y values about
  484.      * their mean (which equals the mean of y).
  485.      * <p>
  486.      * This is usually abbreviated SSR or SSM.  It is defined as SSM
  487.      * <a href="http://www.xycoon.com/SumOfSquares.htm">here</a></p>
  488.      * <p>
  489.      * <strong>Preconditions</strong>: <ul>
  490.      * <li>At least two observations (with at least two different x values)
  491.      * must have been added before invoking this method. If this method is
  492.      * invoked before a model can be estimated, <code>Double.NaN</code> is
  493.      * returned.
  494.      * </li></ul></p>
  495.      *
  496.      * @return sum of squared deviations of predicted y values
  497.      */
  498.     public double getRegressionSumSquares() {
  499.         return getRegressionSumSquares(getSlope());
  500.     }

  501.     /**
  502.      * Returns the sum of squared errors divided by the degrees of freedom,
  503.      * usually abbreviated MSE.
  504.      * <p>
  505.      * If there are fewer than <strong>three</strong> data pairs in the model,
  506.      * or if there is no variation in <code>x</code>, this returns
  507.      * <code>Double.NaN</code>.</p>
  508.      *
  509.      * @return the mean square error associated with the regression model
  510.      */
  511.     public double getMeanSquareError() {
  512.         if (n < 3) {
  513.             return Double.NaN;
  514.         }
  515.         return hasIntercept ? (getSumSquaredErrors() / (n - 2)) : (getSumSquaredErrors() / (n - 1));
  516.     }

  517.     /**
  518.      * Returns <a href="http://mathworld.wolfram.com/CorrelationCoefficient.html">
  519.      * Pearson's product moment correlation coefficient</a>,
  520.      * usually denoted r.
  521.      * <p>
  522.      * <strong>Preconditions</strong>: <ul>
  523.      * <li>At least two observations (with at least two different x values)
  524.      * must have been added before invoking this method. If this method is
  525.      * invoked before a model can be estimated, <code>Double.NaN</code> is
  526.      * returned.
  527.      * </li></ul></p>
  528.      *
  529.      * @return Pearson's r
  530.      */
  531.     public double getR() {
  532.         double b1 = getSlope();
  533.         double result = FastMath.sqrt(getRSquare());
  534.         if (b1 < 0) {
  535.             result = -result;
  536.         }
  537.         return result;
  538.     }

  539.     /**
  540.      * Returns the <a href="http://www.xycoon.com/coefficient1.htm">
  541.      * coefficient of determination</a>,
  542.      * usually denoted r-square.
  543.      * <p>
  544.      * <strong>Preconditions</strong>: <ul>
  545.      * <li>At least two observations (with at least two different x values)
  546.      * must have been added before invoking this method. If this method is
  547.      * invoked before a model can be estimated, <code>Double.NaN</code> is
  548.      * returned.
  549.      * </li></ul></p>
  550.      *
  551.      * @return r-square
  552.      */
  553.     public double getRSquare() {
  554.         double ssto = getTotalSumSquares();
  555.         return (ssto - getSumSquaredErrors()) / ssto;
  556.     }

  557.     /**
  558.      * Returns the <a href="http://www.xycoon.com/standarderrorb0.htm">
  559.      * standard error of the intercept estimate</a>,
  560.      * usually denoted s(b0).
  561.      * <p>
  562.      * If there are fewer than <strong>three</strong> observations in the
  563.      * model, or if there is no variation in x, this returns
  564.      * <code>Double.NaN</code>. Additionally, <code>Double.NaN</code> is
  565.      * returned when the intercept is constrained to be zero.</p>
  566.      *
  567.      * @return standard error associated with intercept estimate
  568.      */
  569.     public double getInterceptStdErr() {
  570.         if( !hasIntercept ){
  571.             return Double.NaN;
  572.         }
  573.         return FastMath.sqrt(
  574.             getMeanSquareError() * ((1d / n) + (xbar * xbar) / sumXX));
  575.     }

  576.     /**
  577.      * Returns the <a href="http://www.xycoon.com/standerrorb(1).htm">standard
  578.      * error of the slope estimate</a>,
  579.      * usually denoted s(b1).
  580.      * <p>
  581.      * If there are fewer than <strong>three</strong> data pairs in the model,
  582.      * or if there is no variation in x, this returns <code>Double.NaN</code>.
  583.      * </p>
  584.      *
  585.      * @return standard error associated with slope estimate
  586.      */
  587.     public double getSlopeStdErr() {
  588.         return FastMath.sqrt(getMeanSquareError() / sumXX);
  589.     }

  590.     /**
  591.      * Returns the half-width of a 95% confidence interval for the slope
  592.      * estimate.
  593.      * <p>
  594.      * The 95% confidence interval is</p>
  595.      * <p>
  596.      * <code>(getSlope() - getSlopeConfidenceInterval(),
  597.      * getSlope() + getSlopeConfidenceInterval())</code></p>
  598.      * <p>
  599.      * If there are fewer than <strong>three</strong> observations in the
  600.      * model, or if there is no variation in x, this returns
  601.      * <code>Double.NaN</code>.</p>
  602.      * <p>
  603.      * <strong>Usage Note</strong>:<br>
  604.      * The validity of this statistic depends on the assumption that the
  605.      * observations included in the model are drawn from a
  606.      * <a href="http://mathworld.wolfram.com/BivariateNormalDistribution.html">
  607.      * Bivariate Normal Distribution</a>.</p>
  608.      *
  609.      * @return half-width of 95% confidence interval for the slope estimate
  610.      * @throws OutOfRangeException if the confidence interval can not be computed.
  611.      */
  612.     public double getSlopeConfidenceInterval() throws OutOfRangeException {
  613.         return getSlopeConfidenceInterval(0.05d);
  614.     }

  615.     /**
  616.      * Returns the half-width of a (100-100*alpha)% confidence interval for
  617.      * the slope estimate.
  618.      * <p>
  619.      * The (100-100*alpha)% confidence interval is </p>
  620.      * <p>
  621.      * <code>(getSlope() - getSlopeConfidenceInterval(),
  622.      * getSlope() + getSlopeConfidenceInterval())</code></p>
  623.      * <p>
  624.      * To request, for example, a 99% confidence interval, use
  625.      * <code>alpha = .01</code></p>
  626.      * <p>
  627.      * <strong>Usage Note</strong>:<br>
  628.      * The validity of this statistic depends on the assumption that the
  629.      * observations included in the model are drawn from a
  630.      * <a href="http://mathworld.wolfram.com/BivariateNormalDistribution.html">
  631.      * Bivariate Normal Distribution</a>.</p>
  632.      * <p>
  633.      * <strong> Preconditions:</strong><ul>
  634.      * <li>If there are fewer than <strong>three</strong> observations in the
  635.      * model, or if there is no variation in x, this returns
  636.      * <code>Double.NaN</code>.
  637.      * </li>
  638.      * <li><code>(0 < alpha < 1)</code>; otherwise an
  639.      * <code>OutOfRangeException</code> is thrown.
  640.      * </li></ul></p>
  641.      *
  642.      * @param alpha the desired significance level
  643.      * @return half-width of the (100-100*alpha)% confidence interval for the slope estimate
  644.      * @throws OutOfRangeException if the confidence interval can not be computed.
  645.      */
  646.     public double getSlopeConfidenceInterval(final double alpha)
  647.     throws OutOfRangeException {
  648.         if (n < 3) {
  649.             return Double.NaN;
  650.         }
  651.         if (alpha >= 1 || alpha <= 0) {
  652.             throw new OutOfRangeException(LocalizedFormats.SIGNIFICANCE_LEVEL,
  653.                                           alpha, 0, 1);
  654.         }
  655.         // No advertised NotStrictlyPositiveException here - will return NaN above
  656.         TDistribution distribution = new TDistribution(n - 2);
  657.         return getSlopeStdErr() *
  658.             distribution.inverseCumulativeProbability(1d - alpha / 2d);
  659.     }
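
    // Interval sketch (the significance level is illustrative): a 99% confidence
    // interval for the slope.
    //
    //     double halfWidth = regression.getSlopeConfidenceInterval(0.01);
    //     double lower = regression.getSlope() - halfWidth;
    //     double upper = regression.getSlope() + halfWidth;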

  660.     /**
  661.      * Returns the significance level of the slope (equiv) correlation.
  662.      * <p>
  663.      * Specifically, the returned value is the smallest <code>alpha</code>
  664.      * such that the slope confidence interval with significance level
  665.      * equal to <code>alpha</code> does not include <code>0</code>.
  666.      * On regression output, this is often denoted <code>Prob(|t| > 0)</code>
  667.      * </p><p>
  668.      * <strong>Usage Note</strong>:<br>
  669.      * The validity of this statistic depends on the assumption that the
  670.      * observations included in the model are drawn from a
  671.      * <a href="http://mathworld.wolfram.com/BivariateNormalDistribution.html">
  672.      * Bivariate Normal Distribution</a>.</p>
  673.      * <p>
  674.      * If there are fewer than <strong>three</strong> observations in the
  675.      * model, or if there is no variation in x, this returns
  676.      * <code>Double.NaN</code>.</p>
  677.      *
  678.      * @return significance level for slope/correlation
  679.      * @throws org.apache.commons.math3.exception.MaxCountExceededException
  680.      * if the significance level can not be computed.
  681.      */
  682.     public double getSignificance() {
  683.         if (n < 3) {
  684.             return Double.NaN;
  685.         }
  686.         // No advertised NotStrictlyPositiveException here - will return NaN above
  687.         TDistribution distribution = new TDistribution(n - 2);
  688.         return 2d * (1.0 - distribution.cumulativeProbability(
  689.                     FastMath.abs(getSlope()) / getSlopeStdErr()));
  690.     }
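
    // Significance sketch (the 5% threshold is illustrative): treat the slope as
    // significantly different from zero when the returned level is below alpha.
    //
    //     boolean significant = regression.getSignificance() < 0.05;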

  691.     // ---------------------Private methods-----------------------------------

  692.     /**
  693.     * Returns the intercept of the estimated regression line, given the slope.
  694.     * <p>
  695.     * Will return <code>NaN</code> if slope is <code>NaN</code>.</p>
  696.     *
  697.     * @param slope current slope
  698.     * @return the intercept of the regression line
  699.     */
  700.     private double getIntercept(final double slope) {
  701.       if( hasIntercept){
  702.         return (sumY - slope * sumX) / n;
  703.       }
  704.       return 0.0;
  705.     }

  706.     /**
  707.      * Computes SSR from b1.
  708.      *
  709.      * @param slope regression slope estimate
  710.      * @return sum of squared deviations of predicted y values
  711.      */
  712.     private double getRegressionSumSquares(final double slope) {
  713.         return slope * slope * sumXX;
  714.     }

  715.     /**
  716.      * Performs a regression on data present in buffers and outputs a RegressionResults object.
  717.      *
  718.      * <p>If there are fewer than 3 observations in the model and {@code hasIntercept} is true,
  719.      * a {@code NoDataException} is thrown.  If there is no intercept term, the model must
  720.      * contain at least 2 observations.</p>
  721.      *
  722.      * @return RegressionResults acts as a container of regression output
  723.      * @throws ModelSpecificationException if the model is not correctly specified
  724.      * @throws NoDataException if there is not sufficient data in the model to
  725.      * estimate the regression parameters
  726.      */
  727.     public RegressionResults regress() throws ModelSpecificationException, NoDataException {
  728.         if (hasIntercept) {
  729.             if (n < 3) {
  730.                 throw new NoDataException(LocalizedFormats.NOT_ENOUGH_DATA_REGRESSION);
  731.             }
  732.             if (FastMath.abs(sumXX) > Precision.SAFE_MIN) {
  733.                 final double[] params = new double[] { getIntercept(), getSlope() };
  734.                 final double mse = getMeanSquareError();
  735.                 final double _syy = sumYY + sumY * sumY / n;
  736.                 final double[] vcv = new double[] { mse * (xbar * xbar / sumXX + 1.0 / n), -xbar * mse / sumXX, mse / sumXX };
  737.                 return new RegressionResults(params, new double[][] { vcv }, true, n, 2, sumY, _syy, getSumSquaredErrors(), true,
  738.                         false);
  739.             } else {
  740.                 final double[] params = new double[] { sumY / n, Double.NaN };
  741.                 // final double mse = getMeanSquareError();
  742.                 final double[] vcv = new double[] { sumYY / ((n - 1.0) * n), Double.NaN, Double.NaN };
  743.                 return new RegressionResults(params, new double[][] { vcv }, true, n, 1, sumY, sumYY, getSumSquaredErrors(), true,
  744.                         false);
  745.             }
  746.         } else {
  747.             if (n < 2) {
  748.                 throw new NoDataException(LocalizedFormats.NOT_ENOUGH_DATA_REGRESSION);
  749.             }
  750.             if (!Double.isNaN(sumXX)) {
  751.                 final double[] vcv = new double[] { getMeanSquareError() / sumXX };
  752.                 final double[] params = new double[] { sumXY / sumXX };
  753.                 return new RegressionResults(params, new double[][] { vcv }, true, n, 1, sumY, sumYY, getSumSquaredErrors(), false,
  754.                         false);
  755.             } else {
  756.                 final double[] vcv = new double[] { Double.NaN };
  757.                 final double[] params = new double[] { Double.NaN };
  758.                 return new RegressionResults(params, new double[][] { vcv }, true, n, 1, Double.NaN, Double.NaN, Double.NaN, false,
  759.                         false);
  760.             }
  761.         }
  762.     }
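
    // Usage sketch: reading the fitted parameters back out of the returned
    // container (getParameterEstimates() and getRSquared() are RegressionResults
    // accessors).
    //
    //     RegressionResults results = regression.regress();
    //     double[] estimates = results.getParameterEstimates(); // {intercept, slope} when hasIntercept is true
    //     double rSquared = results.getRSquared();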

  763.     /**
  764.      * Performs a regression on data present in buffers including only regressors
  765.      * indexed in variablesToInclude and outputs a RegressionResults object.
  766.      * @param variablesToInclude an array of indices of regressors to include
  767.      * @return RegressionResults acts as a container of regression output
  768.      * @throws MathIllegalArgumentException if the variablesToInclude array is null or zero length
  769.      * @throws OutOfRangeException if a requested variable is not present in model
  770.      */
  771.     public RegressionResults regress(int[] variablesToInclude) throws MathIllegalArgumentException{
  772.         if( variablesToInclude == null || variablesToInclude.length == 0){
  773.           throw new MathIllegalArgumentException(LocalizedFormats.ARRAY_ZERO_LENGTH_OR_NULL_NOT_ALLOWED);
  774.         }
  775.         if( variablesToInclude.length > 2 || (variablesToInclude.length > 1 && !hasIntercept) ){
  776.             throw new ModelSpecificationException(
  777.                     LocalizedFormats.ARRAY_SIZE_EXCEEDS_MAX_VARIABLES,
  778.                     (variablesToInclude.length > 1 && !hasIntercept) ? 1 : 2);
  779.         }

  780.         if( hasIntercept ){
  781.             if( variablesToInclude.length == 2 ){
  782.                 if( variablesToInclude[0] == 1 ){
  783.                     throw new ModelSpecificationException(LocalizedFormats.NOT_INCREASING_SEQUENCE);
  784.                 }else if( variablesToInclude[0] != 0 ){
  785.                     throw new OutOfRangeException( variablesToInclude[0], 0,1 );
  786.                 }
  787.                 if( variablesToInclude[1] != 1){
  788.                      throw new OutOfRangeException( variablesToInclude[1], 0,1 );
  789.                 }
  790.                 return regress();
  791.             }else{
  792.                 if( variablesToInclude[0] != 1 && variablesToInclude[0] != 0 ){
  793.                      throw new OutOfRangeException( variablesToInclude[0],0,1 );
  794.                 }
  795.                 final double _mean = sumY * sumY / n;
  796.                 final double _syy = sumYY + _mean;
  797.                 if( variablesToInclude[0] == 0 ){
  798.                     //just the mean
  799.                     final double[] vcv = new double[]{ sumYY/(((n-1)*n)) };
  800.                     final double[] params = new double[]{ ybar };
  801.                     return new RegressionResults(
  802.                       params, new double[][]{vcv}, true, n, 1,
  803.                       sumY, _syy, sumYY,true,false);

  804.                 }else if( variablesToInclude[0] == 1){
  805.                     //final double _syy = sumYY + sumY * sumY / ((double) n);
  806.                     final double _sxx = sumXX + sumX * sumX / n;
  807.                     final double _sxy = sumXY + sumX * sumY / n;
  808.                     final double _sse = FastMath.max(0d, _syy - _sxy * _sxy / _sxx);
  809.                     final double _mse = _sse/((n-1));
  810.                     if( !Double.isNaN(_sxx) ){
  811.                         final double[] vcv = new double[]{ _mse / _sxx };
  812.                         final double[] params = new double[]{ _sxy/_sxx };
  813.                         return new RegressionResults(
  814.                                     params, new double[][]{vcv}, true, n, 1,
  815.                                     sumY, _syy, _sse,false,false);
  816.                     }else{
  817.                         final double[] vcv = new double[]{Double.NaN };
  818.                         final double[] params = new double[]{ Double.NaN };
  819.                         return new RegressionResults(
  820.                                     params, new double[][]{vcv}, true, n, 1,
  821.                                     Double.NaN, Double.NaN, Double.NaN,false,false);
  822.                     }
  823.                 }
  824.             }
  825.         }else{
  826.             if( variablesToInclude[0] != 0 ){
  827.                 throw new OutOfRangeException(variablesToInclude[0],0,0);
  828.             }
  829.             return regress();
  830.         }

  831.         return null;
  832.     }
  833. }