org.apache.commons.math3.linear.DecompositionSolver source code examples

The examples below show how the org.apache.commons.math3.linear.DecompositionSolver API is used in practice; each snippet is taken from an open-source project on GitHub.
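
Before the project examples, here is a minimal standalone sketch of the pattern they all share: build a decomposition of the coefficient matrix, obtain its DecompositionSolver, and call solve. The 2 x 2 system below is made up purely for illustration.

import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.DecompositionSolver;
import org.apache.commons.math3.linear.LUDecomposition;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.linear.RealVector;

public class DecompositionSolverDemo {
    public static void main(String[] args) {
        // Coefficient matrix A and right-hand side b of A x = b
        RealMatrix a = new Array2DRowRealMatrix(new double[][] {{2, 1}, {1, 3}}, false);
        RealVector b = new ArrayRealVector(new double[] {3, 5}, false);

        // LU decomposition is the usual choice for square, non-singular systems
        DecompositionSolver solver = new LUDecomposition(a).getSolver();
        if (!solver.isNonSingular()) {
            throw new IllegalStateException("matrix is singular");
        }
        RealVector x = solver.solve(b);   // x = A^-1 b = {0.8; 1.4}
        System.out.println(x);
    }
}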

Example 1  Project: systemds  File: LibCommonsMath.java
/**
 * Function to solve a given system of equations.
 * 
 * @param in1 matrix object 1
 * @param in2 matrix object 2
 * @return matrix block
 */
private static MatrixBlock computeSolve(MatrixBlock in1, MatrixBlock in2) {
	//convert to commons math BlockRealMatrix instead of Array2DRowRealMatrix
	//to avoid unnecessary conversion as QR internally creates a BlockRealMatrix
	BlockRealMatrix matrixInput = DataConverter.convertToBlockRealMatrix(in1);
	BlockRealMatrix vectorInput = DataConverter.convertToBlockRealMatrix(in2);
	
	/*LUDecompositionImpl ludecompose = new LUDecompositionImpl(matrixInput);
	DecompositionSolver lusolver = ludecompose.getSolver();
	RealMatrix solutionMatrix = lusolver.solve(vectorInput);*/
	
	// Setup a solver based on QR Decomposition
	QRDecomposition qrdecompose = new QRDecomposition(matrixInput);
	DecompositionSolver solver = qrdecompose.getSolver();
	// Invoke solve
	RealMatrix solutionMatrix = solver.solve(vectorInput);
	
	return DataConverter.convertToMatrixBlock(solutionMatrix);
}
 
Example 2  Project: astor  File: AbstractLeastSquaresOptimizer.java
/**
 * Get the covariance matrix of the optimized parameters.
 * <br/>
 * Note that this operation involves the inversion of the
 * <code>J<sup>T</sup>J</code> matrix, where {@code J} is the
 * Jacobian matrix.
 * The {@code threshold} parameter is a way for the caller to specify
 * that the result of this computation should be considered meaningless,
 * and thus trigger an exception.
 *
 * @param threshold Singularity threshold.
 * @return the covariance matrix.
 * @throws org.apache.commons.math3.linear.SingularMatrixException
 * if the covariance matrix cannot be computed (singular problem).
 */
public double[][] getCovariances(double threshold) {
    // Set up the jacobian.
    updateJacobian();

    // Compute transpose(J)J, without building intermediate matrices.
    double[][] jTj = new double[cols][cols];
    for (int i = 0; i < cols; ++i) {
        for (int j = i; j < cols; ++j) {
            double sum = 0;
            for (int k = 0; k < rows; ++k) {
                sum += weightedResidualJacobian[k][i] * weightedResidualJacobian[k][j];
            }
            jTj[i][j] = sum;
            jTj[j][i] = sum;
        }
    }

    // Compute the covariances matrix.
    final DecompositionSolver solver
        = new QRDecomposition(MatrixUtils.createRealMatrix(jTj), threshold).getSolver();
    return solver.getInverse().getData();
}
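
A minimal standalone sketch of the same computation, (J^T J)^-1 via a thresholded QRDecomposition; the Jacobian values and the threshold below are made up for illustration only.

import org.apache.commons.math3.linear.DecompositionSolver;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.QRDecomposition;
import org.apache.commons.math3.linear.RealMatrix;

public class CovarianceSketch {
    public static void main(String[] args) {
        // Made-up 3x2 Jacobian J (3 residuals, 2 parameters)
        RealMatrix j = MatrixUtils.createRealMatrix(new double[][] {
            {1.0, 0.5}, {0.0, 2.0}, {1.5, 1.0}});

        // Normal-equations matrix J^T J
        RealMatrix jTj = j.transpose().multiply(j);

        // The threshold makes the QR solver throw SingularMatrixException
        // instead of returning a meaningless (near-singular) inverse.
        double threshold = 1e-10;
        DecompositionSolver solver = new QRDecomposition(jTj, threshold).getSolver();
        RealMatrix covariance = solver.getInverse();   // (J^T J)^-1
        System.out.println(covariance);
    }
}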
 
Example 3  Project: oryx  File: LinearSystemSolver.java
/**
 * @param data dense matrix represented in row-major form
 * @return solver for the system Ax = b
 */
static Solver getSolver(double[][] data) {
  if (data == null) {
    return null;
  }
  RealMatrix M = new Array2DRowRealMatrix(data, false);
  double infNorm = M.getNorm();
  double singularityThreshold = infNorm * SINGULARITY_THRESHOLD_RATIO;
  RRQRDecomposition decomposition = new RRQRDecomposition(M, singularityThreshold);
  DecompositionSolver solver = decomposition.getSolver();
  if (solver.isNonSingular()) {
    return new Solver(solver);
  }
  // Otherwise try to report apparent rank
  int apparentRank = decomposition.getRank(0.01); // Better value?
  log.warn("{} x {} matrix is near-singular (threshold {}). Add more data or decrease the " +
           "number of features, to <= about {}",
           M.getRowDimension(), 
           M.getColumnDimension(),
           singularityThreshold,
           apparentRank);
  throw new SingularMatrixSolverException(apparentRank, "Apparent rank: " + apparentRank);
}
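
The rank-revealing QR decomposition used here can also be exercised directly; below is a small sketch, with a made-up rank-deficient matrix and illustrative thresholds, that mirrors the singularity check and the rank reporting.

import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.DecompositionSolver;
import org.apache.commons.math3.linear.RRQRDecomposition;
import org.apache.commons.math3.linear.RealMatrix;

public class RankCheckSketch {
    public static void main(String[] args) {
        // Rank-deficient matrix: the second row is twice the first
        RealMatrix m = new Array2DRowRealMatrix(new double[][] {
            {1.0, 2.0}, {2.0, 4.0}}, false);

        RRQRDecomposition decomposition = new RRQRDecomposition(m, 1e-10);
        DecompositionSolver solver = decomposition.getSolver();

        if (solver.isNonSingular()) {
            System.out.println(solver.solve(new Array2DRowRealMatrix(
                new double[][] {{1.0}, {2.0}}, false)));
        } else {
            // Report the effective rank instead of solving
            System.out.println("near-singular, apparent rank: " + decomposition.getRank(0.01));
        }
    }
}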
 
Example 4  Project: finmath-lib  File: LinearAlgebra.java
/**
 * Find a solution of the linear equation A x = b where
 * <ul>
 * <li>A is an n x m matrix given as double[n][m],</li>
 * <li>b is an n-vector given as double[n],</li>
 * <li>x is an m-vector given as double[m],</li>
 * </ul>
 *
 * @param matrixA The matrix A (left-hand side of the linear equation).
 * @param b The vector b (right-hand side of the linear equation).
 * @return A solution x to A x = b.
 */
public static double[] solveLinearEquationSVD(final double[][] matrixA, final double[] b) {

	if(isSolverUseApacheCommonsMath) {
		final Array2DRowRealMatrix matrix = new Array2DRowRealMatrix(matrixA);

		// Using SVD - very slow
		final DecompositionSolver solver = new SingularValueDecomposition(matrix).getSolver();

		return solver.solve(new Array2DRowRealMatrix(b)).getColumn(0);
	}
	else {
		return org.jblas.Solve.solve(new org.jblas.DoubleMatrix(matrixA), new org.jblas.DoubleMatrix(b)).data;

		// For use of colt:
		// cern.colt.matrix.linalg.Algebra linearAlgebra = new cern.colt.matrix.linalg.Algebra();
		// return linearAlgebra.solve(new DenseDoubleMatrix2D(A), linearAlgebra.transpose(new DenseDoubleMatrix2D(new double[][] { b }))).viewColumn(0).toArray();

		// For use of parallel colt:
		// return new cern.colt.matrix.tdouble.algo.decomposition.DenseDoubleLUDecomposition(new cern.colt.matrix.tdouble.impl.DenseDoubleMatrix2D(A)).solve(new cern.colt.matrix.tdouble.impl.DenseDoubleMatrix1D(b)).toArray();
	}
}
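
The SVD-based solver also covers the over-determined case, where A has more rows than columns, by returning the least-squares solution. A minimal sketch with made-up data:

import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.DecompositionSolver;
import org.apache.commons.math3.linear.RealVector;
import org.apache.commons.math3.linear.SingularValueDecomposition;

public class SvdLeastSquaresSketch {
    public static void main(String[] args) {
        // Over-determined system: 3 equations, 2 unknowns (values are illustrative)
        Array2DRowRealMatrix a = new Array2DRowRealMatrix(new double[][] {
            {1.0, 1.0}, {1.0, 2.0}, {1.0, 3.0}}, false);
        RealVector b = new ArrayRealVector(new double[] {1.0, 2.0, 2.0}, false);

        // The SVD solver returns the minimum-norm least-squares solution
        DecompositionSolver solver = new SingularValueDecomposition(a).getSolver();
        RealVector x = solver.solve(b);
        System.out.println(x);   // roughly {0.6666...; 0.5}
    }
}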
 
Example 5  Project: finmath-lib  File: LinearAlgebra.java
/**
 * Find a solution of the linear equation A x = b where
 * <ul>
 * <li>A is a symmetric n x n matrix given as double[n][n],</li>
 * <li>b is an n-vector given as double[n],</li>
 * <li>x is an n-vector given as double[n],</li>
 * </ul>
 *
 * @param matrix The matrix A (left-hand side of the linear equation).
 * @param vector The vector b (right-hand side of the linear equation).
 * @return A solution x to A x = b.
 */
public static double[] solveLinearEquationSymmetric(final double[][] matrix, final double[] vector) {
	if(isSolverUseApacheCommonsMath) {
		final DecompositionSolver solver = new LUDecomposition(new Array2DRowRealMatrix(matrix)).getSolver();
		return solver.solve(new Array2DRowRealMatrix(vector)).getColumn(0);
	}
	else {
		return org.jblas.Solve.solveSymmetric(new org.jblas.DoubleMatrix(matrix), new org.jblas.DoubleMatrix(vector)).data;
		/* To use the linear algebra package colt from cern.
		cern.colt.matrix.linalg.Algebra linearAlgebra = new cern.colt.matrix.linalg.Algebra();
		double[] x = linearAlgebra.solve(new DenseDoubleMatrix2D(A), linearAlgebra.transpose(new DenseDoubleMatrix2D(new double[][] { b }))).viewColumn(0).toArray();

		return x;
		 */
	}
}
 
Example 6
@Override
public Solver getSolver(RealMatrix M) {
  if (M == null) {
    return null;
  }
  RRQRDecomposition decomposition = new RRQRDecomposition(M, SINGULARITY_THRESHOLD);
  DecompositionSolver solver = decomposition.getSolver();
  if (solver.isNonSingular()) {
    return new CommonsMathSolver(solver);
  }
  // Otherwise try to report apparent rank
  int apparentRank = decomposition.getRank(0.01); // Better value?
  log.warn("{} x {} matrix is near-singular (threshold {}). Add more data or decrease the value of model.features, " +
           "to <= about {}",
           M.getRowDimension(), 
           M.getColumnDimension(), 
           SINGULARITY_THRESHOLD,
           apparentRank);
  throw new SingularMatrixSolverException(apparentRank, "Apparent rank: " + apparentRank);
}
 
Example 7  Project: MeteoInfo  File: LinalgUtil.java
/**
 * Solve a linear matrix equation, or system of linear scalar equations.
 *
 * @param a Coefficient matrix.
 * @param b Ordinate or “dependent variable” values.
 * @return Solution to the system a x = b. Returned shape is identical to b.
 */
public static Array solve(Array a, Array b) {
    Array r = Array.factory(DataType.DOUBLE, b.getShape());
    double[][] aa = (double[][]) ArrayUtil.copyToNDJavaArray_Double(a);
    RealMatrix coefficients = new Array2DRowRealMatrix(aa, false);
    DecompositionSolver solver = new LUDecomposition(coefficients).getSolver();
    double[] bb = (double[]) ArrayUtil.copyToNDJavaArray_Double(b);
    RealVector constants = new ArrayRealVector(bb, false);
    RealVector solution = solver.solve(constants);
    for (int i = 0; i < r.getSize(); i++) {
        r.setDouble(i, solution.getEntry(i));
    }

    return r;
}
 
Example 8  Project: systemds  File: LibCommonsMath.java
/**
 * Function to compute matrix inverse via matrix decomposition.
 * 
 * @param in commons-math3 Array2DRowRealMatrix
 * @return matrix block
 */
private static MatrixBlock computeMatrixInverse(Array2DRowRealMatrix in) {
	if ( !in.isSquare() )
		throw new DMLRuntimeException("Input to inv() must be square matrix -- given: a " + in.getRowDimension() + "x" + in.getColumnDimension() + " matrix.");
	
	QRDecomposition qrdecompose = new QRDecomposition(in);
	DecompositionSolver solver = qrdecompose.getSolver();
	RealMatrix inverseMatrix = solver.getInverse();

	return DataConverter.convertToMatrixBlock(inverseMatrix.getData());
}
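
A standalone sketch of the QR-based inversion used here, on a made-up 2 x 2 matrix, with a quick sanity check that the result really is A^-1:

import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.DecompositionSolver;
import org.apache.commons.math3.linear.QRDecomposition;
import org.apache.commons.math3.linear.RealMatrix;

public class InverseSketch {
    public static void main(String[] args) {
        // Made-up square, non-singular matrix
        RealMatrix a = new Array2DRowRealMatrix(new double[][] {{4, 7}, {2, 6}}, false);

        DecompositionSolver solver = new QRDecomposition(a).getSolver();
        RealMatrix inverse = solver.getInverse();

        // Sanity check: A * A^-1 should be (numerically) the identity
        System.out.println(a.multiply(inverse));
    }
}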
 
Example 9
@Override
public SlopeCoefficients estimateCoefficients(final DerivationEquation eq)
    throws EstimationException {
  final double[][] sourceTriangleMatrix = eq.getCovarianceLowerTriangularMatrix();
  // Copy matrix and enhance it to a full matrix as expected by CholeskyDecomposition
  // FIXME: Avoid the copy to speed up the solving process, e.g. by extending the CholeskyDecomposition constructor
  final int length = sourceTriangleMatrix.length;
  final double[][] matrix = new double[length][];
  for (int i = 0; i < length; i++) {
    matrix[i] = new double[length];
    final double[] s = sourceTriangleMatrix[i];
    final double[] t = matrix[i];
    for (int j = 0; j <= i; j++) {
      t[j] = s[j];
    }
    for (int j = i + 1; j < length; j++) {
      t[j] = sourceTriangleMatrix[j][i];
    }
  }
  final RealMatrix coefficients =
      new Array2DRowRealMatrix(matrix, false);
  try {
    final DecompositionSolver solver = new CholeskyDecomposition(coefficients).getSolver();
    final RealVector constants = new ArrayRealVector(eq.getConstraints(), true);
    final RealVector solution = solver.solve(constants);
    return new DefaultSlopeCoefficients(solution.toArray());
  } catch (final NonPositiveDefiniteMatrixException e) {
    throw new EstimationException("Matrix inversion error: the data is linearly dependent", e);
  }
}
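
A self-contained sketch of the Cholesky-based solve used above, on a small symmetric positive-definite matrix made up for illustration. A non-positive-definite input would make the CholeskyDecomposition constructor throw NonPositiveDefiniteMatrixException, which the method above translates into an EstimationException.

import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.CholeskyDecomposition;
import org.apache.commons.math3.linear.DecompositionSolver;
import org.apache.commons.math3.linear.RealVector;

public class CholeskySolveSketch {
    public static void main(String[] args) {
        // Symmetric positive-definite, covariance-like matrix (illustrative values)
        Array2DRowRealMatrix covariance = new Array2DRowRealMatrix(new double[][] {
            {4.0, 1.0}, {1.0, 3.0}}, false);
        RealVector constants = new ArrayRealVector(new double[] {1.0, 2.0}, false);

        // Cholesky is the natural choice for symmetric positive-definite systems
        DecompositionSolver solver = new CholeskyDecomposition(covariance).getSolver();
        RealVector solution = solver.solve(constants);
        System.out.println(solution);   // {0.0909...; 0.6363...}
    }
}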
 
Example 10  Project: orbit-image-analysis  File: ApacheSolver.java
/**
 * @see AbstractSolver#solve(AbstractMatrix, AbstractVector)
 */
@Override
public AbstractVector solve(AbstractMatrix m, AbstractVector b) {
  if (m instanceof ApacheMatrix && b instanceof ApacheVector) {
    DecompositionSolver solver = new LUDecomposition(((ApacheMatrix) m).getMatrix()).getSolver();
    RealVector bApache = ((ApacheVector) b).getVector();
    RealVector solution = solver.solve(bApache);
    return new ApacheVector(solution);
  }
  throw new UnsupportedOperationException();
}
 
Example 11  Project: incubator-hivemall  File: StatsUtils.java
/**
 * pdf(x, x_hat) = exp(-0.5 * (x - x_hat)^T * inv(Σ) * (x - x_hat)) / ((2π)^(0.5 * d) * det(Σ)^0.5)
 *
 * @return value of the probability density function
 * @link https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Density_function
 */
public static double pdf(@Nonnull final RealVector x, @Nonnull final RealVector x_hat,
        @Nonnull final RealMatrix sigma) {
    final int dim = x.getDimension();
    Preconditions.checkArgument(x_hat.getDimension() == dim,
        "|x| != |x_hat|, |x|=" + dim + ", |x_hat|=" + x_hat.getDimension());
    Preconditions.checkArgument(sigma.getRowDimension() == dim,
        "|x| != |sigma|, |x|=" + dim + ", |sigma|=" + sigma.getRowDimension());
    Preconditions.checkArgument(sigma.isSquare(), "Sigma is not square matrix");

    LUDecomposition LU = new LUDecomposition(sigma);
    final double detSigma = LU.getDeterminant();
    double denominator = Math.pow(2.d * Math.PI, 0.5d * dim) * Math.pow(detSigma, 0.5d);
    if (denominator == 0.d) { // avoid divide by zero
        return 0.d;
    }

    final RealMatrix invSigma;
    DecompositionSolver solver = LU.getSolver();
    if (solver.isNonSingular() == false) {
        SingularValueDecomposition svd = new SingularValueDecomposition(sigma);
        invSigma = svd.getSolver().getInverse(); // least square solution
    } else {
        invSigma = solver.getInverse();
    }
    //EigenDecomposition eigen = new EigenDecomposition(sigma);
    //double detSigma = eigen.getDeterminant();
    //RealMatrix invSigma = eigen.getSolver().getInverse();

    RealVector diff = x.subtract(x_hat);
    RealVector premultiplied = invSigma.preMultiply(diff);
    double sum = premultiplied.dotProduct(diff);
    double numerator = Math.exp(-0.5d * sum);

    return numerator / denominator;
}
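
For a cross-check, Commons Math also ships a ready-made density in org.apache.commons.math3.distribution.MultivariateNormalDistribution; a small sketch with illustrative numbers follows. Note that it requires a non-singular Σ, unlike the hand-rolled version above, which falls back to an SVD pseudo-inverse.

import org.apache.commons.math3.distribution.MultivariateNormalDistribution;

public class MvnDensitySketch {
    public static void main(String[] args) {
        double[] mean = {0.0, 0.0};
        double[][] sigma = {{1.0, 0.2}, {0.2, 1.0}};   // illustrative covariance

        MultivariateNormalDistribution mvn = new MultivariateNormalDistribution(mean, sigma);
        System.out.println(mvn.density(new double[] {0.5, -0.5}));
    }
}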
 
Example 12  Project: incubator-hivemall  File: MatrixUtils.java
@Nonnull
public static RealMatrix inverse(@Nonnull final RealMatrix m, final boolean exact)
        throws SingularMatrixException {
    LUDecomposition LU = new LUDecomposition(m);
    DecompositionSolver solver = LU.getSolver();
    final RealMatrix inv;
    if (exact || solver.isNonSingular()) {
        inv = solver.getInverse();
    } else {
        SingularValueDecomposition SVD = new SingularValueDecomposition(m);
        inv = SVD.getSolver().getInverse();
    }
    return inv;
}
 
Example 13  Project: incubator-hivemall  File: MatrixUtils.java
/**
 * L = A x R
 *
 * @return a matrix A that minimizes A x R - L
 */
@Nonnull
public static RealMatrix solve(@Nonnull final RealMatrix L, @Nonnull final RealMatrix R,
        final boolean exact) throws SingularMatrixException {
    LUDecomposition LU = new LUDecomposition(R);
    DecompositionSolver solver = LU.getSolver();
    final RealMatrix A;
    if (exact || solver.isNonSingular()) {
        A = LU.getSolver().solve(L);
    } else {
        SingularValueDecomposition SVD = new SingularValueDecomposition(R);
        A = SVD.getSolver().solve(L);
    }
    return A;
}
 
Example 14  Project: astor  File: AbstractEvaluation.java
/** {@inheritDoc} */
public RealMatrix getCovariances(double threshold) {
    // Set up the Jacobian.
    final RealMatrix j = this.getJacobian();

    // Compute transpose(J)J.
    final RealMatrix jTj = j.transpose().multiply(j);

    // Compute the covariances matrix.
    final DecompositionSolver solver
            = new QRDecomposition(jTj, threshold).getSolver();
    return solver.getInverse();
}
 
Example 15  Project: astor  File: KalmanFilter.java
/**
 * Correct the current state estimate with an actual measurement.
 *
 * @param z
 *            the measurement vector
 * @throws NullArgumentException
 *             if the measurement vector is {@code null}
 * @throws DimensionMismatchException
 *             if the dimension of the measurement vector does not fit
 * @throws SingularMatrixException
 *             if the covariance matrix could not be inverted
 */
public void correct(final RealVector z)
        throws NullArgumentException, DimensionMismatchException, SingularMatrixException {

    // sanity checks
    MathUtils.checkNotNull(z);
    if (z.getDimension() != measurementMatrix.getRowDimension()) {
        throw new DimensionMismatchException(z.getDimension(),
                                             measurementMatrix.getRowDimension());
    }

    // S = H * P(k) - * H' + R
    RealMatrix s = measurementMatrix.multiply(errorCovariance)
        .multiply(measurementMatrixT)
        .add(measurementModel.getMeasurementNoise());

    // invert S
    // as the error covariance matrix is a symmetric positive
    // semi-definite matrix, we can use the cholesky decomposition
    DecompositionSolver solver = new CholeskyDecomposition(s).getSolver();
    RealMatrix invertedS = solver.getInverse();

    // Inn = z(k) - H * xHat(k)-
    RealVector innovation = z.subtract(measurementMatrix.operate(stateEstimation));

    // calculate gain matrix
    // K(k) = P(k)- * H' * (H * P(k)- * H' + R)^-1
    // K(k) = P(k)- * H' * S^-1
    RealMatrix kalmanGain = errorCovariance.multiply(measurementMatrixT).multiply(invertedS);

    // update estimate with measurement z(k)
    // xHat(k) = xHat(k)- + K * Inn
    stateEstimation = stateEstimation.add(kalmanGain.operate(innovation));

    // update covariance of prediction error
    // P(k) = (I - K * H) * P(k)-
    RealMatrix identity = MatrixUtils.createRealIdentityMatrix(kalmanGain.getRowDimension());
    errorCovariance = identity.subtract(kalmanGain.multiply(measurementMatrix)).multiply(errorCovariance);
}
 
Example 16  Project: astor  File: KalmanFilter.java
/**
 * Correct the current state estimate with an actual measurement.
 *
 * @param z the measurement vector
 * @throws DimensionMismatchException if the dimension of the
 * measurement vector does not fit
 * @throws org.apache.commons.math3.linear.SingularMatrixException
 * if the covariance matrix could not be inverted
 */
public void correct(final RealVector z) {
    // sanity checks
    MathUtils.checkNotNull(z);
    if (z.getDimension() != measurementMatrix.getRowDimension()) {
        throw new DimensionMismatchException(z.getDimension(),
                                             measurementMatrix.getRowDimension());
    }

    // S = H * P(k) - * H' + R
    RealMatrix s = measurementMatrix.multiply(errorCovariance)
        .multiply(measurementMatrixT)
        .add(measurementModel.getMeasurementNoise());

    // invert S
    // as the error covariance matrix is a symmetric positive
    // semi-definite matrix, we can use the cholesky decomposition
    DecompositionSolver solver = new CholeskyDecomposition(s).getSolver();
    RealMatrix invertedS = solver.getInverse();

    // Inn = z(k) - H * xHat(k)-
    RealVector innovation = z.subtract(measurementMatrix.operate(stateEstimation));

    // calculate gain matrix
    // K(k) = P(k)- * H' * (H * P(k)- * H' + R)^-1
    // K(k) = P(k)- * H' * S^-1
    RealMatrix kalmanGain = errorCovariance.multiply(measurementMatrixT).multiply(invertedS);

    // update estimate with measurement z(k)
    // xHat(k) = xHat(k)- + K * Inn
    stateEstimation = stateEstimation.add(kalmanGain.operate(innovation));

    // update covariance of prediction error
    // P(k) = (I - K * H) * P(k)-
    RealMatrix identity = MatrixUtils.createRealIdentityMatrix(kalmanGain.getRowDimension());
    errorCovariance = identity.subtract(kalmanGain.multiply(measurementMatrix)).multiply(errorCovariance);
}
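
Both correct(...) variants above come from Commons Math's own org.apache.commons.math3.filter.KalmanFilter. The sketch below drives that filter from the outside with the constant-voltage model described in the Commons Math user guide; all numbers are illustrative.

import org.apache.commons.math3.filter.DefaultMeasurementModel;
import org.apache.commons.math3.filter.DefaultProcessModel;
import org.apache.commons.math3.filter.KalmanFilter;
import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.linear.RealVector;

public class KalmanFilterSketch {
    public static void main(String[] args) {
        // Scalar state that is assumed constant: x(k+1) = x(k)
        RealMatrix a = new Array2DRowRealMatrix(new double[] {1.0});   // state transition
        RealMatrix b = null;                                           // no control input
        RealMatrix h = new Array2DRowRealMatrix(new double[] {1.0});   // measurement matrix
        RealMatrix q = new Array2DRowRealMatrix(new double[] {1e-5});  // process noise
        RealMatrix r = new Array2DRowRealMatrix(new double[] {0.1});   // measurement noise
        RealVector x0 = new ArrayRealVector(new double[] {0.0});       // initial state
        RealMatrix p0 = new Array2DRowRealMatrix(new double[] {1.0});  // initial error covariance

        KalmanFilter filter = new KalmanFilter(
            new DefaultProcessModel(a, b, q, x0, p0),
            new DefaultMeasurementModel(h, r));

        double[] measurements = {1.1, 0.9, 1.05, 0.95};   // noisy readings around 1.0
        for (double z : measurements) {
            filter.predict();
            filter.correct(new double[] {z});
        }
        System.out.println(filter.getStateEstimation()[0]);   // close to 1.0
    }
}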
 
Example 17  Project: hortonmachine  File: OmsCurvaturesBivariate.java
/**
 * Calculates the parameters of a bivariate quadratic equation.
 * 
 * @param elevationValues the window of points to use.
 * @return the parameters of the bivariate quadratic equation as [a, b, c, d, e, f]
 */
private static double[] calculateParameters( final double[][] elevationValues ) {
    int rows = elevationValues.length;
    int cols = elevationValues[0].length;
    int pointsNum = rows * cols;

    final double[][] xyMatrix = new double[pointsNum][6];
    final double[] valueArray = new double[pointsNum];

    // TODO check on resolution
    int index = 0;
    for( int y = 0; y < rows; y++ ) {
        for( int x = 0; x < cols; x++ ) {
            xyMatrix[index][0] = x * x; // x^2
            xyMatrix[index][1] = y * y; // y^2
            xyMatrix[index][2] = x * y; // xy
            xyMatrix[index][3] = x; // x
            xyMatrix[index][4] = y; // y
            xyMatrix[index][5] = 1;
            valueArray[index] = elevationValues[y][x];
            index++;
        }
    }

    RealMatrix A = MatrixUtils.createRealMatrix(xyMatrix);
    RealVector z = MatrixUtils.createRealVector(valueArray);

    DecompositionSolver solver = new RRQRDecomposition(A).getSolver();
    RealVector solution = solver.solve(z);

    // start values for a, b, c, d, e, f, all set to 0.0
    final double[] parameters = solution.toArray();
    return parameters;
}
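
A runnable sketch of the same fitting idea on a synthetic 3 x 3 elevation window generated from a known surface, so the recovered parameters can be checked by eye; the surface coefficients are made up for illustration.

import java.util.Arrays;
import org.apache.commons.math3.linear.DecompositionSolver;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RRQRDecomposition;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.linear.RealVector;

public class BivariateFitSketch {
    public static void main(String[] args) {
        // Known surface z = 0.5*x^2 + 0.25*y^2 + 0.1*xy + 2*x - y + 3 sampled on a 3x3 grid
        int rows = 3, cols = 3;
        double[][] xyMatrix = new double[rows * cols][6];
        double[] values = new double[rows * cols];
        int index = 0;
        for (int y = 0; y < rows; y++) {
            for (int x = 0; x < cols; x++) {
                xyMatrix[index] = new double[] {x * x, y * y, x * y, x, y, 1};
                values[index] = 0.5 * x * x + 0.25 * y * y + 0.1 * x * y + 2 * x - y + 3;
                index++;
            }
        }

        RealMatrix a = MatrixUtils.createRealMatrix(xyMatrix);
        RealVector z = MatrixUtils.createRealVector(values);

        // Least-squares fit of the 6 parameters [a, b, c, d, e, f]
        DecompositionSolver solver = new RRQRDecomposition(a).getSolver();
        System.out.println(Arrays.toString(solver.solve(z).toArray()));
        // approximately [0.5, 0.25, 0.1, 2.0, -1.0, 3.0]
    }
}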
 
Example 18
/**
 * Return the solution x of XTX x = XT y for a given y.
 * @TODO Performance upon repeated call can be optimized by caching XTX.
 *
 * @param dependents The sample vector of the random variable y.
 * @return The solution x of XTX x = XT y.
 */
@Override
public double[] getLinearRegressionParameters(RandomVariable dependents) {

	final RandomVariable localizerWeights = dependents.squared().sub(Math.pow(dependents.getStandardDeviation()*standardDeviations,2.0)).choose(new Scalar(0.0), new Scalar(1.0));

	// Localize basis functions
	final RandomVariable[] basisFunctionsNonLocalized = getBasisFunctionsEstimator().getBasisFunctions();
	final RandomVariable[] basisFunctions = new RandomVariable[basisFunctionsNonLocalized.length];
	for(int i=0; i<basisFunctions.length; i++) {
		basisFunctions[i] = basisFunctionsNonLocalized[i].mult(localizerWeights);
	}

	// Localize dependents
	dependents = dependents.mult(localizerWeights);

	// Build XTX - the symmetric matrix consisting of the scalar products of the basis functions.
	final double[][] XTX = new double[basisFunctions.length][basisFunctions.length];
	for(int i=0; i<basisFunctions.length; i++) {
		for(int j=i; j<basisFunctions.length; j++) {
			XTX[i][j] = basisFunctions[i].mult(basisFunctions[j]).getAverage();	// Scalar product
			XTX[j][i] = XTX[i][j];												// Symmetric matrix
		}
	}

	final DecompositionSolver solver = new SingularValueDecomposition(new Array2DRowRealMatrix(XTX, false)).getSolver();

	// Build XTy - the projection of the dependents random variable on the basis functions.
	final double[] XTy = new double[basisFunctions.length];
	for(int i=0; i<basisFunctions.length; i++) {
		XTy[i] = dependents.mult(basisFunctions[i]).getAverage();				// Scalar product
	}

	// Solve X^T X x = X^T y - which gives us the regression coefficients x = linearRegressionParameters
	final double[] linearRegressionParameters = solver.solve(new ArrayRealVector(XTy)).toArray();

	return linearRegressionParameters;
}
 
Example 19  Project: finmath-lib  File: LinearAlgebra.java
/**
 * Find a solution of the linear equation A x = b where
 * <ul>
 * <li>A is an n x m matrix given as double[n][m],</li>
 * <li>b is an n-vector given as double[n],</li>
 * <li>x is an m-vector given as double[m],</li>
 * </ul>
 * using a standard Tikhonov regularization, i.e., we solve in the least-squares sense
 *   A* x = b*
 * where A* = (A^T, lambda I)^T and b* = (b^T, 0)^T.
 *
 * @param matrixA The matrix A (left-hand side of the linear equation).
 * @param b The vector b (right-hand side of the linear equation).
 * @param lambda The parameter lambda of the Tikhonov regularization. Lambda effectively measures which small numbers are considered zero.
 * @return A solution x to A x = b.
 */
public static double[] solveLinearEquationTikonov(final double[][] matrixA, final double[] b, final double lambda) {
	if(lambda == 0) {
		return solveLinearEquationLeastSquare(matrixA, b);
	}

	/*
	 * The copy of the array is inefficient, but the use cases for this method are currently limited.
	 * And SVD is an alternative to this method.
	 */
	final int rows = matrixA.length;
	final int cols = matrixA[0].length;
	final double[][] matrixRegularized = new double[rows+cols][cols];
	final double[] bRegularized = new double[rows+cols];					// Note the JVM initializes arrays to zero.
	for(int i=0; i<rows; i++) {
		System.arraycopy(matrixA[i], 0, matrixRegularized[i], 0, cols);
	}
	System.arraycopy(b, 0, bRegularized, 0, rows);

	for(int j=0; j<cols; j++) {
		final double[] matrixRow = matrixRegularized[rows+j];

		matrixRow[j] = lambda;
	}


	//		return solveLinearEquationLeastSquare(matrixRegularized, bRegularized);
	final DecompositionSolver solver = new QRDecomposition(new Array2DRowRealMatrix(matrixRegularized, false)).getSolver();
	return solver.solve(new ArrayRealVector(bRegularized, false)).toArray();
}
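
The augmented system above is equivalent to solving the regularized normal equations (A^T A + lambda^2 I) x = A^T b. The sketch below takes that alternative route on made-up data and can serve as a rough cross-check of solveLinearEquationTikonov.

import java.util.Arrays;
import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.CholeskyDecomposition;
import org.apache.commons.math3.linear.DecompositionSolver;
import org.apache.commons.math3.linear.MatrixUtils;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.linear.RealVector;

public class TikhonovSketch {
    public static void main(String[] args) {
        // Illustrative over-determined system and regularization parameter
        RealMatrix a = new Array2DRowRealMatrix(new double[][] {
            {1.0, 1.0}, {1.0, 2.0}, {1.0, 3.0}}, false);
        RealVector b = new ArrayRealVector(new double[] {1.0, 2.0, 2.0}, false);
        double lambda = 0.1;

        // (A^T A + lambda^2 I) x = A^T b
        RealMatrix normal = a.transpose().multiply(a)
            .add(MatrixUtils.createRealIdentityMatrix(a.getColumnDimension())
                .scalarMultiply(lambda * lambda));
        RealVector rhs = a.transpose().operate(b);

        // The regularized normal matrix is symmetric positive definite, so Cholesky applies
        DecompositionSolver solver = new CholeskyDecomposition(normal).getSolver();
        System.out.println(Arrays.toString(solver.solve(rhs).toArray()));
    }
}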