id (stringlengths 5–19) | content (stringlengths 94–57.5k) | max_stars_repo_path (stringlengths 36–95) |
---|---|---|
Math-31 | public double evaluate(double x, double epsilon, int maxIterations) {
final double small = 1e-50;
double hPrev = getA(0, x);
// use the value of small as epsilon criteria for zero checks
if (Precision.equals(hPrev, 0.0, small)) {
hPrev = small;
}
int n = 1;
double dPrev = 0.0;
double p0 = 1.0;
double q1 = 1.0;
double cPrev = hPrev;
double hN = hPrev;
while (n < maxIterations) {
final double a = getA(n, x);
final double b = getB(n, x);
double cN = a * hPrev + b * p0;
double q2 = a * q1 + b * dPrev;
if (Double.isInfinite(cN) || Double.isInfinite(q2)) {
double scaleFactor = 1d;
double lastScaleFactor = 1d;
final int maxPower = 5;
final double scale = FastMath.max(a,b);
if (scale <= 0) { // Can't scale
throw new ConvergenceException(LocalizedFormats.CONTINUED_FRACTION_INFINITY_DIVERGENCE, x);
}
for (int i = 0; i < maxPower; i++) {
lastScaleFactor = scaleFactor;
scaleFactor *= scale;
if (a != 0.0 && a > b) {
cN = hPrev / lastScaleFactor + (b / scaleFactor * p0);
q2 = q1 / lastScaleFactor + (b / scaleFactor * dPrev);
} else if (b != 0) {
cN = (a / scaleFactor * hPrev) + p0 / lastScaleFactor;
q2 = (a / scaleFactor * q1) + dPrev / lastScaleFactor;
}
if (!(Double.isInfinite(cN) || Double.isInfinite(q2))) {
break;
}
}
}
final double deltaN = cN / q2 / cPrev;
hN = cPrev * deltaN;
if (Double.isInfinite(hN)) {
throw new ConvergenceException(LocalizedFormats.CONTINUED_FRACTION_INFINITY_DIVERGENCE,
x);
}
if (Double.isNaN(hN)) {
throw new ConvergenceException(LocalizedFormats.CONTINUED_FRACTION_NAN_DIVERGENCE,
x);
}
if (FastMath.abs(deltaN - 1.0) < epsilon) {
break;
}
dPrev = q1;
cPrev = cN / q2;
p0 = hPrev;
hPrev = cN;
q1 = q2;
n++;
}
if (n >= maxIterations) {
throw new MaxCountExceededException(LocalizedFormats.NON_CONVERGENT_CONTINUED_FRACTION,
maxIterations, x);
}
return hN;
}
public double evaluate(double x, double epsilon, int maxIterations) {
final double small = 1e-50;
double hPrev = getA(0, x);
// use the value of small as epsilon criteria for zero checks
if (Precision.equals(hPrev, 0.0, small)) {
hPrev = small;
}
int n = 1;
double dPrev = 0.0;
double cPrev = hPrev;
double hN = hPrev;
while (n < maxIterations) {
final double a = getA(n, x);
final double b = getB(n, x);
double dN = a + b * dPrev;
if (Precision.equals(dN, 0.0, small)) {
dN = small;
}
double cN = a + b / cPrev;
if (Precision.equals(cN, 0.0, small)) {
cN = small;
}
dN = 1 / dN;
final double deltaN = cN * dN;
hN = hPrev * deltaN;
if (Double.isInfinite(hN)) {
throw new ConvergenceException(LocalizedFormats.CONTINUED_FRACTION_INFINITY_DIVERGENCE,
x);
}
if (Double.isNaN(hN)) {
throw new ConvergenceException(LocalizedFormats.CONTINUED_FRACTION_NAN_DIVERGENCE,
x);
}
if (FastMath.abs(deltaN - 1.0) < epsilon) {
break;
}
dPrev = dN;
cPrev = cN;
hPrev = hN;
n++;
}
if (n >= maxIterations) {
throw new MaxCountExceededException(LocalizedFormats.NON_CONVERGENT_CONTINUED_FRACTION,
maxIterations, x);
}
return hN;
} | src/main/java/org/apache/commons/math3/util/ContinuedFraction.java |
Math-32 | protected void computeGeometricalProperties() {
final Vector2D[][] v = getVertices();
if (v.length == 0) {
final BSPTree<Euclidean2D> tree = getTree(false);
if ((Boolean) tree.getAttribute()) {
// the instance covers the whole space
setSize(Double.POSITIVE_INFINITY);
setBarycenter(Vector2D.NaN);
} else {
setSize(0);
setBarycenter(new Vector2D(0, 0));
}
} else if (v[0][0] == null) {
// there is at least one open-loop: the polygon is infinite
setSize(Double.POSITIVE_INFINITY);
setBarycenter(Vector2D.NaN);
} else {
// all loops are closed, we compute some integrals around the shape
double sum = 0;
double sumX = 0;
double sumY = 0;
for (Vector2D[] loop : v) {
double x1 = loop[loop.length - 1].getX();
double y1 = loop[loop.length - 1].getY();
for (final Vector2D point : loop) {
final double x0 = x1;
final double y0 = y1;
x1 = point.getX();
y1 = point.getY();
final double factor = x0 * y1 - y0 * x1;
sum += factor;
sumX += factor * (x0 + x1);
sumY += factor * (y0 + y1);
}
}
if (sum < 0) {
// the polygon as a finite outside surrounded by an infinite inside
setSize(Double.POSITIVE_INFINITY);
setBarycenter(Vector2D.NaN);
} else {
setSize(sum / 2);
setBarycenter(new Vector2D(sumX / (3 * sum), sumY / (3 * sum)));
}
}
}
protected void computeGeometricalProperties() {
final Vector2D[][] v = getVertices();
if (v.length == 0) {
final BSPTree<Euclidean2D> tree = getTree(false);
if (tree.getCut() == null && (Boolean) tree.getAttribute()) {
// the instance covers the whole space
setSize(Double.POSITIVE_INFINITY);
setBarycenter(Vector2D.NaN);
} else {
setSize(0);
setBarycenter(new Vector2D(0, 0));
}
} else if (v[0][0] == null) {
// there is at least one open-loop: the polygon is infinite
setSize(Double.POSITIVE_INFINITY);
setBarycenter(Vector2D.NaN);
} else {
// all loops are closed, we compute some integrals around the shape
double sum = 0;
double sumX = 0;
double sumY = 0;
for (Vector2D[] loop : v) {
double x1 = loop[loop.length - 1].getX();
double y1 = loop[loop.length - 1].getY();
for (final Vector2D point : loop) {
final double x0 = x1;
final double y0 = y1;
x1 = point.getX();
y1 = point.getY();
final double factor = x0 * y1 - y0 * x1;
sum += factor;
sumX += factor * (x0 + x1);
sumY += factor * (y0 + y1);
}
}
if (sum < 0) {
// the polygon as a finite outside surrounded by an infinite inside
setSize(Double.POSITIVE_INFINITY);
setBarycenter(Vector2D.NaN);
} else {
setSize(sum / 2);
setBarycenter(new Vector2D(sumX / (3 * sum), sumY / (3 * sum)));
}
}
} | src/main/java/org/apache/commons/math3/geometry/euclidean/twod/PolygonsSet.java |
Math-33 | protected void dropPhase1Objective() {
if (getNumObjectiveFunctions() == 1) {
return;
}
List<Integer> columnsToDrop = new ArrayList<Integer>();
columnsToDrop.add(0);
// positive cost non-artificial variables
for (int i = getNumObjectiveFunctions(); i < getArtificialVariableOffset(); i++) {
final double entry = tableau.getEntry(0, i);
if (Precision.compareTo(entry, 0d, maxUlps) > 0) {
columnsToDrop.add(i);
}
}
// non-basic artificial variables
for (int i = 0; i < getNumArtificialVariables(); i++) {
int col = i + getArtificialVariableOffset();
if (getBasicRow(col) == null) {
columnsToDrop.add(col);
}
}
double[][] matrix = new double[getHeight() - 1][getWidth() - columnsToDrop.size()];
for (int i = 1; i < getHeight(); i++) {
int col = 0;
for (int j = 0; j < getWidth(); j++) {
if (!columnsToDrop.contains(j)) {
matrix[i - 1][col++] = tableau.getEntry(i, j);
}
}
}
for (int i = columnsToDrop.size() - 1; i >= 0; i--) {
columnLabels.remove((int) columnsToDrop.get(i));
}
this.tableau = new Array2DRowRealMatrix(matrix);
this.numArtificialVariables = 0;
}
protected void dropPhase1Objective() {
if (getNumObjectiveFunctions() == 1) {
return;
}
List<Integer> columnsToDrop = new ArrayList<Integer>();
columnsToDrop.add(0);
// positive cost non-artificial variables
for (int i = getNumObjectiveFunctions(); i < getArtificialVariableOffset(); i++) {
final double entry = tableau.getEntry(0, i);
if (Precision.compareTo(entry, 0d, epsilon) > 0) {
columnsToDrop.add(i);
}
}
// non-basic artificial variables
for (int i = 0; i < getNumArtificialVariables(); i++) {
int col = i + getArtificialVariableOffset();
if (getBasicRow(col) == null) {
columnsToDrop.add(col);
}
}
double[][] matrix = new double[getHeight() - 1][getWidth() - columnsToDrop.size()];
for (int i = 1; i < getHeight(); i++) {
int col = 0;
for (int j = 0; j < getWidth(); j++) {
if (!columnsToDrop.contains(j)) {
matrix[i - 1][col++] = tableau.getEntry(i, j);
}
}
}
for (int i = columnsToDrop.size() - 1; i >= 0; i--) {
columnLabels.remove((int) columnsToDrop.get(i));
}
this.tableau = new Array2DRowRealMatrix(matrix);
this.numArtificialVariables = 0;
} | src/main/java/org/apache/commons/math3/optimization/linear/SimplexTableau.java |
Math-34 | public Iterator<Chromosome> iterator() {
return chromosomes.iterator();
}
public Iterator<Chromosome> iterator() {
return getChromosomes().iterator();
} | src/main/java/org/apache/commons/math3/genetics/ListPopulation.java |
Math-38 | private void prelim(double[] lowerBound,
double[] upperBound) {
printMethod(); // XXX
final int n = currentBest.getDimension();
final int npt = numberOfInterpolationPoints;
final int ndim = bMatrix.getRowDimension();
final double rhosq = initialTrustRegionRadius * initialTrustRegionRadius;
final double recip = 1d / rhosq;
final int np = n + 1;
// Set XBASE to the initial vector of variables, and set the initial
// elements of XPT, BMAT, HQ, PQ and ZMAT to zero.
for (int j = 0; j < n; j++) {
originShift.setEntry(j, currentBest.getEntry(j));
for (int k = 0; k < npt; k++) {
interpolationPoints.setEntry(k, j, ZERO);
}
for (int i = 0; i < ndim; i++) {
bMatrix.setEntry(i, j, ZERO);
}
}
for (int i = 0, max = n * np / 2; i < max; i++) {
modelSecondDerivativesValues.setEntry(i, ZERO);
}
for (int k = 0; k < npt; k++) {
modelSecondDerivativesParameters.setEntry(k, ZERO);
for (int j = 0, max = npt - np; j < max; j++) {
zMatrix.setEntry(k, j, ZERO);
}
}
// Begin the initialization procedure. NF becomes one more than the number
// of function values so far. The coordinates of the displacement of the
// next initial interpolation point from XBASE are set in XPT(NF+1,.).
int ipt = 0;
int jpt = 0;
double fbeg = Double.NaN;
do {
final int nfm = getEvaluations();
final int nfx = nfm - n;
final int nfmm = nfm - 1;
final int nfxm = nfx - 1;
double stepa = 0;
double stepb = 0;
if (nfm <= 2 * n) {
if (nfm >= 1 &&
nfm <= n) {
stepa = initialTrustRegionRadius;
if (upperDifference.getEntry(nfmm) == ZERO) {
stepa = -stepa;
throw new PathIsExploredException(); // XXX
}
interpolationPoints.setEntry(nfm, nfmm, stepa);
} else if (nfm > n) {
stepa = interpolationPoints.getEntry(nfx, nfxm);
stepb = -initialTrustRegionRadius;
if (lowerDifference.getEntry(nfxm) == ZERO) {
stepb = Math.min(TWO * initialTrustRegionRadius, upperDifference.getEntry(nfxm));
throw new PathIsExploredException(); // XXX
}
if (upperDifference.getEntry(nfxm) == ZERO) {
stepb = Math.max(-TWO * initialTrustRegionRadius, lowerDifference.getEntry(nfxm));
throw new PathIsExploredException(); // XXX
}
interpolationPoints.setEntry(nfm, nfxm, stepb);
}
} else {
final int tmp1 = (nfm - np) / n;
jpt = nfm - tmp1 * n - n;
ipt = jpt + tmp1;
if (ipt > n) {
final int tmp2 = jpt;
jpt = ipt - n;
ipt = tmp2;
throw new PathIsExploredException(); // XXX
}
final int iptMinus1 = ipt;
final int jptMinus1 = jpt;
interpolationPoints.setEntry(nfm, iptMinus1, interpolationPoints.getEntry(ipt, iptMinus1));
interpolationPoints.setEntry(nfm, jptMinus1, interpolationPoints.getEntry(jpt, jptMinus1));
}
// Calculate the next value of F. The least function value so far and
// its index are required.
for (int j = 0; j < n; j++) {
currentBest.setEntry(j, Math.min(Math.max(lowerBound[j],
originShift.getEntry(j) + interpolationPoints.getEntry(nfm, j)),
upperBound[j]));
if (interpolationPoints.getEntry(nfm, j) == lowerDifference.getEntry(j)) {
currentBest.setEntry(j, lowerBound[j]);
}
if (interpolationPoints.getEntry(nfm, j) == upperDifference.getEntry(j)) {
currentBest.setEntry(j, upperBound[j]);
}
}
final double objectiveValue = computeObjectiveValue(currentBest.toArray());
final double f = isMinimize ? objectiveValue : -objectiveValue;
final int numEval = getEvaluations(); // nfm + 1
fAtInterpolationPoints.setEntry(nfm, f);
if (numEval == 1) {
fbeg = f;
trustRegionCenterInterpolationPointIndex = 0;
} else if (f < fAtInterpolationPoints.getEntry(trustRegionCenterInterpolationPointIndex)) {
trustRegionCenterInterpolationPointIndex = nfm;
}
// Set the nonzero initial elements of BMAT and the quadratic model in the
// cases when NF is at most 2*N+1. If NF exceeds N+1, then the positions
// of the NF-th and (NF-N)-th interpolation points may be switched, in
// order that the function value at the first of them contributes to the
// off-diagonal second derivative terms of the initial quadratic model.
if (numEval <= 2 * n + 1) {
if (numEval >= 2 &&
numEval <= n + 1) {
gradientAtTrustRegionCenter.setEntry(nfmm, (f - fbeg) / stepa);
if (npt < numEval + n) {
final double oneOverStepA = ONE / stepa;
bMatrix.setEntry(0, nfmm, -oneOverStepA);
bMatrix.setEntry(nfm, nfmm, oneOverStepA);
bMatrix.setEntry(npt + nfmm, nfmm, -HALF * rhosq);
throw new PathIsExploredException(); // XXX
}
} else if (numEval >= n + 2) {
final int ih = nfx * (nfx + 1) / 2 - 1;
final double tmp = (f - fbeg) / stepb;
final double diff = stepb - stepa;
modelSecondDerivativesValues.setEntry(ih, TWO * (tmp - gradientAtTrustRegionCenter.getEntry(nfxm)) / diff);
gradientAtTrustRegionCenter.setEntry(nfxm, (gradientAtTrustRegionCenter.getEntry(nfxm) * stepb - tmp * stepa) / diff);
if (stepa * stepb < ZERO) {
if (f < fAtInterpolationPoints.getEntry(nfm - n)) {
fAtInterpolationPoints.setEntry(nfm, fAtInterpolationPoints.getEntry(nfm - n));
fAtInterpolationPoints.setEntry(nfm - n, f);
if (trustRegionCenterInterpolationPointIndex == nfm) {
trustRegionCenterInterpolationPointIndex = nfm - n;
}
interpolationPoints.setEntry(nfm - n, nfxm, stepb);
interpolationPoints.setEntry(nfm, nfxm, stepa);
}
}
bMatrix.setEntry(0, nfxm, -(stepa + stepb) / (stepa * stepb));
bMatrix.setEntry(nfm, nfxm, -HALF / interpolationPoints.getEntry(nfm - n, nfxm));
bMatrix.setEntry(nfm - n, nfxm,
-bMatrix.getEntry(0, nfxm) - bMatrix.getEntry(nfm, nfxm));
zMatrix.setEntry(0, nfxm, Math.sqrt(TWO) / (stepa * stepb));
zMatrix.setEntry(nfm, nfxm, Math.sqrt(HALF) / rhosq);
// zMatrix.setEntry(nfm, nfxm, Math.sqrt(HALF) * recip); // XXX "testAckley" and "testDiffPow" fail.
zMatrix.setEntry(nfm - n, nfxm,
-zMatrix.getEntry(0, nfxm) - zMatrix.getEntry(nfm, nfxm));
}
// Set the off-diagonal second derivatives of the Lagrange functions and
// the initial quadratic model.
} else {
zMatrix.setEntry(0, nfxm, recip);
zMatrix.setEntry(nfm, nfxm, recip);
zMatrix.setEntry(ipt, nfxm, -recip);
zMatrix.setEntry(jpt, nfxm, -recip);
final int ih = ipt * (ipt - 1) / 2 + jpt - 1;
final double tmp = interpolationPoints.getEntry(nfm, ipt - 1) * interpolationPoints.getEntry(nfm, jpt - 1);
modelSecondDerivativesValues.setEntry(ih, (fbeg - fAtInterpolationPoints.getEntry(ipt) - fAtInterpolationPoints.getEntry(jpt) + f) / tmp);
throw new PathIsExploredException(); // XXX
}
} while (getEvaluations() < npt);
} // prelim
private void prelim(double[] lowerBound,
double[] upperBound) {
printMethod(); // XXX
final int n = currentBest.getDimension();
final int npt = numberOfInterpolationPoints;
final int ndim = bMatrix.getRowDimension();
final double rhosq = initialTrustRegionRadius * initialTrustRegionRadius;
final double recip = 1d / rhosq;
final int np = n + 1;
// Set XBASE to the initial vector of variables, and set the initial
// elements of XPT, BMAT, HQ, PQ and ZMAT to zero.
for (int j = 0; j < n; j++) {
originShift.setEntry(j, currentBest.getEntry(j));
for (int k = 0; k < npt; k++) {
interpolationPoints.setEntry(k, j, ZERO);
}
for (int i = 0; i < ndim; i++) {
bMatrix.setEntry(i, j, ZERO);
}
}
for (int i = 0, max = n * np / 2; i < max; i++) {
modelSecondDerivativesValues.setEntry(i, ZERO);
}
for (int k = 0; k < npt; k++) {
modelSecondDerivativesParameters.setEntry(k, ZERO);
for (int j = 0, max = npt - np; j < max; j++) {
zMatrix.setEntry(k, j, ZERO);
}
}
// Begin the initialization procedure. NF becomes one more than the number
// of function values so far. The coordinates of the displacement of the
// next initial interpolation point from XBASE are set in XPT(NF+1,.).
int ipt = 0;
int jpt = 0;
double fbeg = Double.NaN;
do {
final int nfm = getEvaluations();
final int nfx = nfm - n;
final int nfmm = nfm - 1;
final int nfxm = nfx - 1;
double stepa = 0;
double stepb = 0;
if (nfm <= 2 * n) {
if (nfm >= 1 &&
nfm <= n) {
stepa = initialTrustRegionRadius;
if (upperDifference.getEntry(nfmm) == ZERO) {
stepa = -stepa;
throw new PathIsExploredException(); // XXX
}
interpolationPoints.setEntry(nfm, nfmm, stepa);
} else if (nfm > n) {
stepa = interpolationPoints.getEntry(nfx, nfxm);
stepb = -initialTrustRegionRadius;
if (lowerDifference.getEntry(nfxm) == ZERO) {
stepb = Math.min(TWO * initialTrustRegionRadius, upperDifference.getEntry(nfxm));
throw new PathIsExploredException(); // XXX
}
if (upperDifference.getEntry(nfxm) == ZERO) {
stepb = Math.max(-TWO * initialTrustRegionRadius, lowerDifference.getEntry(nfxm));
throw new PathIsExploredException(); // XXX
}
interpolationPoints.setEntry(nfm, nfxm, stepb);
}
} else {
final int tmp1 = (nfm - np) / n;
jpt = nfm - tmp1 * n - n;
ipt = jpt + tmp1;
if (ipt > n) {
final int tmp2 = jpt;
jpt = ipt - n;
ipt = tmp2;
// throw new PathIsExploredException(); // XXX
}
final int iptMinus1 = ipt - 1;
final int jptMinus1 = jpt - 1;
interpolationPoints.setEntry(nfm, iptMinus1, interpolationPoints.getEntry(ipt, iptMinus1));
interpolationPoints.setEntry(nfm, jptMinus1, interpolationPoints.getEntry(jpt, jptMinus1));
}
// Calculate the next value of F. The least function value so far and
// its index are required.
for (int j = 0; j < n; j++) {
currentBest.setEntry(j, Math.min(Math.max(lowerBound[j],
originShift.getEntry(j) + interpolationPoints.getEntry(nfm, j)),
upperBound[j]));
if (interpolationPoints.getEntry(nfm, j) == lowerDifference.getEntry(j)) {
currentBest.setEntry(j, lowerBound[j]);
}
if (interpolationPoints.getEntry(nfm, j) == upperDifference.getEntry(j)) {
currentBest.setEntry(j, upperBound[j]);
}
}
final double objectiveValue = computeObjectiveValue(currentBest.toArray());
final double f = isMinimize ? objectiveValue : -objectiveValue;
final int numEval = getEvaluations(); // nfm + 1
fAtInterpolationPoints.setEntry(nfm, f);
if (numEval == 1) {
fbeg = f;
trustRegionCenterInterpolationPointIndex = 0;
} else if (f < fAtInterpolationPoints.getEntry(trustRegionCenterInterpolationPointIndex)) {
trustRegionCenterInterpolationPointIndex = nfm;
}
// Set the nonzero initial elements of BMAT and the quadratic model in the
// cases when NF is at most 2*N+1. If NF exceeds N+1, then the positions
// of the NF-th and (NF-N)-th interpolation points may be switched, in
// order that the function value at the first of them contributes to the
// off-diagonal second derivative terms of the initial quadratic model.
if (numEval <= 2 * n + 1) {
if (numEval >= 2 &&
numEval <= n + 1) {
gradientAtTrustRegionCenter.setEntry(nfmm, (f - fbeg) / stepa);
if (npt < numEval + n) {
final double oneOverStepA = ONE / stepa;
bMatrix.setEntry(0, nfmm, -oneOverStepA);
bMatrix.setEntry(nfm, nfmm, oneOverStepA);
bMatrix.setEntry(npt + nfmm, nfmm, -HALF * rhosq);
throw new PathIsExploredException(); // XXX
}
} else if (numEval >= n + 2) {
final int ih = nfx * (nfx + 1) / 2 - 1;
final double tmp = (f - fbeg) / stepb;
final double diff = stepb - stepa;
modelSecondDerivativesValues.setEntry(ih, TWO * (tmp - gradientAtTrustRegionCenter.getEntry(nfxm)) / diff);
gradientAtTrustRegionCenter.setEntry(nfxm, (gradientAtTrustRegionCenter.getEntry(nfxm) * stepb - tmp * stepa) / diff);
if (stepa * stepb < ZERO) {
if (f < fAtInterpolationPoints.getEntry(nfm - n)) {
fAtInterpolationPoints.setEntry(nfm, fAtInterpolationPoints.getEntry(nfm - n));
fAtInterpolationPoints.setEntry(nfm - n, f);
if (trustRegionCenterInterpolationPointIndex == nfm) {
trustRegionCenterInterpolationPointIndex = nfm - n;
}
interpolationPoints.setEntry(nfm - n, nfxm, stepb);
interpolationPoints.setEntry(nfm, nfxm, stepa);
}
}
bMatrix.setEntry(0, nfxm, -(stepa + stepb) / (stepa * stepb));
bMatrix.setEntry(nfm, nfxm, -HALF / interpolationPoints.getEntry(nfm - n, nfxm));
bMatrix.setEntry(nfm - n, nfxm,
-bMatrix.getEntry(0, nfxm) - bMatrix.getEntry(nfm, nfxm));
zMatrix.setEntry(0, nfxm, Math.sqrt(TWO) / (stepa * stepb));
zMatrix.setEntry(nfm, nfxm, Math.sqrt(HALF) / rhosq);
// zMatrix.setEntry(nfm, nfxm, Math.sqrt(HALF) * recip); // XXX "testAckley" and "testDiffPow" fail.
zMatrix.setEntry(nfm - n, nfxm,
-zMatrix.getEntry(0, nfxm) - zMatrix.getEntry(nfm, nfxm));
}
// Set the off-diagonal second derivatives of the Lagrange functions and
// the initial quadratic model.
} else {
zMatrix.setEntry(0, nfxm, recip);
zMatrix.setEntry(nfm, nfxm, recip);
zMatrix.setEntry(ipt, nfxm, -recip);
zMatrix.setEntry(jpt, nfxm, -recip);
final int ih = ipt * (ipt - 1) / 2 + jpt - 1;
final double tmp = interpolationPoints.getEntry(nfm, ipt - 1) * interpolationPoints.getEntry(nfm, jpt - 1);
modelSecondDerivativesValues.setEntry(ih, (fbeg - fAtInterpolationPoints.getEntry(ipt) - fAtInterpolationPoints.getEntry(jpt) + f) / tmp);
// throw new PathIsExploredException(); // XXX
}
} while (getEvaluations() < npt);
} // prelim | src/main/java/org/apache/commons/math/optimization/direct/BOBYQAOptimizer.java |
Math-39 | public void integrate(final ExpandableStatefulODE equations, final double t)
throws MathIllegalStateException, MathIllegalArgumentException {
sanityChecks(equations, t);
setEquations(equations);
final boolean forward = t > equations.getTime();
// create some internal working arrays
final double[] y0 = equations.getCompleteState();
final double[] y = y0.clone();
final int stages = c.length + 1;
final double[][] yDotK = new double[stages][y.length];
final double[] yTmp = y0.clone();
final double[] yDotTmp = new double[y.length];
// set up an interpolator sharing the integrator arrays
final RungeKuttaStepInterpolator interpolator = (RungeKuttaStepInterpolator) prototype.copy();
interpolator.reinitialize(this, yTmp, yDotK, forward,
equations.getPrimaryMapper(), equations.getSecondaryMappers());
interpolator.storeTime(equations.getTime());
// set up integration control objects
stepStart = equations.getTime();
double hNew = 0;
boolean firstTime = true;
initIntegration(equations.getTime(), y0, t);
// main integration loop
isLastStep = false;
do {
interpolator.shift();
// iterate over step size, ensuring local normalized error is smaller than 1
double error = 10;
while (error >= 1.0) {
if (firstTime || !fsal) {
// first stage
computeDerivatives(stepStart, y, yDotK[0]);
}
if (firstTime) {
final double[] scale = new double[mainSetDimension];
if (vecAbsoluteTolerance == null) {
for (int i = 0; i < scale.length; ++i) {
scale[i] = scalAbsoluteTolerance + scalRelativeTolerance * FastMath.abs(y[i]);
}
} else {
for (int i = 0; i < scale.length; ++i) {
scale[i] = vecAbsoluteTolerance[i] + vecRelativeTolerance[i] * FastMath.abs(y[i]);
}
}
hNew = initializeStep(forward, getOrder(), scale,
stepStart, y, yDotK[0], yTmp, yDotK[1]);
firstTime = false;
}
stepSize = hNew;
// next stages
for (int k = 1; k < stages; ++k) {
for (int j = 0; j < y0.length; ++j) {
double sum = a[k-1][0] * yDotK[0][j];
for (int l = 1; l < k; ++l) {
sum += a[k-1][l] * yDotK[l][j];
}
yTmp[j] = y[j] + stepSize * sum;
}
computeDerivatives(stepStart + c[k-1] * stepSize, yTmp, yDotK[k]);
}
// estimate the state at the end of the step
for (int j = 0; j < y0.length; ++j) {
double sum = b[0] * yDotK[0][j];
for (int l = 1; l < stages; ++l) {
sum += b[l] * yDotK[l][j];
}
yTmp[j] = y[j] + stepSize * sum;
}
// estimate the error at the end of the step
error = estimateError(yDotK, y, yTmp, stepSize);
if (error >= 1.0) {
// reject the step and attempt to reduce error by stepsize control
final double factor =
FastMath.min(maxGrowth,
FastMath.max(minReduction, safety * FastMath.pow(error, exp)));
hNew = filterStep(stepSize * factor, forward, false);
}
}
// local error is small enough: accept the step, trigger events and step handlers
interpolator.storeTime(stepStart + stepSize);
System.arraycopy(yTmp, 0, y, 0, y0.length);
System.arraycopy(yDotK[stages - 1], 0, yDotTmp, 0, y0.length);
stepStart = acceptStep(interpolator, y, yDotTmp, t);
System.arraycopy(y, 0, yTmp, 0, y.length);
if (!isLastStep) {
// prepare next step
interpolator.storeTime(stepStart);
if (fsal) {
// save the last evaluation for the next step
System.arraycopy(yDotTmp, 0, yDotK[0], 0, y0.length);
}
// stepsize control for next step
final double factor =
FastMath.min(maxGrowth, FastMath.max(minReduction, safety * FastMath.pow(error, exp)));
final double scaledH = stepSize * factor;
final double nextT = stepStart + scaledH;
final boolean nextIsLast = forward ? (nextT >= t) : (nextT <= t);
hNew = filterStep(scaledH, forward, nextIsLast);
final double filteredNextT = stepStart + hNew;
final boolean filteredNextIsLast = forward ? (filteredNextT >= t) : (filteredNextT <= t);
if (filteredNextIsLast) {
hNew = t - stepStart;
}
}
} while (!isLastStep);
// dispatch results
equations.setTime(stepStart);
equations.setCompleteState(y);
resetInternalState();
}
public void integrate(final ExpandableStatefulODE equations, final double t)
throws MathIllegalStateException, MathIllegalArgumentException {
sanityChecks(equations, t);
setEquations(equations);
final boolean forward = t > equations.getTime();
// create some internal working arrays
final double[] y0 = equations.getCompleteState();
final double[] y = y0.clone();
final int stages = c.length + 1;
final double[][] yDotK = new double[stages][y.length];
final double[] yTmp = y0.clone();
final double[] yDotTmp = new double[y.length];
// set up an interpolator sharing the integrator arrays
final RungeKuttaStepInterpolator interpolator = (RungeKuttaStepInterpolator) prototype.copy();
interpolator.reinitialize(this, yTmp, yDotK, forward,
equations.getPrimaryMapper(), equations.getSecondaryMappers());
interpolator.storeTime(equations.getTime());
// set up integration control objects
stepStart = equations.getTime();
double hNew = 0;
boolean firstTime = true;
initIntegration(equations.getTime(), y0, t);
// main integration loop
isLastStep = false;
do {
interpolator.shift();
// iterate over step size, ensuring local normalized error is smaller than 1
double error = 10;
while (error >= 1.0) {
if (firstTime || !fsal) {
// first stage
computeDerivatives(stepStart, y, yDotK[0]);
}
if (firstTime) {
final double[] scale = new double[mainSetDimension];
if (vecAbsoluteTolerance == null) {
for (int i = 0; i < scale.length; ++i) {
scale[i] = scalAbsoluteTolerance + scalRelativeTolerance * FastMath.abs(y[i]);
}
} else {
for (int i = 0; i < scale.length; ++i) {
scale[i] = vecAbsoluteTolerance[i] + vecRelativeTolerance[i] * FastMath.abs(y[i]);
}
}
hNew = initializeStep(forward, getOrder(), scale,
stepStart, y, yDotK[0], yTmp, yDotK[1]);
firstTime = false;
}
stepSize = hNew;
if (forward) {
if (stepStart + stepSize >= t) {
stepSize = t - stepStart;
}
} else {
if (stepStart + stepSize <= t) {
stepSize = t - stepStart;
}
}
// next stages
for (int k = 1; k < stages; ++k) {
for (int j = 0; j < y0.length; ++j) {
double sum = a[k-1][0] * yDotK[0][j];
for (int l = 1; l < k; ++l) {
sum += a[k-1][l] * yDotK[l][j];
}
yTmp[j] = y[j] + stepSize * sum;
}
computeDerivatives(stepStart + c[k-1] * stepSize, yTmp, yDotK[k]);
}
// estimate the state at the end of the step
for (int j = 0; j < y0.length; ++j) {
double sum = b[0] * yDotK[0][j];
for (int l = 1; l < stages; ++l) {
sum += b[l] * yDotK[l][j];
}
yTmp[j] = y[j] + stepSize * sum;
}
// estimate the error at the end of the step
error = estimateError(yDotK, y, yTmp, stepSize);
if (error >= 1.0) {
// reject the step and attempt to reduce error by stepsize control
final double factor =
FastMath.min(maxGrowth,
FastMath.max(minReduction, safety * FastMath.pow(error, exp)));
hNew = filterStep(stepSize * factor, forward, false);
}
}
// local error is small enough: accept the step, trigger events and step handlers
interpolator.storeTime(stepStart + stepSize);
System.arraycopy(yTmp, 0, y, 0, y0.length);
System.arraycopy(yDotK[stages - 1], 0, yDotTmp, 0, y0.length);
stepStart = acceptStep(interpolator, y, yDotTmp, t);
System.arraycopy(y, 0, yTmp, 0, y.length);
if (!isLastStep) {
// prepare next step
interpolator.storeTime(stepStart);
if (fsal) {
// save the last evaluation for the next step
System.arraycopy(yDotTmp, 0, yDotK[0], 0, y0.length);
}
// stepsize control for next step
final double factor =
FastMath.min(maxGrowth, FastMath.max(minReduction, safety * FastMath.pow(error, exp)));
final double scaledH = stepSize * factor;
final double nextT = stepStart + scaledH;
final boolean nextIsLast = forward ? (nextT >= t) : (nextT <= t);
hNew = filterStep(scaledH, forward, nextIsLast);
final double filteredNextT = stepStart + hNew;
final boolean filteredNextIsLast = forward ? (filteredNextT >= t) : (filteredNextT <= t);
if (filteredNextIsLast) {
hNew = t - stepStart;
}
}
} while (!isLastStep);
// dispatch results
equations.setTime(stepStart);
equations.setCompleteState(y);
resetInternalState();
} | src/main/java/org/apache/commons/math/ode/nonstiff/EmbeddedRungeKuttaIntegrator.java |
Math-40 | protected double doSolve() {
// prepare arrays with the first points
final double[] x = new double[maximalOrder + 1];
final double[] y = new double[maximalOrder + 1];
x[0] = getMin();
x[1] = getStartValue();
x[2] = getMax();
verifySequence(x[0], x[1], x[2]);
// evaluate initial guess
y[1] = computeObjectiveValue(x[1]);
if (Precision.equals(y[1], 0.0, 1)) {
// return the initial guess if it is a perfect root.
return x[1];
}
// evaluate first endpoint
y[0] = computeObjectiveValue(x[0]);
if (Precision.equals(y[0], 0.0, 1)) {
// return the first endpoint if it is a perfect root.
return x[0];
}
int nbPoints;
int signChangeIndex;
if (y[0] * y[1] < 0) {
// reduce interval if it brackets the root
nbPoints = 2;
signChangeIndex = 1;
} else {
// evaluate second endpoint
y[2] = computeObjectiveValue(x[2]);
if (Precision.equals(y[2], 0.0, 1)) {
// return the second endpoint if it is a perfect root.
return x[2];
}
if (y[1] * y[2] < 0) {
// use all computed point as a start sampling array for solving
nbPoints = 3;
signChangeIndex = 2;
} else {
throw new NoBracketingException(x[0], x[2], y[0], y[2]);
}
}
// prepare a work array for inverse polynomial interpolation
final double[] tmpX = new double[x.length];
// current tightest bracketing of the root
double xA = x[signChangeIndex - 1];
double yA = y[signChangeIndex - 1];
double absYA = FastMath.abs(yA);
int agingA = 0;
double xB = x[signChangeIndex];
double yB = y[signChangeIndex];
double absYB = FastMath.abs(yB);
int agingB = 0;
// search loop
while (true) {
// check convergence of bracketing interval
final double xTol = getAbsoluteAccuracy() +
getRelativeAccuracy() * FastMath.max(FastMath.abs(xA), FastMath.abs(xB));
if (((xB - xA) <= xTol) || (FastMath.max(absYA, absYB) < getFunctionValueAccuracy())) {
switch (allowed) {
case ANY_SIDE :
return absYA < absYB ? xA : xB;
case LEFT_SIDE :
return xA;
case RIGHT_SIDE :
return xB;
case BELOW_SIDE :
return (yA <= 0) ? xA : xB;
case ABOVE_SIDE :
return (yA < 0) ? xB : xA;
default :
// this should never happen
throw new MathInternalError(null);
}
}
// target for the next evaluation point
double targetY;
if (agingA >= MAXIMAL_AGING) {
// we keep updating the high bracket, try to compensate this
targetY = -REDUCTION_FACTOR * yB;
} else if (agingB >= MAXIMAL_AGING) {
// we keep updating the low bracket, try to compensate this
targetY = -REDUCTION_FACTOR * yA;
} else {
// bracketing is balanced, try to find the root itself
targetY = 0;
}
// make a few attempts to guess a root,
double nextX;
int start = 0;
int end = nbPoints;
do {
// guess a value for current target, using inverse polynomial interpolation
System.arraycopy(x, start, tmpX, start, end - start);
nextX = guessX(targetY, tmpX, y, start, end);
if (!((nextX > xA) && (nextX < xB))) {
// the guessed root is not strictly inside of the tightest bracketing interval
// the guessed root is either not strictly inside the interval or it
// is a NaN (which occurs when some sampling points share the same y)
// we try again with a lower interpolation order
if (signChangeIndex - start >= end - signChangeIndex) {
// we have more points before the sign change, drop the lowest point
++start;
} else {
// we have more points after sign change, drop the highest point
--end;
}
// we need to do one more attempt
nextX = Double.NaN;
}
} while (Double.isNaN(nextX) && (end - start > 1));
if (Double.isNaN(nextX)) {
// fall back to bisection
nextX = xA + 0.5 * (xB - xA);
start = signChangeIndex - 1;
end = signChangeIndex;
}
// evaluate the function at the guessed root
final double nextY = computeObjectiveValue(nextX);
if (Precision.equals(nextY, 0.0, 1)) {
// we have found an exact root, since it is not an approximation
// we don't need to bother about the allowed solutions setting
return nextX;
}
if ((nbPoints > 2) && (end - start != nbPoints)) {
// we have been forced to ignore some points to keep bracketing,
// they are probably too far from the root, drop them from now on
nbPoints = end - start;
System.arraycopy(x, start, x, 0, nbPoints);
System.arraycopy(y, start, y, 0, nbPoints);
signChangeIndex -= start;
} else if (nbPoints == x.length) {
// we have to drop one point in order to insert the new one
nbPoints--;
// keep the tightest bracketing interval as centered as possible
if (signChangeIndex >= (x.length + 1) / 2) {
// we drop the lowest point, we have to shift the arrays and the index
System.arraycopy(x, 1, x, 0, nbPoints);
System.arraycopy(y, 1, y, 0, nbPoints);
--signChangeIndex;
}
}
// insert the last computed point
//(by construction, we know it lies inside the tightest bracketing interval)
System.arraycopy(x, signChangeIndex, x, signChangeIndex + 1, nbPoints - signChangeIndex);
x[signChangeIndex] = nextX;
System.arraycopy(y, signChangeIndex, y, signChangeIndex + 1, nbPoints - signChangeIndex);
y[signChangeIndex] = nextY;
++nbPoints;
// update the bracketing interval
if (nextY * yA <= 0) {
// the sign change occurs before the inserted point
xB = nextX;
yB = nextY;
absYB = FastMath.abs(yB);
++agingA;
agingB = 0;
} else {
// the sign change occurs after the inserted point
xA = nextX;
yA = nextY;
absYA = FastMath.abs(yA);
agingA = 0;
++agingB;
// update the sign change index
signChangeIndex++;
}
}
}
protected double doSolve() {
// prepare arrays with the first points
final double[] x = new double[maximalOrder + 1];
final double[] y = new double[maximalOrder + 1];
x[0] = getMin();
x[1] = getStartValue();
x[2] = getMax();
verifySequence(x[0], x[1], x[2]);
// evaluate initial guess
y[1] = computeObjectiveValue(x[1]);
if (Precision.equals(y[1], 0.0, 1)) {
// return the initial guess if it is a perfect root.
return x[1];
}
// evaluate first endpoint
y[0] = computeObjectiveValue(x[0]);
if (Precision.equals(y[0], 0.0, 1)) {
// return the first endpoint if it is a perfect root.
return x[0];
}
int nbPoints;
int signChangeIndex;
if (y[0] * y[1] < 0) {
// reduce interval if it brackets the root
nbPoints = 2;
signChangeIndex = 1;
} else {
// evaluate second endpoint
y[2] = computeObjectiveValue(x[2]);
if (Precision.equals(y[2], 0.0, 1)) {
// return the second endpoint if it is a perfect root.
return x[2];
}
if (y[1] * y[2] < 0) {
// use all computed point as a start sampling array for solving
nbPoints = 3;
signChangeIndex = 2;
} else {
throw new NoBracketingException(x[0], x[2], y[0], y[2]);
}
}
// prepare a work array for inverse polynomial interpolation
final double[] tmpX = new double[x.length];
// current tightest bracketing of the root
double xA = x[signChangeIndex - 1];
double yA = y[signChangeIndex - 1];
double absYA = FastMath.abs(yA);
int agingA = 0;
double xB = x[signChangeIndex];
double yB = y[signChangeIndex];
double absYB = FastMath.abs(yB);
int agingB = 0;
// search loop
while (true) {
// check convergence of bracketing interval
final double xTol = getAbsoluteAccuracy() +
getRelativeAccuracy() * FastMath.max(FastMath.abs(xA), FastMath.abs(xB));
if (((xB - xA) <= xTol) || (FastMath.max(absYA, absYB) < getFunctionValueAccuracy())) {
switch (allowed) {
case ANY_SIDE :
return absYA < absYB ? xA : xB;
case LEFT_SIDE :
return xA;
case RIGHT_SIDE :
return xB;
case BELOW_SIDE :
return (yA <= 0) ? xA : xB;
case ABOVE_SIDE :
return (yA < 0) ? xB : xA;
default :
// this should never happen
throw new MathInternalError(null);
}
}
// target for the next evaluation point
double targetY;
if (agingA >= MAXIMAL_AGING) {
// we keep updating the high bracket, try to compensate this
final int p = agingA - MAXIMAL_AGING;
final double weightA = (1 << p) - 1;
final double weightB = p + 1;
targetY = (weightA * yA - weightB * REDUCTION_FACTOR * yB) / (weightA + weightB);
} else if (agingB >= MAXIMAL_AGING) {
// we keep updating the low bracket, try to compensate this
final int p = agingB - MAXIMAL_AGING;
final double weightA = p + 1;
final double weightB = (1 << p) - 1;
targetY = (weightB * yB - weightA * REDUCTION_FACTOR * yA) / (weightA + weightB);
} else {
// bracketing is balanced, try to find the root itself
targetY = 0;
}
// make a few attempts to guess a root,
double nextX;
int start = 0;
int end = nbPoints;
do {
// guess a value for current target, using inverse polynomial interpolation
System.arraycopy(x, start, tmpX, start, end - start);
nextX = guessX(targetY, tmpX, y, start, end);
if (!((nextX > xA) && (nextX < xB))) {
// the guessed root is not strictly inside of the tightest bracketing interval
// the guessed root is either not strictly inside the interval or it
// is a NaN (which occurs when some sampling points share the same y)
// we try again with a lower interpolation order
if (signChangeIndex - start >= end - signChangeIndex) {
// we have more points before the sign change, drop the lowest point
++start;
} else {
// we have more points after sign change, drop the highest point
--end;
}
// we need to do one more attempt
nextX = Double.NaN;
}
} while (Double.isNaN(nextX) && (end - start > 1));
if (Double.isNaN(nextX)) {
// fall back to bisection
nextX = xA + 0.5 * (xB - xA);
start = signChangeIndex - 1;
end = signChangeIndex;
}
// evaluate the function at the guessed root
final double nextY = computeObjectiveValue(nextX);
if (Precision.equals(nextY, 0.0, 1)) {
// we have found an exact root, since it is not an approximation
// we don't need to bother about the allowed solutions setting
return nextX;
}
if ((nbPoints > 2) && (end - start != nbPoints)) {
// we have been forced to ignore some points to keep bracketing,
// they are probably too far from the root, drop them from now on
nbPoints = end - start;
System.arraycopy(x, start, x, 0, nbPoints);
System.arraycopy(y, start, y, 0, nbPoints);
signChangeIndex -= start;
} else if (nbPoints == x.length) {
// we have to drop one point in order to insert the new one
nbPoints--;
// keep the tightest bracketing interval as centered as possible
if (signChangeIndex >= (x.length + 1) / 2) {
// we drop the lowest point, we have to shift the arrays and the index
System.arraycopy(x, 1, x, 0, nbPoints);
System.arraycopy(y, 1, y, 0, nbPoints);
--signChangeIndex;
}
}
// insert the last computed point
//(by construction, we know it lies inside the tightest bracketing interval)
System.arraycopy(x, signChangeIndex, x, signChangeIndex + 1, nbPoints - signChangeIndex);
x[signChangeIndex] = nextX;
System.arraycopy(y, signChangeIndex, y, signChangeIndex + 1, nbPoints - signChangeIndex);
y[signChangeIndex] = nextY;
++nbPoints;
// update the bracketing interval
if (nextY * yA <= 0) {
// the sign change occurs before the inserted point
xB = nextX;
yB = nextY;
absYB = FastMath.abs(yB);
++agingA;
agingB = 0;
} else {
// the sign change occurs after the inserted point
xA = nextX;
yA = nextY;
absYA = FastMath.abs(yA);
agingA = 0;
++agingB;
// update the sign change index
signChangeIndex++;
}
}
} | src/main/java/org/apache/commons/math/analysis/solvers/BracketingNthOrderBrentSolver.java |
Math-41 | public double evaluate(final double[] values, final double[] weights,
final double mean, final int begin, final int length) {
double var = Double.NaN;
if (test(values, weights, begin, length)) {
if (length == 1) {
var = 0.0;
} else if (length > 1) {
double accum = 0.0;
double dev = 0.0;
double accum2 = 0.0;
for (int i = begin; i < begin + length; i++) {
dev = values[i] - mean;
accum += weights[i] * (dev * dev);
accum2 += weights[i] * dev;
}
double sumWts = 0;
for (int i = 0; i < weights.length; i++) {
sumWts += weights[i];
}
if (isBiasCorrected) {
var = (accum - (accum2 * accum2 / sumWts)) / (sumWts - 1.0);
} else {
var = (accum - (accum2 * accum2 / sumWts)) / sumWts;
}
}
}
return var;
}
public double evaluate(final double[] values, final double[] weights,
final double mean, final int begin, final int length) {
double var = Double.NaN;
if (test(values, weights, begin, length)) {
if (length == 1) {
var = 0.0;
} else if (length > 1) {
double accum = 0.0;
double dev = 0.0;
double accum2 = 0.0;
for (int i = begin; i < begin + length; i++) {
dev = values[i] - mean;
accum += weights[i] * (dev * dev);
accum2 += weights[i] * dev;
}
double sumWts = 0;
for (int i = begin; i < begin + length; i++) {
sumWts += weights[i];
}
if (isBiasCorrected) {
var = (accum - (accum2 * accum2 / sumWts)) / (sumWts - 1.0);
} else {
var = (accum - (accum2 * accum2 / sumWts)) / sumWts;
}
}
}
return var;
} | src/main/java/org/apache/commons/math/stat/descriptive/moment/Variance.java |
Math-42 | protected RealPointValuePair getSolution() {
int negativeVarColumn = columnLabels.indexOf(NEGATIVE_VAR_COLUMN_LABEL);
Integer negativeVarBasicRow = negativeVarColumn > 0 ? getBasicRow(negativeVarColumn) : null;
double mostNegative = negativeVarBasicRow == null ? 0 : getEntry(negativeVarBasicRow, getRhsOffset());
Set<Integer> basicRows = new HashSet<Integer>();
double[] coefficients = new double[getOriginalNumDecisionVariables()];
for (int i = 0; i < coefficients.length; i++) {
int colIndex = columnLabels.indexOf("x" + i);
if (colIndex < 0) {
coefficients[i] = 0;
continue;
}
Integer basicRow = getBasicRow(colIndex);
// if the basic row is found to be the objective function row
// set the coefficient to 0 -> this case handles unconstrained
// variables that are still part of the objective function
if (basicRows.contains(basicRow)) {
// if multiple variables can take a given value
// then we choose the first and set the rest equal to 0
coefficients[i] = 0 - (restrictToNonNegative ? 0 : mostNegative);
} else {
basicRows.add(basicRow);
coefficients[i] =
(basicRow == null ? 0 : getEntry(basicRow, getRhsOffset())) -
(restrictToNonNegative ? 0 : mostNegative);
}
}
return new RealPointValuePair(coefficients, f.getValue(coefficients));
}
protected RealPointValuePair getSolution() {
int negativeVarColumn = columnLabels.indexOf(NEGATIVE_VAR_COLUMN_LABEL);
Integer negativeVarBasicRow = negativeVarColumn > 0 ? getBasicRow(negativeVarColumn) : null;
double mostNegative = negativeVarBasicRow == null ? 0 : getEntry(negativeVarBasicRow, getRhsOffset());
Set<Integer> basicRows = new HashSet<Integer>();
double[] coefficients = new double[getOriginalNumDecisionVariables()];
for (int i = 0; i < coefficients.length; i++) {
int colIndex = columnLabels.indexOf("x" + i);
if (colIndex < 0) {
coefficients[i] = 0;
continue;
}
Integer basicRow = getBasicRow(colIndex);
if (basicRow != null && basicRow == 0) {
// if the basic row is found to be the objective function row
// set the coefficient to 0 -> this case handles unconstrained
// variables that are still part of the objective function
coefficients[i] = 0;
} else if (basicRows.contains(basicRow)) {
// if multiple variables can take a given value
// then we choose the first and set the rest equal to 0
coefficients[i] = 0 - (restrictToNonNegative ? 0 : mostNegative);
} else {
basicRows.add(basicRow);
coefficients[i] =
(basicRow == null ? 0 : getEntry(basicRow, getRhsOffset())) -
(restrictToNonNegative ? 0 : mostNegative);
}
}
return new RealPointValuePair(coefficients, f.getValue(coefficients));
} | src/main/java/org/apache/commons/math/optimization/linear/SimplexTableau.java |
Math-43 | public void addValue(double value) {
sumImpl.increment(value);
sumsqImpl.increment(value);
minImpl.increment(value);
maxImpl.increment(value);
sumLogImpl.increment(value);
secondMoment.increment(value);
// If mean, variance or geomean have been overridden,
// need to increment these
if (!(meanImpl instanceof Mean)) {
meanImpl.increment(value);
}
if (!(varianceImpl instanceof Variance)) {
varianceImpl.increment(value);
}
if (!(geoMeanImpl instanceof GeometricMean)) {
geoMeanImpl.increment(value);
}
n++;
}
public void addValue(double value) {
sumImpl.increment(value);
sumsqImpl.increment(value);
minImpl.increment(value);
maxImpl.increment(value);
sumLogImpl.increment(value);
secondMoment.increment(value);
// If mean, variance or geomean have been overridden,
// need to increment these
if (meanImpl != mean) {
meanImpl.increment(value);
}
if (varianceImpl != variance) {
varianceImpl.increment(value);
}
if (geoMeanImpl != geoMean) {
geoMeanImpl.increment(value);
}
n++;
} | src/main/java/org/apache/commons/math/stat/descriptive/SummaryStatistics.java |
Math-44 | protected double acceptStep(final AbstractStepInterpolator interpolator,
final double[] y, final double[] yDot, final double tEnd)
throws MathIllegalStateException {
double previousT = interpolator.getGlobalPreviousTime();
final double currentT = interpolator.getGlobalCurrentTime();
resetOccurred = false;
// initialize the events states if needed
if (! statesInitialized) {
for (EventState state : eventsStates) {
state.reinitializeBegin(interpolator);
}
statesInitialized = true;
}
// search for next events that may occur during the step
final int orderingSign = interpolator.isForward() ? +1 : -1;
SortedSet<EventState> occuringEvents = new TreeSet<EventState>(new Comparator<EventState>() {
/** {@inheritDoc} */
public int compare(EventState es0, EventState es1) {
return orderingSign * Double.compare(es0.getEventTime(), es1.getEventTime());
}
});
for (final EventState state : eventsStates) {
if (state.evaluateStep(interpolator)) {
// the event occurs during the current step
occuringEvents.add(state);
}
}
while (!occuringEvents.isEmpty()) {
// handle the chronologically first event
final Iterator<EventState> iterator = occuringEvents.iterator();
final EventState currentEvent = iterator.next();
iterator.remove();
// restrict the interpolator to the first part of the step, up to the event
final double eventT = currentEvent.getEventTime();
interpolator.setSoftPreviousTime(previousT);
interpolator.setSoftCurrentTime(eventT);
// trigger the event
interpolator.setInterpolatedTime(eventT);
final double[] eventY = interpolator.getInterpolatedState();
currentEvent.stepAccepted(eventT, eventY);
isLastStep = currentEvent.stop();
// handle the first part of the step, up to the event
for (final StepHandler handler : stepHandlers) {
handler.handleStep(interpolator, isLastStep);
}
if (isLastStep) {
// the event asked to stop integration
System.arraycopy(eventY, 0, y, 0, y.length);
return eventT;
}
if (currentEvent.reset(eventT, eventY)) {
// some event handler has triggered changes that
// invalidate the derivatives, we need to recompute them
System.arraycopy(eventY, 0, y, 0, y.length);
computeDerivatives(eventT, y, yDot);
resetOccurred = true;
return eventT;
}
// prepare handling of the remaining part of the step
previousT = eventT;
interpolator.setSoftPreviousTime(eventT);
interpolator.setSoftCurrentTime(currentT);
// check if the same event occurs again in the remaining part of the step
if (currentEvent.evaluateStep(interpolator)) {
// the event occurs during the current step
occuringEvents.add(currentEvent);
}
}
interpolator.setInterpolatedTime(currentT);
final double[] currentY = interpolator.getInterpolatedState();
for (final EventState state : eventsStates) {
state.stepAccepted(currentT, currentY);
isLastStep = isLastStep || state.stop();
}
isLastStep = isLastStep || Precision.equals(currentT, tEnd, 1);
// handle the remaining part of the step, after all events if any
for (StepHandler handler : stepHandlers) {
handler.handleStep(interpolator, isLastStep);
}
return currentT;
}
protected double acceptStep(final AbstractStepInterpolator interpolator,
final double[] y, final double[] yDot, final double tEnd)
throws MathIllegalStateException {
double previousT = interpolator.getGlobalPreviousTime();
final double currentT = interpolator.getGlobalCurrentTime();
// initialize the events states if needed
if (! statesInitialized) {
for (EventState state : eventsStates) {
state.reinitializeBegin(interpolator);
}
statesInitialized = true;
}
// search for next events that may occur during the step
final int orderingSign = interpolator.isForward() ? +1 : -1;
SortedSet<EventState> occuringEvents = new TreeSet<EventState>(new Comparator<EventState>() {
/** {@inheritDoc} */
public int compare(EventState es0, EventState es1) {
return orderingSign * Double.compare(es0.getEventTime(), es1.getEventTime());
}
});
for (final EventState state : eventsStates) {
if (state.evaluateStep(interpolator)) {
// the event occurs during the current step
occuringEvents.add(state);
}
}
while (!occuringEvents.isEmpty()) {
// handle the chronologically first event
final Iterator<EventState> iterator = occuringEvents.iterator();
final EventState currentEvent = iterator.next();
iterator.remove();
// restrict the interpolator to the first part of the step, up to the event
final double eventT = currentEvent.getEventTime();
interpolator.setSoftPreviousTime(previousT);
interpolator.setSoftCurrentTime(eventT);
// trigger the event
interpolator.setInterpolatedTime(eventT);
final double[] eventY = interpolator.getInterpolatedState();
currentEvent.stepAccepted(eventT, eventY);
isLastStep = currentEvent.stop();
// handle the first part of the step, up to the event
for (final StepHandler handler : stepHandlers) {
handler.handleStep(interpolator, isLastStep);
}
if (isLastStep) {
// the event asked to stop integration
System.arraycopy(eventY, 0, y, 0, y.length);
for (final EventState remaining : occuringEvents) {
remaining.stepAccepted(eventT, eventY);
}
return eventT;
}
if (currentEvent.reset(eventT, eventY)) {
// some event handler has triggered changes that
// invalidate the derivatives, we need to recompute them
System.arraycopy(eventY, 0, y, 0, y.length);
computeDerivatives(eventT, y, yDot);
resetOccurred = true;
for (final EventState remaining : occuringEvents) {
remaining.stepAccepted(eventT, eventY);
}
return eventT;
}
// prepare handling of the remaining part of the step
previousT = eventT;
interpolator.setSoftPreviousTime(eventT);
interpolator.setSoftCurrentTime(currentT);
// check if the same event occurs again in the remaining part of the step
if (currentEvent.evaluateStep(interpolator)) {
// the event occurs during the current step
occuringEvents.add(currentEvent);
}
}
interpolator.setInterpolatedTime(currentT);
final double[] currentY = interpolator.getInterpolatedState();
for (final EventState state : eventsStates) {
state.stepAccepted(currentT, currentY);
isLastStep = isLastStep || state.stop();
}
isLastStep = isLastStep || Precision.equals(currentT, tEnd, 1);
// handle the remaining part of the step, after all events if any
for (StepHandler handler : stepHandlers) {
handler.handleStep(interpolator, isLastStep);
}
return currentT;
} | src/main/java/org/apache/commons/math/ode/AbstractIntegrator.java |
Math-45 | public OpenMapRealMatrix(int rowDimension, int columnDimension) {
super(rowDimension, columnDimension);
this.rows = rowDimension;
this.columns = columnDimension;
this.entries = new OpenIntToDoubleHashMap(0.0);
}
public OpenMapRealMatrix(int rowDimension, int columnDimension) {
super(rowDimension, columnDimension);
long lRow = (long) rowDimension;
long lCol = (long) columnDimension;
if (lRow * lCol >= (long) Integer.MAX_VALUE) {
throw new NumberIsTooLargeException(lRow * lCol, Integer.MAX_VALUE, false);
}
this.rows = rowDimension;
this.columns = columnDimension;
this.entries = new OpenIntToDoubleHashMap(0.0);
} | src/main/java/org/apache/commons/math/linear/OpenMapRealMatrix.java |
Math-48 | protected final double doSolve() {
// Get initial solution
double x0 = getMin();
double x1 = getMax();
double f0 = computeObjectiveValue(x0);
double f1 = computeObjectiveValue(x1);
// If one of the bounds is the exact root, return it. Since these are
// not under-approximations or over-approximations, we can return them
// regardless of the allowed solutions.
if (f0 == 0.0) {
return x0;
}
if (f1 == 0.0) {
return x1;
}
// Verify bracketing of initial solution.
verifyBracketing(x0, x1);
// Get accuracies.
final double ftol = getFunctionValueAccuracy();
final double atol = getAbsoluteAccuracy();
final double rtol = getRelativeAccuracy();
// Keep track of inverted intervals, meaning that the left bound is
// larger than the right bound.
boolean inverted = false;
// Keep finding better approximations.
while (true) {
// Calculate the next approximation.
final double x = x1 - ((f1 * (x1 - x0)) / (f1 - f0));
final double fx = computeObjectiveValue(x);
// If the new approximation is the exact root, return it. Since
// this is not an under-approximation or an over-approximation,
// we can return it regardless of the allowed solutions.
if (fx == 0.0) {
return x;
}
// Update the bounds with the new approximation.
if (f1 * fx < 0) {
// The value of x1 has switched to the other bound, thus inverting
// the interval.
x0 = x1;
f0 = f1;
inverted = !inverted;
} else {
switch (method) {
case ILLINOIS:
f0 *= 0.5;
break;
case PEGASUS:
f0 *= f1 / (f1 + fx);
break;
case REGULA_FALSI:
// Detect early that algorithm is stuck, instead of waiting
// for the maximum number of iterations to be exceeded.
break;
default:
// Should never happen.
throw new MathInternalError();
}
}
// Update from [x0, x1] to [x0, x].
x1 = x;
f1 = fx;
// If the function value of the last approximation is too small,
// given the function value accuracy, then we can't get closer to
// the root than we already are.
if (FastMath.abs(f1) <= ftol) {
switch (allowed) {
case ANY_SIDE:
return x1;
case LEFT_SIDE:
if (inverted) {
return x1;
}
break;
case RIGHT_SIDE:
if (!inverted) {
return x1;
}
break;
case BELOW_SIDE:
if (f1 <= 0) {
return x1;
}
break;
case ABOVE_SIDE:
if (f1 >= 0) {
return x1;
}
break;
default:
throw new MathInternalError();
}
}
// If the current interval is within the given accuracies, we
// are satisfied with the current approximation.
if (FastMath.abs(x1 - x0) < FastMath.max(rtol * FastMath.abs(x1),
atol)) {
switch (allowed) {
case ANY_SIDE:
return x1;
case LEFT_SIDE:
return inverted ? x1 : x0;
case RIGHT_SIDE:
return inverted ? x0 : x1;
case BELOW_SIDE:
return (f1 <= 0) ? x1 : x0;
case ABOVE_SIDE:
return (f1 >= 0) ? x1 : x0;
default:
throw new MathInternalError();
}
}
}
}
protected final double doSolve() {
// Get initial solution
double x0 = getMin();
double x1 = getMax();
double f0 = computeObjectiveValue(x0);
double f1 = computeObjectiveValue(x1);
// If one of the bounds is the exact root, return it. Since these are
// not under-approximations or over-approximations, we can return them
// regardless of the allowed solutions.
if (f0 == 0.0) {
return x0;
}
if (f1 == 0.0) {
return x1;
}
// Verify bracketing of initial solution.
verifyBracketing(x0, x1);
// Get accuracies.
final double ftol = getFunctionValueAccuracy();
final double atol = getAbsoluteAccuracy();
final double rtol = getRelativeAccuracy();
// Keep track of inverted intervals, meaning that the left bound is
// larger than the right bound.
boolean inverted = false;
// Keep finding better approximations.
while (true) {
// Calculate the next approximation.
final double x = x1 - ((f1 * (x1 - x0)) / (f1 - f0));
final double fx = computeObjectiveValue(x);
// If the new approximation is the exact root, return it. Since
// this is not an under-approximation or an over-approximation,
// we can return it regardless of the allowed solutions.
if (fx == 0.0) {
return x;
}
// Update the bounds with the new approximation.
if (f1 * fx < 0) {
// The value of x1 has switched to the other bound, thus inverting
// the interval.
x0 = x1;
f0 = f1;
inverted = !inverted;
} else {
switch (method) {
case ILLINOIS:
f0 *= 0.5;
break;
case PEGASUS:
f0 *= f1 / (f1 + fx);
break;
case REGULA_FALSI:
// Detect early that algorithm is stuck, instead of waiting
// for the maximum number of iterations to be exceeded.
if (x == x1) {
throw new ConvergenceException();
}
break;
default:
// Should never happen.
throw new MathInternalError();
}
}
// Update from [x0, x1] to [x0, x].
x1 = x;
f1 = fx;
// If the function value of the last approximation is too small,
// given the function value accuracy, then we can't get closer to
// the root than we already are.
if (FastMath.abs(f1) <= ftol) {
switch (allowed) {
case ANY_SIDE:
return x1;
case LEFT_SIDE:
if (inverted) {
return x1;
}
break;
case RIGHT_SIDE:
if (!inverted) {
return x1;
}
break;
case BELOW_SIDE:
if (f1 <= 0) {
return x1;
}
break;
case ABOVE_SIDE:
if (f1 >= 0) {
return x1;
}
break;
default:
throw new MathInternalError();
}
}
// If the current interval is within the given accuracies, we
// are satisfied with the current approximation.
if (FastMath.abs(x1 - x0) < FastMath.max(rtol * FastMath.abs(x1),
atol)) {
switch (allowed) {
case ANY_SIDE:
return x1;
case LEFT_SIDE:
return inverted ? x1 : x0;
case RIGHT_SIDE:
return inverted ? x0 : x1;
case BELOW_SIDE:
return (f1 <= 0) ? x1 : x0;
case ABOVE_SIDE:
return (f1 >= 0) ? x1 : x0;
default:
throw new MathInternalError();
}
}
}
} | src/main/java/org/apache/commons/math/analysis/solvers/BaseSecantSolver.java |
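For reference, all the solver variants above share the same false-position step and differ only in how the retained endpoint value f0 is rescaled to avoid stagnation; this is a compact restatement of the formulas the code implements, not additional behaviour:

\[
x = x_1 - \frac{f_1\,(x_1 - x_0)}{f_1 - f_0}, \qquad
f_0 \leftarrow \tfrac{1}{2}\, f_0 \ \ \text{(Illinois)}, \qquad
f_0 \leftarrow \frac{f_1}{f_1 + f(x)}\, f_0 \ \ \text{(Pegasus)}.
\]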
Math-5 | public Complex reciprocal() {
if (isNaN) {
return NaN;
}
if (real == 0.0 && imaginary == 0.0) {
return NaN;
}
if (isInfinite) {
return ZERO;
}
if (FastMath.abs(real) < FastMath.abs(imaginary)) {
double q = real / imaginary;
double scale = 1. / (real * q + imaginary);
return createComplex(scale * q, -scale);
} else {
double q = imaginary / real;
double scale = 1. / (imaginary * q + real);
return createComplex(scale, -scale * q);
}
}
public Complex reciprocal() {
if (isNaN) {
return NaN;
}
if (real == 0.0 && imaginary == 0.0) {
return INF;
}
if (isInfinite) {
return ZERO;
}
if (FastMath.abs(real) < FastMath.abs(imaginary)) {
double q = real / imaginary;
double scale = 1. / (real * q + imaginary);
return createComplex(scale * q, -scale);
} else {
double q = imaginary / real;
double scale = 1. / (imaginary * q + real);
return createComplex(scale, -scale * q);
}
} | src/main/java/org/apache/commons/math3/complex/Complex.java |
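A minimal usage sketch of the changed zero case, assuming a commons-math3 jar on the classpath; only members visible in the snippet above (ZERO, INF, reciprocal(), isInfinite()) are relied on:

import org.apache.commons.math3.complex.Complex;

public class ReciprocalOfZeroDemo {
    public static void main(String[] args) {
        // Before the fix the reciprocal of 0 + 0i was NaN; after it,
        // it is the point at infinity (Complex.INF).
        Complex r = Complex.ZERO.reciprocal();
        System.out.println(r.isInfinite()); // expected: true with the fixed code
    }
}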
Math-50 | protected final double doSolve() {
// Get initial solution
double x0 = getMin();
double x1 = getMax();
double f0 = computeObjectiveValue(x0);
double f1 = computeObjectiveValue(x1);
// If one of the bounds is the exact root, return it. Since these are
// not under-approximations or over-approximations, we can return them
// regardless of the allowed solutions.
if (f0 == 0.0) {
return x0;
}
if (f1 == 0.0) {
return x1;
}
// Verify bracketing of initial solution.
verifyBracketing(x0, x1);
// Get accuracies.
final double ftol = getFunctionValueAccuracy();
final double atol = getAbsoluteAccuracy();
final double rtol = getRelativeAccuracy();
// Keep track of inverted intervals, meaning that the left bound is
// larger than the right bound.
boolean inverted = false;
// Keep finding better approximations.
while (true) {
// Calculate the next approximation.
final double x = x1 - ((f1 * (x1 - x0)) / (f1 - f0));
final double fx = computeObjectiveValue(x);
// If the new approximation is the exact root, return it. Since
// this is not an under-approximation or an over-approximation,
// we can return it regardless of the allowed solutions.
if (fx == 0.0) {
return x;
}
// Update the bounds with the new approximation.
if (f1 * fx < 0) {
// The value of x1 has switched to the other bound, thus inverting
// the interval.
x0 = x1;
f0 = f1;
inverted = !inverted;
} else {
switch (method) {
case ILLINOIS:
f0 *= 0.5;
break;
case PEGASUS:
f0 *= f1 / (f1 + fx);
break;
case REGULA_FALSI:
// Nothing.
if (x == x1) {
x0 = 0.5 * (x0 + x1 - FastMath.max(rtol * FastMath.abs(x1), atol));
f0 = computeObjectiveValue(x0);
}
break;
default:
// Should never happen.
throw new MathInternalError();
}
}
// Update from [x0, x1] to [x0, x].
x1 = x;
f1 = fx;
// If the function value of the last approximation is too small,
// given the function value accuracy, then we can't get closer to
// the root than we already are.
if (FastMath.abs(f1) <= ftol) {
switch (allowed) {
case ANY_SIDE:
return x1;
case LEFT_SIDE:
if (inverted) {
return x1;
}
break;
case RIGHT_SIDE:
if (!inverted) {
return x1;
}
break;
case BELOW_SIDE:
if (f1 <= 0) {
return x1;
}
break;
case ABOVE_SIDE:
if (f1 >= 0) {
return x1;
}
break;
default:
throw new MathInternalError();
}
}
// If the current interval is within the given accuracies, we
// are satisfied with the current approximation.
if (FastMath.abs(x1 - x0) < FastMath.max(rtol * FastMath.abs(x1),
atol)) {
switch (allowed) {
case ANY_SIDE:
return x1;
case LEFT_SIDE:
return inverted ? x1 : x0;
case RIGHT_SIDE:
return inverted ? x0 : x1;
case BELOW_SIDE:
return (f1 <= 0) ? x1 : x0;
case ABOVE_SIDE:
return (f1 >= 0) ? x1 : x0;
default:
throw new MathInternalError();
}
}
}
}
protected final double doSolve() {
// Get initial solution
double x0 = getMin();
double x1 = getMax();
double f0 = computeObjectiveValue(x0);
double f1 = computeObjectiveValue(x1);
// If one of the bounds is the exact root, return it. Since these are
// not under-approximations or over-approximations, we can return them
// regardless of the allowed solutions.
if (f0 == 0.0) {
return x0;
}
if (f1 == 0.0) {
return x1;
}
// Verify bracketing of initial solution.
verifyBracketing(x0, x1);
// Get accuracies.
final double ftol = getFunctionValueAccuracy();
final double atol = getAbsoluteAccuracy();
final double rtol = getRelativeAccuracy();
// Keep track of inverted intervals, meaning that the left bound is
// larger than the right bound.
boolean inverted = false;
// Keep finding better approximations.
while (true) {
// Calculate the next approximation.
final double x = x1 - ((f1 * (x1 - x0)) / (f1 - f0));
final double fx = computeObjectiveValue(x);
// If the new approximation is the exact root, return it. Since
// this is not an under-approximation or an over-approximation,
// we can return it regardless of the allowed solutions.
if (fx == 0.0) {
return x;
}
// Update the bounds with the new approximation.
if (f1 * fx < 0) {
// The value of x1 has switched to the other bound, thus inverting
// the interval.
x0 = x1;
f0 = f1;
inverted = !inverted;
} else {
switch (method) {
case ILLINOIS:
f0 *= 0.5;
break;
case PEGASUS:
f0 *= f1 / (f1 + fx);
break;
case REGULA_FALSI:
// Nothing.
break;
default:
// Should never happen.
throw new MathInternalError();
}
}
// Update from [x0, x1] to [x0, x].
x1 = x;
f1 = fx;
// If the function value of the last approximation is too small,
// given the function value accuracy, then we can't get closer to
// the root than we already are.
if (FastMath.abs(f1) <= ftol) {
switch (allowed) {
case ANY_SIDE:
return x1;
case LEFT_SIDE:
if (inverted) {
return x1;
}
break;
case RIGHT_SIDE:
if (!inverted) {
return x1;
}
break;
case BELOW_SIDE:
if (f1 <= 0) {
return x1;
}
break;
case ABOVE_SIDE:
if (f1 >= 0) {
return x1;
}
break;
default:
throw new MathInternalError();
}
}
// If the current interval is within the given accuracies, we
// are satisfied with the current approximation.
if (FastMath.abs(x1 - x0) < FastMath.max(rtol * FastMath.abs(x1),
atol)) {
switch (allowed) {
case ANY_SIDE:
return x1;
case LEFT_SIDE:
return inverted ? x1 : x0;
case RIGHT_SIDE:
return inverted ? x0 : x1;
case BELOW_SIDE:
return (f1 <= 0) ? x1 : x0;
case ABOVE_SIDE:
return (f1 >= 0) ? x1 : x0;
default:
throw new MathInternalError();
}
}
}
} | src/main/java/org/apache/commons/math/analysis/solvers/BaseSecantSolver.java |
Math-51 | protected final double doSolve() {
// Get initial solution
double x0 = getMin();
double x1 = getMax();
double f0 = computeObjectiveValue(x0);
double f1 = computeObjectiveValue(x1);
// If one of the bounds is the exact root, return it. Since these are
// not under-approximations or over-approximations, we can return them
// regardless of the allowed solutions.
if (f0 == 0.0) {
return x0;
}
if (f1 == 0.0) {
return x1;
}
// Verify bracketing of initial solution.
verifyBracketing(x0, x1);
// Get accuracies.
final double ftol = getFunctionValueAccuracy();
final double atol = getAbsoluteAccuracy();
final double rtol = getRelativeAccuracy();
// Keep track of inverted intervals, meaning that the left bound is
// larger than the right bound.
boolean inverted = false;
// Keep finding better approximations.
while (true) {
// Calculate the next approximation.
final double x = x1 - ((f1 * (x1 - x0)) / (f1 - f0));
final double fx = computeObjectiveValue(x);
// If the new approximation is the exact root, return it. Since
// this is not an under-approximation or an over-approximation,
// we can return it regardless of the allowed solutions.
if (fx == 0.0) {
return x;
}
// Update the bounds with the new approximation.
if (f1 * fx < 0) {
// The value of x1 has switched to the other bound, thus inverting
// the interval.
x0 = x1;
f0 = f1;
inverted = !inverted;
} else {
switch (method) {
case ILLINOIS:
f0 *= 0.5;
break;
case PEGASUS:
f0 *= f1 / (f1 + fx);
break;
// Update formula cannot make any progress: Update the
// search interval.
default:
// Should never happen.
}
}
// Update from [x0, x1] to [x0, x].
x1 = x;
f1 = fx;
// If the function value of the last approximation is too small,
// given the function value accuracy, then we can't get closer to
// the root than we already are.
if (FastMath.abs(f1) <= ftol) {
switch (allowed) {
case ANY_SIDE:
return x1;
case LEFT_SIDE:
if (inverted) {
return x1;
}
break;
case RIGHT_SIDE:
if (!inverted) {
return x1;
}
break;
case BELOW_SIDE:
if (f1 <= 0) {
return x1;
}
break;
case ABOVE_SIDE:
if (f1 >= 0) {
return x1;
}
break;
default:
throw new MathInternalError();
}
}
// If the current interval is within the given accuracies, we
// are satisfied with the current approximation.
if (FastMath.abs(x1 - x0) < FastMath.max(rtol * FastMath.abs(x1),
atol)) {
switch (allowed) {
case ANY_SIDE:
return x1;
case LEFT_SIDE:
return inverted ? x1 : x0;
case RIGHT_SIDE:
return inverted ? x0 : x1;
case BELOW_SIDE:
return (f1 <= 0) ? x1 : x0;
case ABOVE_SIDE:
return (f1 >= 0) ? x1 : x0;
default:
throw new MathInternalError();
}
}
}
}
protected final double doSolve() {
// Get initial solution
double x0 = getMin();
double x1 = getMax();
double f0 = computeObjectiveValue(x0);
double f1 = computeObjectiveValue(x1);
// If one of the bounds is the exact root, return it. Since these are
// not under-approximations or over-approximations, we can return them
// regardless of the allowed solutions.
if (f0 == 0.0) {
return x0;
}
if (f1 == 0.0) {
return x1;
}
// Verify bracketing of initial solution.
verifyBracketing(x0, x1);
// Get accuracies.
final double ftol = getFunctionValueAccuracy();
final double atol = getAbsoluteAccuracy();
final double rtol = getRelativeAccuracy();
// Keep track of inverted intervals, meaning that the left bound is
// larger than the right bound.
boolean inverted = false;
// Keep finding better approximations.
while (true) {
// Calculate the next approximation.
final double x = x1 - ((f1 * (x1 - x0)) / (f1 - f0));
final double fx = computeObjectiveValue(x);
// If the new approximation is the exact root, return it. Since
// this is not an under-approximation or an over-approximation,
// we can return it regardless of the allowed solutions.
if (fx == 0.0) {
return x;
}
// Update the bounds with the new approximation.
if (f1 * fx < 0) {
// The value of x1 has switched to the other bound, thus inverting
// the interval.
x0 = x1;
f0 = f1;
inverted = !inverted;
} else {
switch (method) {
case ILLINOIS:
f0 *= 0.5;
break;
case PEGASUS:
f0 *= f1 / (f1 + fx);
break;
case REGULA_FALSI:
if (x == x1) {
final double delta = FastMath.max(rtol * FastMath.abs(x1),
atol);
// Update formula cannot make any progress: Update the
// search interval.
x0 = 0.5 * (x0 + x1 - delta);
f0 = computeObjectiveValue(x0);
}
break;
default:
// Should never happen.
throw new MathInternalError();
}
}
// Update from [x0, x1] to [x0, x].
x1 = x;
f1 = fx;
// If the function value of the last approximation is too small,
// given the function value accuracy, then we can't get closer to
// the root than we already are.
if (FastMath.abs(f1) <= ftol) {
switch (allowed) {
case ANY_SIDE:
return x1;
case LEFT_SIDE:
if (inverted) {
return x1;
}
break;
case RIGHT_SIDE:
if (!inverted) {
return x1;
}
break;
case BELOW_SIDE:
if (f1 <= 0) {
return x1;
}
break;
case ABOVE_SIDE:
if (f1 >= 0) {
return x1;
}
break;
default:
throw new MathInternalError();
}
}
// If the current interval is within the given accuracies, we
// are satisfied with the current approximation.
if (FastMath.abs(x1 - x0) < FastMath.max(rtol * FastMath.abs(x1),
atol)) {
switch (allowed) {
case ANY_SIDE:
return x1;
case LEFT_SIDE:
return inverted ? x1 : x0;
case RIGHT_SIDE:
return inverted ? x0 : x1;
case BELOW_SIDE:
return (f1 <= 0) ? x1 : x0;
case ABOVE_SIDE:
return (f1 >= 0) ? x1 : x0;
default:
throw new MathInternalError();
}
}
}
} | src/main/java/org/apache/commons/math/analysis/solvers/BaseSecantSolver.java |
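The REGULA_FALSI branch added above breaks a stagnating iteration (x == x1) by pulling the retained endpoint toward the current approximation instead of failing; with the tolerances used in the code:

\[
\delta = \max(\mathrm{rtol}\,|x_1|,\ \mathrm{atol}), \qquad
x_0 \leftarrow \tfrac{1}{2}\,(x_0 + x_1 - \delta), \qquad
f_0 \leftarrow f(x_0).
\]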
Math-52 | public Rotation(Vector3D u1, Vector3D u2, Vector3D v1, Vector3D v2) {
// norms computation
double u1u1 = u1.getNormSq();
double u2u2 = u2.getNormSq();
double v1v1 = v1.getNormSq();
double v2v2 = v2.getNormSq();
if ((u1u1 == 0) || (u2u2 == 0) || (v1v1 == 0) || (v2v2 == 0)) {
throw MathRuntimeException.createIllegalArgumentException(LocalizedFormats.ZERO_NORM_FOR_ROTATION_DEFINING_VECTOR);
}
// normalize v1 in order to have (v1'|v1') = (u1|u1)
v1 = new Vector3D(FastMath.sqrt(u1u1 / v1v1), v1);
// adjust v2 in order to have (u1|u2) = (v1'|v2') and (v2'|v2') = (u2|u2)
double u1u2 = u1.dotProduct(u2);
double v1v2 = v1.dotProduct(v2);
double coeffU = u1u2 / u1u1;
double coeffV = v1v2 / u1u1;
double beta = FastMath.sqrt((u2u2 - u1u2 * coeffU) / (v2v2 - v1v2 * coeffV));
double alpha = coeffU - beta * coeffV;
v2 = new Vector3D(alpha, v1, beta, v2);
// preliminary computation
Vector3D uRef = u1;
Vector3D vRef = v1;
Vector3D v1Su1 = v1.subtract(u1);
Vector3D v2Su2 = v2.subtract(u2);
Vector3D k = v1Su1.crossProduct(v2Su2);
Vector3D u3 = u1.crossProduct(u2);
double c = k.dotProduct(u3);
if (c == 0) {
// the (q1, q2, q3) vector is close to the (u1, u2) plane
// we try other vectors
Vector3D v3 = Vector3D.crossProduct(v1, v2);
Vector3D v3Su3 = v3.subtract(u3);
k = v1Su1.crossProduct(v3Su3);
Vector3D u2Prime = u1.crossProduct(u3);
c = k.dotProduct(u2Prime);
if (c == 0) {
// the (q1, q2, q3) vector is also close to the (u1, u3) plane,
// it is almost aligned with u1: we try (u2, u3) and (v2, v3)
                k = v2Su2.crossProduct(v3Su3);
                c = k.dotProduct(u2.crossProduct(u3));
if (c == 0) {
// the (q1, q2, q3) vector is aligned with everything
// this is really the identity rotation
q0 = 1.0;
q1 = 0.0;
q2 = 0.0;
q3 = 0.0;
return;
}
// we will have to use u2 and v2 to compute the scalar part
uRef = u2;
vRef = v2;
}
}
// compute the vectorial part
c = FastMath.sqrt(c);
double inv = 1.0 / (c + c);
q1 = inv * k.getX();
q2 = inv * k.getY();
q3 = inv * k.getZ();
// compute the scalar part
k = new Vector3D(uRef.getY() * q3 - uRef.getZ() * q2,
uRef.getZ() * q1 - uRef.getX() * q3,
uRef.getX() * q2 - uRef.getY() * q1);
q0 = vRef.dotProduct(k) / (2 * k.getNormSq());
}
public Rotation(Vector3D u1, Vector3D u2, Vector3D v1, Vector3D v2) {
// norms computation
double u1u1 = u1.getNormSq();
double u2u2 = u2.getNormSq();
double v1v1 = v1.getNormSq();
double v2v2 = v2.getNormSq();
if ((u1u1 == 0) || (u2u2 == 0) || (v1v1 == 0) || (v2v2 == 0)) {
throw MathRuntimeException.createIllegalArgumentException(LocalizedFormats.ZERO_NORM_FOR_ROTATION_DEFINING_VECTOR);
}
// normalize v1 in order to have (v1'|v1') = (u1|u1)
v1 = new Vector3D(FastMath.sqrt(u1u1 / v1v1), v1);
// adjust v2 in order to have (u1|u2) = (v1'|v2') and (v2'|v2') = (u2|u2)
double u1u2 = u1.dotProduct(u2);
double v1v2 = v1.dotProduct(v2);
double coeffU = u1u2 / u1u1;
double coeffV = v1v2 / u1u1;
double beta = FastMath.sqrt((u2u2 - u1u2 * coeffU) / (v2v2 - v1v2 * coeffV));
double alpha = coeffU - beta * coeffV;
v2 = new Vector3D(alpha, v1, beta, v2);
// preliminary computation
Vector3D uRef = u1;
Vector3D vRef = v1;
Vector3D v1Su1 = v1.subtract(u1);
Vector3D v2Su2 = v2.subtract(u2);
Vector3D k = v1Su1.crossProduct(v2Su2);
Vector3D u3 = u1.crossProduct(u2);
double c = k.dotProduct(u3);
final double inPlaneThreshold = 0.001;
if (c <= inPlaneThreshold * k.getNorm() * u3.getNorm()) {
// the (q1, q2, q3) vector is close to the (u1, u2) plane
// we try other vectors
Vector3D v3 = Vector3D.crossProduct(v1, v2);
Vector3D v3Su3 = v3.subtract(u3);
k = v1Su1.crossProduct(v3Su3);
Vector3D u2Prime = u1.crossProduct(u3);
c = k.dotProduct(u2Prime);
if (c <= inPlaneThreshold * k.getNorm() * u2Prime.getNorm()) {
// the (q1, q2, q3) vector is also close to the (u1, u3) plane,
// it is almost aligned with u1: we try (u2, u3) and (v2, v3)
                k = v2Su2.crossProduct(v3Su3);
                c = k.dotProduct(u2.crossProduct(u3));
if (c <= 0) {
// the (q1, q2, q3) vector is aligned with everything
// this is really the identity rotation
q0 = 1.0;
q1 = 0.0;
q2 = 0.0;
q3 = 0.0;
return;
}
// we will have to use u2 and v2 to compute the scalar part
uRef = u2;
vRef = v2;
}
}
// compute the vectorial part
c = FastMath.sqrt(c);
double inv = 1.0 / (c + c);
q1 = inv * k.getX();
q2 = inv * k.getY();
q3 = inv * k.getZ();
// compute the scalar part
k = new Vector3D(uRef.getY() * q3 - uRef.getZ() * q2,
uRef.getZ() * q1 - uRef.getX() * q3,
uRef.getX() * q2 - uRef.getY() * q1);
q0 = vRef.dotProduct(k) / (2 * k.getNormSq());
} | src/main/java/org/apache/commons/math/geometry/euclidean/threed/Rotation.java |
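The key change above is replacing the exact test c == 0 with a relative one: the fallback vectors are now used whenever

\[
c = k \cdot u_3 \le 0.001\,\lVert k\rVert\,\lVert u_3\rVert,
\]

so nearly degenerate configurations no longer feed a tiny, noise-dominated c into the square root that builds the quaternion.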
Math-53 | public Complex add(Complex rhs)
throws NullArgumentException {
MathUtils.checkNotNull(rhs);
return createComplex(real + rhs.getReal(),
imaginary + rhs.getImaginary());
}
public Complex add(Complex rhs)
throws NullArgumentException {
MathUtils.checkNotNull(rhs);
if (isNaN || rhs.isNaN) {
return NaN;
}
return createComplex(real + rhs.getReal(),
imaginary + rhs.getImaginary());
} | src/main/java/org/apache/commons/math/complex/Complex.java |
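A small usage sketch of the NaN short-circuit, assuming the commons-math 2.x Complex shown above with its public two-argument constructor:

import org.apache.commons.math.complex.Complex;

public class AddNaNDemo {
    public static void main(String[] args) {
        Complex x = new Complex(1.0, 1.0);
        Complex z = new Complex(1.0, Double.NaN);
        Complex w = x.add(z);
        // Without the check, w is (2, NaN); with it, w is the canonical NaN,
        // so both parts are NaN.
        System.out.println(Double.isNaN(w.getReal()) && Double.isNaN(w.getImaginary()));
    }
}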
Math-55 | public static Vector3D crossProduct(final Vector3D v1, final Vector3D v2) {
// rescale both vectors without losing precision,
// to ensure their norm are the same order of magnitude
// we reduce cancellation errors by preconditioning,
// we replace v1 by v3 = v1 - rho v2 with rho chosen in order to compute
// v3 without loss of precision. See Kahan lecture
// "Computing Cross-Products and Rotations in 2- and 3-Dimensional Euclidean Spaces"
// available at http://www.cs.berkeley.edu/~wkahan/MathH110/Cross.pdf
// compute rho as an 8 bits approximation of v1.v2 / v2.v2
// compute cross product from v3 and v2 instead of v1 and v2
return new Vector3D(v1.y * v2.z - v1.z * v2.y, v1.z * v2.x - v1.x * v2.z, v1.x * v2.y - v1.y * v2.x);
}
public static Vector3D crossProduct(final Vector3D v1, final Vector3D v2) {
final double n1 = v1.getNormSq();
final double n2 = v2.getNormSq();
if ((n1 * n2) < MathUtils.SAFE_MIN) {
return ZERO;
}
// rescale both vectors without losing precision,
// to ensure their norm are the same order of magnitude
final int deltaExp = (FastMath.getExponent(n1) - FastMath.getExponent(n2)) / 4;
final double x1 = FastMath.scalb(v1.x, -deltaExp);
final double y1 = FastMath.scalb(v1.y, -deltaExp);
final double z1 = FastMath.scalb(v1.z, -deltaExp);
final double x2 = FastMath.scalb(v2.x, deltaExp);
final double y2 = FastMath.scalb(v2.y, deltaExp);
final double z2 = FastMath.scalb(v2.z, deltaExp);
// we reduce cancellation errors by preconditioning,
// we replace v1 by v3 = v1 - rho v2 with rho chosen in order to compute
// v3 without loss of precision. See Kahan lecture
// "Computing Cross-Products and Rotations in 2- and 3-Dimensional Euclidean Spaces"
// available at http://www.cs.berkeley.edu/~wkahan/MathH110/Cross.pdf
// compute rho as an 8 bits approximation of v1.v2 / v2.v2
final double ratio = (x1 * x2 + y1 * y2 + z1 * z2) / FastMath.scalb(n2, 2 * deltaExp);
final double rho = FastMath.rint(256 * ratio) / 256;
final double x3 = x1 - rho * x2;
final double y3 = y1 - rho * y2;
final double z3 = z1 - rho * z2;
// compute cross product from v3 and v2 instead of v1 and v2
return new Vector3D(y3 * z2 - z3 * y2, z3 * x2 - x3 * z2, x3 * y2 - y3 * x2);
} | src/main/java/org/apache/commons/math/geometry/Vector3D.java |
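The preconditioning in the fixed version relies on the identity stated in the Kahan reference cited in the comments: for any rho (here an 8-bit rounding of the ratio computed in the code),

\[
v_3 = v_1 - \rho\,v_2
\quad\Longrightarrow\quad
v_3 \times v_2 = v_1 \times v_2 - \rho\,(v_2 \times v_2) = v_1 \times v_2,
\]

so the result is unchanged in exact arithmetic while cancellation is greatly reduced when v1 and v2 are nearly parallel.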
Math-56 | public int[] getCounts(int index) {
if (index < 0 ||
index >= totalSize) {
throw new OutOfRangeException(index, 0, totalSize);
}
final int[] indices = new int[dimension];
int count = 0;
for (int i = 0; i < last; i++) {
int idx = 0;
final int offset = uniCounterOffset[i];
while (count <= index) {
count += offset;
++idx;
}
--idx;
count -= offset;
indices[i] = idx;
}
int idx = 1;
while (count < index) {
count += idx;
++idx;
}
--idx;
indices[last] = idx;
return indices;
}
public int[] getCounts(int index) {
if (index < 0 ||
index >= totalSize) {
throw new OutOfRangeException(index, 0, totalSize);
}
final int[] indices = new int[dimension];
int count = 0;
for (int i = 0; i < last; i++) {
int idx = 0;
final int offset = uniCounterOffset[i];
while (count <= index) {
count += offset;
++idx;
}
--idx;
count -= offset;
indices[i] = idx;
}
indices[last] = index - count;
return indices;
} | src/main/java/org/apache/commons/math/util/MultidimensionalCounter.java |
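A short usage sketch of the corrected last coordinate, assuming the class's varargs int constructor; the index value is chosen so the two versions disagree:

import org.apache.commons.math.util.MultidimensionalCounter;

public class GetCountsDemo {
    public static void main(String[] args) {
        // 3 x 4 counter, row-major: unidimensional index 7 maps to (1, 3).
        // The old loop over the last dimension returned (1, 2) for this index;
        // the fix computes the last coordinate directly as index - count.
        MultidimensionalCounter counter = new MultidimensionalCounter(3, 4);
        int[] indices = counter.getCounts(7);
        System.out.println(indices[0] + ", " + indices[1]); // expected: 1, 3
    }
}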
Math-57 | private static <T extends Clusterable<T>> List<Cluster<T>>
chooseInitialCenters(final Collection<T> points, final int k, final Random random) {
final List<T> pointSet = new ArrayList<T>(points);
final List<Cluster<T>> resultSet = new ArrayList<Cluster<T>>();
// Choose one center uniformly at random from among the data points.
final T firstPoint = pointSet.remove(random.nextInt(pointSet.size()));
resultSet.add(new Cluster<T>(firstPoint));
final double[] dx2 = new double[pointSet.size()];
while (resultSet.size() < k) {
// For each data point x, compute D(x), the distance between x and
// the nearest center that has already been chosen.
int sum = 0;
for (int i = 0; i < pointSet.size(); i++) {
final T p = pointSet.get(i);
final Cluster<T> nearest = getNearestCluster(resultSet, p);
final double d = p.distanceFrom(nearest.getCenter());
sum += d * d;
dx2[i] = sum;
}
// Add one new data point as a center. Each point x is chosen with
            // probability proportional to D(x)^2
final double r = random.nextDouble() * sum;
for (int i = 0 ; i < dx2.length; i++) {
if (dx2[i] >= r) {
final T p = pointSet.remove(i);
resultSet.add(new Cluster<T>(p));
break;
}
}
}
return resultSet;
}
private static <T extends Clusterable<T>> List<Cluster<T>>
chooseInitialCenters(final Collection<T> points, final int k, final Random random) {
final List<T> pointSet = new ArrayList<T>(points);
final List<Cluster<T>> resultSet = new ArrayList<Cluster<T>>();
// Choose one center uniformly at random from among the data points.
final T firstPoint = pointSet.remove(random.nextInt(pointSet.size()));
resultSet.add(new Cluster<T>(firstPoint));
final double[] dx2 = new double[pointSet.size()];
while (resultSet.size() < k) {
// For each data point x, compute D(x), the distance between x and
// the nearest center that has already been chosen.
double sum = 0;
for (int i = 0; i < pointSet.size(); i++) {
final T p = pointSet.get(i);
final Cluster<T> nearest = getNearestCluster(resultSet, p);
final double d = p.distanceFrom(nearest.getCenter());
sum += d * d;
dx2[i] = sum;
}
// Add one new data point as a center. Each point x is chosen with
            // probability proportional to D(x)^2
final double r = random.nextDouble() * sum;
for (int i = 0 ; i < dx2.length; i++) {
if (dx2[i] >= r) {
final T p = pointSet.remove(i);
resultSet.add(new Cluster<T>(p));
break;
}
}
}
return resultSet;
} | src/main/java/org/apache/commons/math/stat/clustering/KMeansPlusPlusClusterer.java |
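The only change above is the type of the accumulator (int to double). A standalone sketch, with made-up distances, of why integer accumulation breaks the D(x)^2-proportional sampling:

public class IntSumTruncationDemo {
    public static void main(String[] args) {
        double[] d = {0.4, 0.7, 0.9};      // illustrative distances only
        int intSum = 0;
        double doubleSum = 0.0;
        for (double v : d) {
            intSum += v * v;               // each partial sum is truncated to 0
            doubleSum += v * v;
        }
        System.out.println(intSum);        // 0: every point gets zero weight
        System.out.println(doubleSum);     // about 1.46
    }
}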
Math-58 | public double[] fit() {
final double[] guess = (new ParameterGuesser(getObservations())).guess();
return fit(new Gaussian.Parametric(), guess);
}
public double[] fit() {
final double[] guess = (new ParameterGuesser(getObservations())).guess();
return fit(guess);
} | src/main/java/org/apache/commons/math/optimization/fitting/GaussianFitter.java |
Math-59 | public static float max(final float a, final float b) {
return (a <= b) ? b : (Float.isNaN(a + b) ? Float.NaN : b);
}
public static float max(final float a, final float b) {
return (a <= b) ? b : (Float.isNaN(a + b) ? Float.NaN : a);
} | src/main/java/org/apache/commons/math/util/FastMath.java |
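A standalone sketch of the one-character defect, evaluating both expressions side by side on plain floats:

public class FloatMaxDemo {
    public static void main(String[] args) {
        float a = 50.0f;
        float b = -50.0f;
        // Old expression: the non-NaN branch returns b, so max(50, -50) == -50.
        float before = (a <= b) ? b : (Float.isNaN(a + b) ? Float.NaN : b);
        // Fixed expression: the non-NaN branch returns a, the larger operand.
        float after = (a <= b) ? b : (Float.isNaN(a + b) ? Float.NaN : a);
        System.out.println(before + " vs " + after); // -50.0 vs 50.0
    }
}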
Math-60 | public double cumulativeProbability(double x) throws MathException {
final double dev = x - mean;
try {
return 0.5 * (1.0 + Erf.erf((dev) /
(standardDeviation * FastMath.sqrt(2.0))));
} catch (MaxIterationsExceededException ex) {
if (x < (mean - 20 * standardDeviation)) { // JDK 1.5 blows at 38
return 0;
} else if (x > (mean + 20 * standardDeviation)) {
return 1;
} else {
throw ex;
}
}
}
public double cumulativeProbability(double x) throws MathException {
final double dev = x - mean;
if (FastMath.abs(dev) > 40 * standardDeviation) {
return dev < 0 ? 0.0d : 1.0d;
}
return 0.5 * (1.0 + Erf.erf((dev) /
(standardDeviation * FastMath.sqrt(2.0))));
} | src/main/java/org/apache/commons/math/distribution/NormalDistributionImpl.java |
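A usage sketch of the far-tail short-circuit, assuming the usual (mean, standard deviation) constructor of the class shown above:

import org.apache.commons.math.MathException;
import org.apache.commons.math.distribution.NormalDistributionImpl;

public class FarTailCdfDemo {
    public static void main(String[] args) throws MathException {
        NormalDistributionImpl normal = new NormalDistributionImpl(0.0, 1.0);
        // 100 standard deviations from the mean: more than the 40-sigma cutoff,
        // so the fixed code returns 0 or 1 instead of risking an Erf failure.
        System.out.println(normal.cumulativeProbability(-100.0)); // 0.0
        System.out.println(normal.cumulativeProbability(100.0));  // 1.0
    }
}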
Math-63 | public static boolean equals(double x, double y) {
return (Double.isNaN(x) && Double.isNaN(y)) || x == y;
}
public static boolean equals(double x, double y) {
return equals(x, y, 1);
} | src/main/java/org/apache/commons/math/util/MathUtils.java |
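A small sketch of the behavioural change, assuming the MathUtils class shown above; adjacent doubles (one ulp apart) now compare equal because the call delegates to equals(x, y, 1):

import org.apache.commons.math.util.MathUtils;

public class UlpEqualsDemo {
    public static void main(String[] args) {
        double x = 1.0;
        double y = Math.nextUp(1.0); // exactly one ulp above x
        // Old behaviour: strict ==, prints false. New behaviour: prints true.
        System.out.println(MathUtils.equals(x, y));
    }
}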
Math-64 | protected VectorialPointValuePair doOptimize()
throws FunctionEvaluationException, OptimizationException, IllegalArgumentException {
// arrays shared with the other private methods
solvedCols = Math.min(rows, cols);
diagR = new double[cols];
jacNorm = new double[cols];
beta = new double[cols];
permutation = new int[cols];
lmDir = new double[cols];
// local point
double delta = 0;
double xNorm = 0;
double[] diag = new double[cols];
double[] oldX = new double[cols];
double[] oldRes = new double[rows];
double[] work1 = new double[cols];
double[] work2 = new double[cols];
double[] work3 = new double[cols];
// evaluate the function at the starting point and calculate its norm
updateResidualsAndCost();
// outer loop
lmPar = 0;
boolean firstIteration = true;
VectorialPointValuePair current = new VectorialPointValuePair(point, objective);
while (true) {
incrementIterationsCounter();
// compute the Q.R. decomposition of the jacobian matrix
VectorialPointValuePair previous = current;
updateJacobian();
qrDecomposition();
// compute Qt.res
qTy(residuals);
// now we don't need Q anymore,
// so let jacobian contain the R matrix with its diagonal elements
for (int k = 0; k < solvedCols; ++k) {
int pk = permutation[k];
jacobian[k][pk] = diagR[pk];
}
if (firstIteration) {
// scale the point according to the norms of the columns
// of the initial jacobian
xNorm = 0;
for (int k = 0; k < cols; ++k) {
double dk = jacNorm[k];
if (dk == 0) {
dk = 1.0;
}
double xk = dk * point[k];
xNorm += xk * xk;
diag[k] = dk;
}
xNorm = Math.sqrt(xNorm);
// initialize the step bound delta
delta = (xNorm == 0) ? initialStepBoundFactor : (initialStepBoundFactor * xNorm);
}
// check orthogonality between function vector and jacobian columns
double maxCosine = 0;
if (cost != 0) {
for (int j = 0; j < solvedCols; ++j) {
int pj = permutation[j];
double s = jacNorm[pj];
if (s != 0) {
double sum = 0;
for (int i = 0; i <= j; ++i) {
sum += jacobian[i][pj] * residuals[i];
}
maxCosine = Math.max(maxCosine, Math.abs(sum) / (s * cost));
}
}
}
if (maxCosine <= orthoTolerance) {
// convergence has been reached
return current;
}
// rescale if necessary
for (int j = 0; j < cols; ++j) {
diag[j] = Math.max(diag[j], jacNorm[j]);
}
// inner loop
for (double ratio = 0; ratio < 1.0e-4;) {
// save the state
for (int j = 0; j < solvedCols; ++j) {
int pj = permutation[j];
oldX[pj] = point[pj];
}
double previousCost = cost;
double[] tmpVec = residuals;
residuals = oldRes;
oldRes = tmpVec;
// determine the Levenberg-Marquardt parameter
determineLMParameter(oldRes, delta, diag, work1, work2, work3);
// compute the new point and the norm of the evolution direction
double lmNorm = 0;
for (int j = 0; j < solvedCols; ++j) {
int pj = permutation[j];
lmDir[pj] = -lmDir[pj];
point[pj] = oldX[pj] + lmDir[pj];
double s = diag[pj] * lmDir[pj];
lmNorm += s * s;
}
lmNorm = Math.sqrt(lmNorm);
// on the first iteration, adjust the initial step bound.
if (firstIteration) {
delta = Math.min(delta, lmNorm);
}
// evaluate the function at x + p and calculate its norm
updateResidualsAndCost();
current = new VectorialPointValuePair(point, objective);
// compute the scaled actual reduction
double actRed = -1.0;
if (0.1 * cost < previousCost) {
double r = cost / previousCost;
actRed = 1.0 - r * r;
}
// compute the scaled predicted reduction
// and the scaled directional derivative
for (int j = 0; j < solvedCols; ++j) {
int pj = permutation[j];
double dirJ = lmDir[pj];
work1[j] = 0;
for (int i = 0; i <= j; ++i) {
work1[i] += jacobian[i][pj] * dirJ;
}
}
double coeff1 = 0;
for (int j = 0; j < solvedCols; ++j) {
coeff1 += work1[j] * work1[j];
}
double pc2 = previousCost * previousCost;
coeff1 = coeff1 / pc2;
double coeff2 = lmPar * lmNorm * lmNorm / pc2;
double preRed = coeff1 + 2 * coeff2;
double dirDer = -(coeff1 + coeff2);
// ratio of the actual to the predicted reduction
ratio = (preRed == 0) ? 0 : (actRed / preRed);
// update the step bound
if (ratio <= 0.25) {
double tmp =
(actRed < 0) ? (0.5 * dirDer / (dirDer + 0.5 * actRed)) : 0.5;
if ((0.1 * cost >= previousCost) || (tmp < 0.1)) {
tmp = 0.1;
}
delta = tmp * Math.min(delta, 10.0 * lmNorm);
lmPar /= tmp;
} else if ((lmPar == 0) || (ratio >= 0.75)) {
delta = 2 * lmNorm;
lmPar *= 0.5;
}
// test for successful iteration.
if (ratio >= 1.0e-4) {
// successful iteration, update the norm
firstIteration = false;
xNorm = 0;
for (int k = 0; k < cols; ++k) {
double xK = diag[k] * point[k];
xNorm += xK * xK;
}
xNorm = Math.sqrt(xNorm);
// tests for convergence.
// we use the vectorial convergence checker
} else {
// failed iteration, reset the previous values
cost = previousCost;
for (int j = 0; j < solvedCols; ++j) {
int pj = permutation[j];
point[pj] = oldX[pj];
}
tmpVec = residuals;
residuals = oldRes;
oldRes = tmpVec;
}
if (checker==null) {
if (((Math.abs(actRed) <= costRelativeTolerance) &&
(preRed <= costRelativeTolerance) &&
(ratio <= 2.0)) ||
(delta <= parRelativeTolerance * xNorm)) {
return current;
}
} else {
if (checker.converged(getIterations(), previous, current)) {
return current;
}
}
// tests for termination and stringent tolerances
// (2.2204e-16 is the machine epsilon for IEEE754)
if ((Math.abs(actRed) <= 2.2204e-16) && (preRed <= 2.2204e-16) && (ratio <= 2.0)) {
throw new OptimizationException(LocalizedFormats.TOO_SMALL_COST_RELATIVE_TOLERANCE,
costRelativeTolerance);
} else if (delta <= 2.2204e-16 * xNorm) {
throw new OptimizationException(LocalizedFormats.TOO_SMALL_PARAMETERS_RELATIVE_TOLERANCE,
parRelativeTolerance);
} else if (maxCosine <= 2.2204e-16) {
throw new OptimizationException(LocalizedFormats.TOO_SMALL_ORTHOGONALITY_TOLERANCE,
orthoTolerance);
}
}
}
}
protected VectorialPointValuePair doOptimize()
throws FunctionEvaluationException, OptimizationException, IllegalArgumentException {
// arrays shared with the other private methods
solvedCols = Math.min(rows, cols);
diagR = new double[cols];
jacNorm = new double[cols];
beta = new double[cols];
permutation = new int[cols];
lmDir = new double[cols];
// local point
double delta = 0;
double xNorm = 0;
double[] diag = new double[cols];
double[] oldX = new double[cols];
double[] oldRes = new double[rows];
double[] oldObj = new double[rows];
double[] qtf = new double[rows];
double[] work1 = new double[cols];
double[] work2 = new double[cols];
double[] work3 = new double[cols];
// evaluate the function at the starting point and calculate its norm
updateResidualsAndCost();
// outer loop
lmPar = 0;
boolean firstIteration = true;
VectorialPointValuePair current = new VectorialPointValuePair(point, objective);
while (true) {
for (int i=0;i<rows;i++) {
qtf[i]=residuals[i];
}
incrementIterationsCounter();
// compute the Q.R. decomposition of the jacobian matrix
VectorialPointValuePair previous = current;
updateJacobian();
qrDecomposition();
// compute Qt.res
qTy(qtf);
// now we don't need Q anymore,
// so let jacobian contain the R matrix with its diagonal elements
for (int k = 0; k < solvedCols; ++k) {
int pk = permutation[k];
jacobian[k][pk] = diagR[pk];
}
if (firstIteration) {
// scale the point according to the norms of the columns
// of the initial jacobian
xNorm = 0;
for (int k = 0; k < cols; ++k) {
double dk = jacNorm[k];
if (dk == 0) {
dk = 1.0;
}
double xk = dk * point[k];
xNorm += xk * xk;
diag[k] = dk;
}
xNorm = Math.sqrt(xNorm);
// initialize the step bound delta
delta = (xNorm == 0) ? initialStepBoundFactor : (initialStepBoundFactor * xNorm);
}
// check orthogonality between function vector and jacobian columns
double maxCosine = 0;
if (cost != 0) {
for (int j = 0; j < solvedCols; ++j) {
int pj = permutation[j];
double s = jacNorm[pj];
if (s != 0) {
double sum = 0;
for (int i = 0; i <= j; ++i) {
sum += jacobian[i][pj] * qtf[i];
}
maxCosine = Math.max(maxCosine, Math.abs(sum) / (s * cost));
}
}
}
if (maxCosine <= orthoTolerance) {
// convergence has been reached
updateResidualsAndCost();
current = new VectorialPointValuePair(point, objective);
return current;
}
// rescale if necessary
for (int j = 0; j < cols; ++j) {
diag[j] = Math.max(diag[j], jacNorm[j]);
}
// inner loop
for (double ratio = 0; ratio < 1.0e-4;) {
// save the state
for (int j = 0; j < solvedCols; ++j) {
int pj = permutation[j];
oldX[pj] = point[pj];
}
double previousCost = cost;
double[] tmpVec = residuals;
residuals = oldRes;
oldRes = tmpVec;
tmpVec = objective;
objective = oldObj;
oldObj = tmpVec;
// determine the Levenberg-Marquardt parameter
determineLMParameter(qtf, delta, diag, work1, work2, work3);
// compute the new point and the norm of the evolution direction
double lmNorm = 0;
for (int j = 0; j < solvedCols; ++j) {
int pj = permutation[j];
lmDir[pj] = -lmDir[pj];
point[pj] = oldX[pj] + lmDir[pj];
double s = diag[pj] * lmDir[pj];
lmNorm += s * s;
}
lmNorm = Math.sqrt(lmNorm);
// on the first iteration, adjust the initial step bound.
if (firstIteration) {
delta = Math.min(delta, lmNorm);
}
// evaluate the function at x + p and calculate its norm
updateResidualsAndCost();
// compute the scaled actual reduction
double actRed = -1.0;
if (0.1 * cost < previousCost) {
double r = cost / previousCost;
actRed = 1.0 - r * r;
}
// compute the scaled predicted reduction
// and the scaled directional derivative
for (int j = 0; j < solvedCols; ++j) {
int pj = permutation[j];
double dirJ = lmDir[pj];
work1[j] = 0;
for (int i = 0; i <= j; ++i) {
work1[i] += jacobian[i][pj] * dirJ;
}
}
double coeff1 = 0;
for (int j = 0; j < solvedCols; ++j) {
coeff1 += work1[j] * work1[j];
}
double pc2 = previousCost * previousCost;
coeff1 = coeff1 / pc2;
double coeff2 = lmPar * lmNorm * lmNorm / pc2;
double preRed = coeff1 + 2 * coeff2;
double dirDer = -(coeff1 + coeff2);
// ratio of the actual to the predicted reduction
ratio = (preRed == 0) ? 0 : (actRed / preRed);
// update the step bound
if (ratio <= 0.25) {
double tmp =
(actRed < 0) ? (0.5 * dirDer / (dirDer + 0.5 * actRed)) : 0.5;
if ((0.1 * cost >= previousCost) || (tmp < 0.1)) {
tmp = 0.1;
}
delta = tmp * Math.min(delta, 10.0 * lmNorm);
lmPar /= tmp;
} else if ((lmPar == 0) || (ratio >= 0.75)) {
delta = 2 * lmNorm;
lmPar *= 0.5;
}
// test for successful iteration.
if (ratio >= 1.0e-4) {
// successful iteration, update the norm
firstIteration = false;
xNorm = 0;
for (int k = 0; k < cols; ++k) {
double xK = diag[k] * point[k];
xNorm += xK * xK;
}
xNorm = Math.sqrt(xNorm);
current = new VectorialPointValuePair(point, objective);
// tests for convergence.
if (checker != null) {
// we use the vectorial convergence checker
if (checker.converged(getIterations(), previous, current)) {
return current;
}
}
} else {
// failed iteration, reset the previous values
cost = previousCost;
for (int j = 0; j < solvedCols; ++j) {
int pj = permutation[j];
point[pj] = oldX[pj];
}
tmpVec = residuals;
residuals = oldRes;
oldRes = tmpVec;
tmpVec = objective;
objective = oldObj;
oldObj = tmpVec;
}
if (checker==null) {
if (((Math.abs(actRed) <= costRelativeTolerance) &&
(preRed <= costRelativeTolerance) &&
(ratio <= 2.0)) ||
(delta <= parRelativeTolerance * xNorm)) {
return current;
}
}
// tests for termination and stringent tolerances
// (2.2204e-16 is the machine epsilon for IEEE754)
if ((Math.abs(actRed) <= 2.2204e-16) && (preRed <= 2.2204e-16) && (ratio <= 2.0)) {
throw new OptimizationException(LocalizedFormats.TOO_SMALL_COST_RELATIVE_TOLERANCE,
costRelativeTolerance);
} else if (delta <= 2.2204e-16 * xNorm) {
throw new OptimizationException(LocalizedFormats.TOO_SMALL_PARAMETERS_RELATIVE_TOLERANCE,
parRelativeTolerance);
} else if (maxCosine <= 2.2204e-16) {
throw new OptimizationException(LocalizedFormats.TOO_SMALL_ORTHOGONALITY_TOLERANCE,
orthoTolerance);
}
}
}
} | src/main/java/org/apache/commons/math/optimization/general/LevenbergMarquardtOptimizer.java |
Math-69 | public RealMatrix getCorrelationPValues() throws MathException {
TDistribution tDistribution = new TDistributionImpl(nObs - 2);
int nVars = correlationMatrix.getColumnDimension();
double[][] out = new double[nVars][nVars];
for (int i = 0; i < nVars; i++) {
for (int j = 0; j < nVars; j++) {
if (i == j) {
out[i][j] = 0d;
} else {
double r = correlationMatrix.getEntry(i, j);
double t = Math.abs(r * Math.sqrt((nObs - 2)/(1 - r * r)));
out[i][j] = 2 * (1 - tDistribution.cumulativeProbability(t));
}
}
}
return new BlockRealMatrix(out);
}
public RealMatrix getCorrelationPValues() throws MathException {
TDistribution tDistribution = new TDistributionImpl(nObs - 2);
int nVars = correlationMatrix.getColumnDimension();
double[][] out = new double[nVars][nVars];
for (int i = 0; i < nVars; i++) {
for (int j = 0; j < nVars; j++) {
if (i == j) {
out[i][j] = 0d;
} else {
double r = correlationMatrix.getEntry(i, j);
double t = Math.abs(r * Math.sqrt((nObs - 2)/(1 - r * r)));
out[i][j] = 2 * tDistribution.cumulativeProbability(-t);
}
}
}
return new BlockRealMatrix(out);
} | src/main/java/org/apache/commons/math/stat/correlation/PearsonsCorrelation.java |
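The two formulas above are algebraically the same but behave very differently in floating point: with F the CDF of the t distribution on n - 2 degrees of freedom,

\[
p = 2\bigl(1 - F(|t|)\bigr) = 2\,F(-|t|),
\]

and for large |t| the first form returns exactly 0 because F(|t|) rounds to 1, while the second form evaluates the small tail probability directly.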
Math-7 | protected double acceptStep(final AbstractStepInterpolator interpolator,
final double[] y, final double[] yDot, final double tEnd)
throws MaxCountExceededException, DimensionMismatchException, NoBracketingException {
double previousT = interpolator.getGlobalPreviousTime();
final double currentT = interpolator.getGlobalCurrentTime();
// initialize the events states if needed
if (! statesInitialized) {
for (EventState state : eventsStates) {
state.reinitializeBegin(interpolator);
}
statesInitialized = true;
}
// search for next events that may occur during the step
final int orderingSign = interpolator.isForward() ? +1 : -1;
SortedSet<EventState> occuringEvents = new TreeSet<EventState>(new Comparator<EventState>() {
/** {@inheritDoc} */
public int compare(EventState es0, EventState es1) {
return orderingSign * Double.compare(es0.getEventTime(), es1.getEventTime());
}
});
for (final EventState state : eventsStates) {
if (state.evaluateStep(interpolator)) {
// the event occurs during the current step
occuringEvents.add(state);
}
}
while (!occuringEvents.isEmpty()) {
// handle the chronologically first event
final Iterator<EventState> iterator = occuringEvents.iterator();
final EventState currentEvent = iterator.next();
iterator.remove();
// restrict the interpolator to the first part of the step, up to the event
final double eventT = currentEvent.getEventTime();
interpolator.setSoftPreviousTime(previousT);
interpolator.setSoftCurrentTime(eventT);
// get state at event time
interpolator.setInterpolatedTime(eventT);
final double[] eventY = interpolator.getInterpolatedState().clone();
// advance all event states to current time
currentEvent.stepAccepted(eventT, eventY);
isLastStep = currentEvent.stop();
// handle the first part of the step, up to the event
for (final StepHandler handler : stepHandlers) {
handler.handleStep(interpolator, isLastStep);
}
if (isLastStep) {
// the event asked to stop integration
System.arraycopy(eventY, 0, y, 0, y.length);
for (final EventState remaining : occuringEvents) {
remaining.stepAccepted(eventT, eventY);
}
return eventT;
}
boolean needReset = currentEvent.reset(eventT, eventY);
if (needReset) {
// some event handler has triggered changes that
// invalidate the derivatives, we need to recompute them
System.arraycopy(eventY, 0, y, 0, y.length);
computeDerivatives(eventT, y, yDot);
resetOccurred = true;
for (final EventState remaining : occuringEvents) {
remaining.stepAccepted(eventT, eventY);
}
return eventT;
}
// prepare handling of the remaining part of the step
previousT = eventT;
interpolator.setSoftPreviousTime(eventT);
interpolator.setSoftCurrentTime(currentT);
// check if the same event occurs again in the remaining part of the step
if (currentEvent.evaluateStep(interpolator)) {
// the event occurs during the current step
occuringEvents.add(currentEvent);
}
}
// last part of the step, after the last event
interpolator.setInterpolatedTime(currentT);
final double[] currentY = interpolator.getInterpolatedState();
for (final EventState state : eventsStates) {
state.stepAccepted(currentT, currentY);
isLastStep = isLastStep || state.stop();
}
isLastStep = isLastStep || Precision.equals(currentT, tEnd, 1);
// handle the remaining part of the step, after all events if any
for (StepHandler handler : stepHandlers) {
handler.handleStep(interpolator, isLastStep);
}
return currentT;
}
protected double acceptStep(final AbstractStepInterpolator interpolator,
final double[] y, final double[] yDot, final double tEnd)
throws MaxCountExceededException, DimensionMismatchException, NoBracketingException {
double previousT = interpolator.getGlobalPreviousTime();
final double currentT = interpolator.getGlobalCurrentTime();
// initialize the events states if needed
if (! statesInitialized) {
for (EventState state : eventsStates) {
state.reinitializeBegin(interpolator);
}
statesInitialized = true;
}
// search for next events that may occur during the step
final int orderingSign = interpolator.isForward() ? +1 : -1;
SortedSet<EventState> occuringEvents = new TreeSet<EventState>(new Comparator<EventState>() {
/** {@inheritDoc} */
public int compare(EventState es0, EventState es1) {
return orderingSign * Double.compare(es0.getEventTime(), es1.getEventTime());
}
});
for (final EventState state : eventsStates) {
if (state.evaluateStep(interpolator)) {
// the event occurs during the current step
occuringEvents.add(state);
}
}
while (!occuringEvents.isEmpty()) {
// handle the chronologically first event
final Iterator<EventState> iterator = occuringEvents.iterator();
final EventState currentEvent = iterator.next();
iterator.remove();
// restrict the interpolator to the first part of the step, up to the event
final double eventT = currentEvent.getEventTime();
interpolator.setSoftPreviousTime(previousT);
interpolator.setSoftCurrentTime(eventT);
// get state at event time
interpolator.setInterpolatedTime(eventT);
final double[] eventY = interpolator.getInterpolatedState().clone();
// advance all event states to current time
for (final EventState state : eventsStates) {
state.stepAccepted(eventT, eventY);
isLastStep = isLastStep || state.stop();
}
// handle the first part of the step, up to the event
for (final StepHandler handler : stepHandlers) {
handler.handleStep(interpolator, isLastStep);
}
if (isLastStep) {
// the event asked to stop integration
System.arraycopy(eventY, 0, y, 0, y.length);
return eventT;
}
boolean needReset = false;
for (final EventState state : eventsStates) {
needReset = needReset || state.reset(eventT, eventY);
}
if (needReset) {
// some event handler has triggered changes that
// invalidate the derivatives, we need to recompute them
System.arraycopy(eventY, 0, y, 0, y.length);
computeDerivatives(eventT, y, yDot);
resetOccurred = true;
return eventT;
}
// prepare handling of the remaining part of the step
previousT = eventT;
interpolator.setSoftPreviousTime(eventT);
interpolator.setSoftCurrentTime(currentT);
// check if the same event occurs again in the remaining part of the step
if (currentEvent.evaluateStep(interpolator)) {
// the event occurs during the current step
occuringEvents.add(currentEvent);
}
}
// last part of the step, after the last event
interpolator.setInterpolatedTime(currentT);
final double[] currentY = interpolator.getInterpolatedState();
for (final EventState state : eventsStates) {
state.stepAccepted(currentT, currentY);
isLastStep = isLastStep || state.stop();
}
isLastStep = isLastStep || Precision.equals(currentT, tEnd, 1);
// handle the remaining part of the step, after all events if any
for (StepHandler handler : stepHandlers) {
handler.handleStep(interpolator, isLastStep);
}
return currentT;
} | src/main/java/org/apache/commons/math3/ode/AbstractIntegrator.java |
Math-70 | public double solve(final UnivariateRealFunction f, double min, double max, double initial)
throws MaxIterationsExceededException, FunctionEvaluationException {
return solve(min, max);
}
public double solve(final UnivariateRealFunction f, double min, double max, double initial)
throws MaxIterationsExceededException, FunctionEvaluationException {
return solve(f, min, max);
} | src/main/java/org/apache/commons/math/analysis/solvers/BisectionSolver.java |
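A usage sketch of the forwarding fix, assuming commons-math 2.x package names; the old overload silently dropped f and delegated to the deprecated solve(min, max), which depended on a function set elsewhere:

import org.apache.commons.math.analysis.UnivariateRealFunction;
import org.apache.commons.math.analysis.solvers.BisectionSolver;

public class BisectionForwardingDemo {
    public static void main(String[] args) throws Exception {
        UnivariateRealFunction f = new UnivariateRealFunction() {
            public double value(double x) {
                return x * x - 2.0; // root at sqrt(2)
            }
        };
        BisectionSolver solver = new BisectionSolver();
        System.out.println(solver.solve(f, 1.0, 2.0, 1.5)); // about 1.4142
    }
}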
Math-72 | public double solve(final UnivariateRealFunction f,
final double min, final double max, final double initial)
throws MaxIterationsExceededException, FunctionEvaluationException {
clearResult();
verifySequence(min, initial, max);
// return the initial guess if it is good enough
double yInitial = f.value(initial);
if (Math.abs(yInitial) <= functionValueAccuracy) {
setResult(initial, 0);
return result;
}
// return the first endpoint if it is good enough
double yMin = f.value(min);
if (Math.abs(yMin) <= functionValueAccuracy) {
setResult(yMin, 0);
return result;
}
// reduce interval if min and initial bracket the root
if (yInitial * yMin < 0) {
return solve(f, min, yMin, initial, yInitial, min, yMin);
}
// return the second endpoint if it is good enough
double yMax = f.value(max);
if (Math.abs(yMax) <= functionValueAccuracy) {
setResult(yMax, 0);
return result;
}
// reduce interval if initial and max bracket the root
if (yInitial * yMax < 0) {
return solve(f, initial, yInitial, max, yMax, initial, yInitial);
}
if (yMin * yMax > 0) {
throw MathRuntimeException.createIllegalArgumentException(
NON_BRACKETING_MESSAGE, min, max, yMin, yMax);
}
// full Brent algorithm starting with provided initial guess
return solve(f, min, yMin, max, yMax, initial, yInitial);
}
public double solve(final UnivariateRealFunction f,
final double min, final double max, final double initial)
throws MaxIterationsExceededException, FunctionEvaluationException {
clearResult();
verifySequence(min, initial, max);
// return the initial guess if it is good enough
double yInitial = f.value(initial);
if (Math.abs(yInitial) <= functionValueAccuracy) {
setResult(initial, 0);
return result;
}
// return the first endpoint if it is good enough
double yMin = f.value(min);
if (Math.abs(yMin) <= functionValueAccuracy) {
setResult(min, 0);
return result;
}
// reduce interval if min and initial bracket the root
if (yInitial * yMin < 0) {
return solve(f, min, yMin, initial, yInitial, min, yMin);
}
// return the second endpoint if it is good enough
double yMax = f.value(max);
if (Math.abs(yMax) <= functionValueAccuracy) {
setResult(max, 0);
return result;
}
// reduce interval if initial and max bracket the root
if (yInitial * yMax < 0) {
return solve(f, initial, yInitial, max, yMax, initial, yInitial);
}
if (yMin * yMax > 0) {
throw MathRuntimeException.createIllegalArgumentException(
NON_BRACKETING_MESSAGE, min, max, yMin, yMax);
}
// full Brent algorithm starting with provided initial guess
return solve(f, min, yMin, max, yMax, initial, yInitial);
} | src/main/java/org/apache/commons/math/analysis/solvers/BrentSolver.java |
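A usage sketch of the endpoint case the fix corrects, assuming commons-math 2.x package names: when an interval endpoint is already a root, setResult must receive the abscissa, not the function value:

import org.apache.commons.math.analysis.UnivariateRealFunction;
import org.apache.commons.math.analysis.solvers.BrentSolver;

public class EndpointRootDemo {
    public static void main(String[] args) throws Exception {
        UnivariateRealFunction f = new UnivariateRealFunction() {
            public double value(double x) {
                return x - 2.0; // the lower endpoint is an exact root
            }
        };
        BrentSolver solver = new BrentSolver();
        // Old code stored f(min) = 0.0 as the result; the fix stores min,
        // so the returned root is 2.0.
        System.out.println(solver.solve(f, 2.0, 5.0, 3.0));
    }
}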
Math-73 | public double solve(final UnivariateRealFunction f,
final double min, final double max, final double initial)
throws MaxIterationsExceededException, FunctionEvaluationException {
clearResult();
verifySequence(min, initial, max);
// return the initial guess if it is good enough
double yInitial = f.value(initial);
if (Math.abs(yInitial) <= functionValueAccuracy) {
setResult(initial, 0);
return result;
}
// return the first endpoint if it is good enough
double yMin = f.value(min);
if (Math.abs(yMin) <= functionValueAccuracy) {
setResult(yMin, 0);
return result;
}
// reduce interval if min and initial bracket the root
if (yInitial * yMin < 0) {
return solve(f, min, yMin, initial, yInitial, min, yMin);
}
// return the second endpoint if it is good enough
double yMax = f.value(max);
if (Math.abs(yMax) <= functionValueAccuracy) {
setResult(yMax, 0);
return result;
}
// reduce interval if initial and max bracket the root
if (yInitial * yMax < 0) {
return solve(f, initial, yInitial, max, yMax, initial, yInitial);
}
// full Brent algorithm starting with provided initial guess
return solve(f, min, yMin, max, yMax, initial, yInitial);
}
public double solve(final UnivariateRealFunction f,
final double min, final double max, final double initial)
throws MaxIterationsExceededException, FunctionEvaluationException {
clearResult();
verifySequence(min, initial, max);
// return the initial guess if it is good enough
double yInitial = f.value(initial);
if (Math.abs(yInitial) <= functionValueAccuracy) {
setResult(initial, 0);
return result;
}
// return the first endpoint if it is good enough
double yMin = f.value(min);
if (Math.abs(yMin) <= functionValueAccuracy) {
setResult(yMin, 0);
return result;
}
// reduce interval if min and initial bracket the root
if (yInitial * yMin < 0) {
return solve(f, min, yMin, initial, yInitial, min, yMin);
}
// return the second endpoint if it is good enough
double yMax = f.value(max);
if (Math.abs(yMax) <= functionValueAccuracy) {
setResult(yMax, 0);
return result;
}
// reduce interval if initial and max bracket the root
if (yInitial * yMax < 0) {
return solve(f, initial, yInitial, max, yMax, initial, yInitial);
}
if (yMin * yMax > 0) {
throw MathRuntimeException.createIllegalArgumentException(
NON_BRACKETING_MESSAGE, min, max, yMin, yMax);
}
// full Brent algorithm starting with provided initial guess
return solve(f, min, yMin, max, yMax, initial, yInitial);
} | src/main/java/org/apache/commons/math/analysis/solvers/BrentSolver.java |
Math-74 | public double integrate(final FirstOrderDifferentialEquations equations,
final double t0, final double[] y0,
final double t, final double[] y)
throws DerivativeException, IntegratorException {
sanityChecks(equations, t0, y0, t, y);
setEquations(equations);
resetEvaluations();
final boolean forward = t > t0;
// create some internal working arrays
final int stages = c.length + 1;
if (y != y0) {
System.arraycopy(y0, 0, y, 0, y0.length);
}
final double[][] yDotK = new double[stages][y0.length];
final double[] yTmp = new double[y0.length];
// set up an interpolator sharing the integrator arrays
AbstractStepInterpolator interpolator;
if (requiresDenseOutput() || (! eventsHandlersManager.isEmpty())) {
final RungeKuttaStepInterpolator rki = (RungeKuttaStepInterpolator) prototype.copy();
rki.reinitialize(this, yTmp, yDotK, forward);
interpolator = rki;
} else {
interpolator = new DummyStepInterpolator(yTmp, forward);
}
interpolator.storeTime(t0);
// set up integration control objects
stepStart = t0;
double hNew = 0;
boolean firstTime = true;
for (StepHandler handler : stepHandlers) {
handler.reset();
}
CombinedEventsManager manager = addEndTimeChecker(t0, t, eventsHandlersManager);
boolean lastStep = false;
// main integration loop
while (!lastStep) {
interpolator.shift();
double error = 0;
for (boolean loop = true; loop;) {
if (firstTime || !fsal) {
// first stage
computeDerivatives(stepStart, y, yDotK[0]);
}
if (firstTime) {
final double[] scale;
if (vecAbsoluteTolerance == null) {
scale = new double[y0.length];
java.util.Arrays.fill(scale, scalAbsoluteTolerance);
} else {
scale = vecAbsoluteTolerance;
}
hNew = initializeStep(equations, forward, getOrder(), scale,
stepStart, y, yDotK[0], yTmp, yDotK[1]);
firstTime = false;
}
stepSize = hNew;
// next stages
for (int k = 1; k < stages; ++k) {
for (int j = 0; j < y0.length; ++j) {
double sum = a[k-1][0] * yDotK[0][j];
for (int l = 1; l < k; ++l) {
sum += a[k-1][l] * yDotK[l][j];
}
yTmp[j] = y[j] + stepSize * sum;
}
computeDerivatives(stepStart + c[k-1] * stepSize, yTmp, yDotK[k]);
}
// estimate the state at the end of the step
for (int j = 0; j < y0.length; ++j) {
double sum = b[0] * yDotK[0][j];
for (int l = 1; l < stages; ++l) {
sum += b[l] * yDotK[l][j];
}
yTmp[j] = y[j] + stepSize * sum;
}
// estimate the error at the end of the step
error = estimateError(yDotK, y, yTmp, stepSize);
if (error <= 1.0) {
// discrete events handling
interpolator.storeTime(stepStart + stepSize);
if (manager.evaluateStep(interpolator)) {
final double dt = manager.getEventTime() - stepStart;
if (Math.abs(dt) <= Math.ulp(stepStart)) {
// rejecting the step would lead to a too small next step, we accept it
loop = false;
} else {
// reject the step to match exactly the next switch time
hNew = dt;
}
} else {
// accept the step
loop = false;
}
} else {
// reject the step and attempt to reduce error by stepsize control
final double factor =
Math.min(maxGrowth,
Math.max(minReduction, safety * Math.pow(error, exp)));
hNew = filterStep(stepSize * factor, forward, false);
}
}
// the step has been accepted
final double nextStep = stepStart + stepSize;
System.arraycopy(yTmp, 0, y, 0, y0.length);
manager.stepAccepted(nextStep, y);
lastStep = manager.stop();
// provide the step data to the step handler
interpolator.storeTime(nextStep);
for (StepHandler handler : stepHandlers) {
handler.handleStep(interpolator, lastStep);
}
stepStart = nextStep;
if (fsal) {
// save the last evaluation for the next step
System.arraycopy(yDotK[stages - 1], 0, yDotK[0], 0, y0.length);
}
if (manager.reset(stepStart, y) && ! lastStep) {
// some event handler has triggered changes that
// invalidate the derivatives, we need to recompute them
computeDerivatives(stepStart, y, yDotK[0]);
}
if (! lastStep) {
// in some rare cases we may get here with stepSize = 0, for example
// when an event occurs at integration start, reducing the first step
// to zero; we have to reset the step to some safe non zero value
stepSize = filterStep(stepSize, forward, true);
// stepsize control for next step
final double factor = Math.min(maxGrowth,
Math.max(minReduction,
safety * Math.pow(error, exp)));
final double scaledH = stepSize * factor;
final double nextT = stepStart + scaledH;
final boolean nextIsLast = forward ? (nextT >= t) : (nextT <= t);
hNew = filterStep(scaledH, forward, nextIsLast);
}
}
final double stopTime = stepStart;
resetInternalState();
return stopTime;
}
public double integrate(final FirstOrderDifferentialEquations equations,
final double t0, final double[] y0,
final double t, final double[] y)
throws DerivativeException, IntegratorException {
sanityChecks(equations, t0, y0, t, y);
setEquations(equations);
resetEvaluations();
final boolean forward = t > t0;
// create some internal working arrays
final int stages = c.length + 1;
if (y != y0) {
System.arraycopy(y0, 0, y, 0, y0.length);
}
final double[][] yDotK = new double[stages][y0.length];
final double[] yTmp = new double[y0.length];
// set up an interpolator sharing the integrator arrays
AbstractStepInterpolator interpolator;
if (requiresDenseOutput() || (! eventsHandlersManager.isEmpty())) {
final RungeKuttaStepInterpolator rki = (RungeKuttaStepInterpolator) prototype.copy();
rki.reinitialize(this, yTmp, yDotK, forward);
interpolator = rki;
} else {
interpolator = new DummyStepInterpolator(yTmp, forward);
}
interpolator.storeTime(t0);
// set up integration control objects
stepStart = t0;
double hNew = 0;
boolean firstTime = true;
for (StepHandler handler : stepHandlers) {
handler.reset();
}
CombinedEventsManager manager = addEndTimeChecker(t0, t, eventsHandlersManager);
boolean lastStep = false;
// main integration loop
while (!lastStep) {
interpolator.shift();
double error = 0;
for (boolean loop = true; loop;) {
if (firstTime || !fsal) {
// first stage
computeDerivatives(stepStart, y, yDotK[0]);
}
if (firstTime) {
final double[] scale = new double[y0.length];
if (vecAbsoluteTolerance == null) {
for (int i = 0; i < scale.length; ++i) {
scale[i] = scalAbsoluteTolerance + scalRelativeTolerance * Math.abs(y[i]);
}
} else {
for (int i = 0; i < scale.length; ++i) {
scale[i] = vecAbsoluteTolerance[i] + vecRelativeTolerance[i] * Math.abs(y[i]);
}
}
hNew = initializeStep(equations, forward, getOrder(), scale,
stepStart, y, yDotK[0], yTmp, yDotK[1]);
firstTime = false;
}
stepSize = hNew;
// next stages
for (int k = 1; k < stages; ++k) {
for (int j = 0; j < y0.length; ++j) {
double sum = a[k-1][0] * yDotK[0][j];
for (int l = 1; l < k; ++l) {
sum += a[k-1][l] * yDotK[l][j];
}
yTmp[j] = y[j] + stepSize * sum;
}
computeDerivatives(stepStart + c[k-1] * stepSize, yTmp, yDotK[k]);
}
// estimate the state at the end of the step
for (int j = 0; j < y0.length; ++j) {
double sum = b[0] * yDotK[0][j];
for (int l = 1; l < stages; ++l) {
sum += b[l] * yDotK[l][j];
}
yTmp[j] = y[j] + stepSize * sum;
}
// estimate the error at the end of the step
error = estimateError(yDotK, y, yTmp, stepSize);
if (error <= 1.0) {
// discrete events handling
interpolator.storeTime(stepStart + stepSize);
if (manager.evaluateStep(interpolator)) {
final double dt = manager.getEventTime() - stepStart;
if (Math.abs(dt) <= Math.ulp(stepStart)) {
// rejecting the step would lead to a too small next step, we accept it
loop = false;
} else {
// reject the step to match exactly the next switch time
hNew = dt;
}
} else {
// accept the step
loop = false;
}
} else {
// reject the step and attempt to reduce error by stepsize control
final double factor =
Math.min(maxGrowth,
Math.max(minReduction, safety * Math.pow(error, exp)));
hNew = filterStep(stepSize * factor, forward, false);
}
}
// the step has been accepted
final double nextStep = stepStart + stepSize;
System.arraycopy(yTmp, 0, y, 0, y0.length);
manager.stepAccepted(nextStep, y);
lastStep = manager.stop();
// provide the step data to the step handler
interpolator.storeTime(nextStep);
for (StepHandler handler : stepHandlers) {
handler.handleStep(interpolator, lastStep);
}
stepStart = nextStep;
if (fsal) {
// save the last evaluation for the next step
System.arraycopy(yDotK[stages - 1], 0, yDotK[0], 0, y0.length);
}
if (manager.reset(stepStart, y) && ! lastStep) {
// some event handler has triggered changes that
// invalidate the derivatives, we need to recompute them
computeDerivatives(stepStart, y, yDotK[0]);
}
if (! lastStep) {
// in some rare cases we may get here with stepSize = 0, for example
// when an event occurs at integration start, reducing the first step
// to zero; we have to reset the step to some safe non zero value
stepSize = filterStep(stepSize, forward, true);
// stepsize control for next step
final double factor = Math.min(maxGrowth,
Math.max(minReduction,
safety * Math.pow(error, exp)));
final double scaledH = stepSize * factor;
final double nextT = stepStart + scaledH;
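// the next step is the last one if it reaches or passes the target time t in the integration direction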
final boolean nextIsLast = forward ? (nextT >= t) : (nextT <= t);
hNew = filterStep(scaledH, forward, nextIsLast);
}
}
final double stopTime = stepStart;
resetInternalState();
return stopTime;
} | src/main/java/org/apache/commons/math/ode/nonstiff/EmbeddedRungeKuttaIntegrator.java |
Math-75 | public double getPct(Object v) {
return getCumPct((Comparable<?>) v);
}
public double getPct(Object v) {
return getPct((Comparable<?>) v);
} | src/main/java/org/apache/commons/math/stat/Frequency.java |
Math-78 | public boolean evaluateStep(final StepInterpolator interpolator)
throws DerivativeException, EventException, ConvergenceException {
try {
forward = interpolator.isForward();
final double t1 = interpolator.getCurrentTime();
final int n = Math.max(1, (int) Math.ceil(Math.abs(t1 - t0) / maxCheckInterval));
final double h = (t1 - t0) / n;
double ta = t0;
double ga = g0;
double tb = t0 + (interpolator.isForward() ? convergence : -convergence);
for (int i = 0; i < n; ++i) {
// evaluate handler value at the end of the substep
tb += h;
interpolator.setInterpolatedTime(tb);
final double gb = handler.g(tb, interpolator.getInterpolatedState());
// check events occurrence
if (g0Positive ^ (gb >= 0)) {
// there is a sign change: an event is expected during this step
// this is a corner case:
// - there was an event near ta,
// - there is another event between ta and tb
// - when ta was computed, convergence was reached on the "wrong side" of the interval
// this implies that the real sign of ga is the same as gb, so we need to slightly
// shift ta to make sure ga and gb get opposite signs and the solver won't complain
// about bracketing
// this should never happen
// variation direction, with respect to the integration direction
increasing = gb >= ga;
final UnivariateRealFunction f = new UnivariateRealFunction() {
public double value(final double t) throws FunctionEvaluationException {
try {
interpolator.setInterpolatedTime(t);
return handler.g(t, interpolator.getInterpolatedState());
} catch (DerivativeException e) {
throw new FunctionEvaluationException(e, t);
} catch (EventException e) {
throw new FunctionEvaluationException(e, t);
}
}
};
final BrentSolver solver = new BrentSolver();
solver.setAbsoluteAccuracy(convergence);
solver.setMaximalIterationCount(maxIterationCount);
final double root = (ta <= tb) ? solver.solve(f, ta, tb) : solver.solve(f, tb, ta);
if ((Math.abs(root - ta) <= convergence) &&
(Math.abs(root - previousEventTime) <= convergence)) {
// we have either found nothing or found (again ?) a past event, we simply ignore it
ta = tb;
ga = gb;
} else if (Double.isNaN(previousEventTime) ||
(Math.abs(previousEventTime - root) > convergence)) {
pendingEventTime = root;
if (pendingEvent && (Math.abs(t1 - pendingEventTime) <= convergence)) {
// we were already waiting for this event which was
// found during a previous call for a step that was
// rejected, this step must now be accepted since it
// properly ends exactly at the event occurrence
return false;
}
// either we were not waiting for the event or it has
// moved in such a way the step cannot be accepted
pendingEvent = true;
return true;
}
} else {
// no sign change: there is no event for now
ta = tb;
ga = gb;
}
}
// no event during the whole step
pendingEvent = false;
pendingEventTime = Double.NaN;
return false;
} catch (FunctionEvaluationException e) {
final Throwable cause = e.getCause();
if ((cause != null) && (cause instanceof DerivativeException)) {
throw (DerivativeException) cause;
} else if ((cause != null) && (cause instanceof EventException)) {
throw (EventException) cause;
}
throw new EventException(e);
}
}
public boolean evaluateStep(final StepInterpolator interpolator)
throws DerivativeException, EventException, ConvergenceException {
try {
forward = interpolator.isForward();
final double t1 = interpolator.getCurrentTime();
final int n = Math.max(1, (int) Math.ceil(Math.abs(t1 - t0) / maxCheckInterval));
final double h = (t1 - t0) / n;
double ta = t0;
double ga = g0;
double tb = t0 + (interpolator.isForward() ? convergence : -convergence);
for (int i = 0; i < n; ++i) {
// evaluate handler value at the end of the substep
tb += h;
interpolator.setInterpolatedTime(tb);
final double gb = handler.g(tb, interpolator.getInterpolatedState());
// check events occurrence
if (g0Positive ^ (gb >= 0)) {
// there is a sign change: an event is expected during this step
if (ga * gb > 0) {
// this is a corner case:
// - there was an event near ta,
// - there is another event between ta and tb
// - when ta was computed, convergence was reached on the "wrong side" of the interval
// this implies that the real sign of ga is the same as gb, so we need to slightly
// shift ta to make sure ga and gb get opposite signs and the solver won't complain
// about bracketing
final double epsilon = (forward ? 0.25 : -0.25) * convergence;
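// nudge ta by a quarter of the convergence width, at most four times, until ga and gb have opposite signs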
for (int k = 0; (k < 4) && (ga * gb > 0); ++k) {
ta += epsilon;
interpolator.setInterpolatedTime(ta);
ga = handler.g(ta, interpolator.getInterpolatedState());
}
if (ga * gb > 0) {
// this should never happen
throw MathRuntimeException.createInternalError(null);
}
}
// variation direction, with respect to the integration direction
increasing = gb >= ga;
final UnivariateRealFunction f = new UnivariateRealFunction() {
public double value(final double t) throws FunctionEvaluationException {
try {
interpolator.setInterpolatedTime(t);
return handler.g(t, interpolator.getInterpolatedState());
} catch (DerivativeException e) {
throw new FunctionEvaluationException(e, t);
} catch (EventException e) {
throw new FunctionEvaluationException(e, t);
}
}
};
final BrentSolver solver = new BrentSolver();
solver.setAbsoluteAccuracy(convergence);
solver.setMaximalIterationCount(maxIterationCount);
final double root = (ta <= tb) ? solver.solve(f, ta, tb) : solver.solve(f, tb, ta);
if ((Math.abs(root - ta) <= convergence) &&
(Math.abs(root - previousEventTime) <= convergence)) {
// we have either found nothing or found (again ?) a past event, we simply ignore it
ta = tb;
ga = gb;
} else if (Double.isNaN(previousEventTime) ||
(Math.abs(previousEventTime - root) > convergence)) {
pendingEventTime = root;
if (pendingEvent && (Math.abs(t1 - pendingEventTime) <= convergence)) {
// we were already waiting for this event which was
// found during a previous call for a step that was
// rejected, this step must now be accepted since it
// properly ends exactly at the event occurrence
return false;
}
// either we were not waiting for the event or it has
// moved in such a way the step cannot be accepted
pendingEvent = true;
return true;
}
} else {
// no sign change: there is no event for now
ta = tb;
ga = gb;
}
}
// no event during the whole step
pendingEvent = false;
pendingEventTime = Double.NaN;
return false;
} catch (FunctionEvaluationException e) {
final Throwable cause = e.getCause();
if ((cause != null) && (cause instanceof DerivativeException)) {
throw (DerivativeException) cause;
} else if ((cause != null) && (cause instanceof EventException)) {
throw (EventException) cause;
}
throw new EventException(e);
}
} | src/main/java/org/apache/commons/math/ode/events/EventState.java |
Math-79 | public static double distance(int[] p1, int[] p2) {
int sum = 0;
for (int i = 0; i < p1.length; i++) {
final int dp = p1[i] - p2[i];
sum += dp * dp;
}
return Math.sqrt(sum);
}
public static double distance(int[] p1, int[] p2) {
double sum = 0;
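// accumulate the squared differences in double so that large coordinate differences cannot overflow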
for (int i = 0; i < p1.length; i++) {
final double dp = p1[i] - p2[i];
sum += dp * dp;
}
return Math.sqrt(sum);
} | src/main/java/org/apache/commons/math/util/MathUtils.java |
Math-8 | public T[] sample(int sampleSize) throws NotStrictlyPositiveException {
if (sampleSize <= 0) {
throw new NotStrictlyPositiveException(LocalizedFormats.NUMBER_OF_SAMPLES,
sampleSize);
}
final T[]out = (T[]) java.lang.reflect.Array.newInstance(singletons.get(0).getClass(), sampleSize);
for (int i = 0; i < sampleSize; i++) {
out[i] = sample();
}
return out;
}
public Object[] sample(int sampleSize) throws NotStrictlyPositiveException {
if (sampleSize <= 0) {
throw new NotStrictlyPositiveException(LocalizedFormats.NUMBER_OF_SAMPLES,
sampleSize);
}
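// a plain Object[] can hold samples whose runtime types differ from one another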
final Object[] out = new Object[sampleSize];
for (int i = 0; i < sampleSize; i++) {
out[i] = sample();
}
return out;
} | src/main/java/org/apache/commons/math3/distribution/DiscreteDistribution.java |
Math-80 | private boolean flipIfWarranted(final int n, final int step) {
if (1.5 * work[pingPong] < work[4 * (n - 1) + pingPong]) {
// flip array
int j = 4 * n - 1;
for (int i = 0; i < j; i += 4) {
for (int k = 0; k < 4; k += step) {
final double tmp = work[i + k];
work[i + k] = work[j - k];
work[j - k] = tmp;
}
j -= 4;
}
return true;
}
return false;
}
private boolean flipIfWarranted(final int n, final int step) {
if (1.5 * work[pingPong] < work[4 * (n - 1) + pingPong]) {
// flip array
int j = 4 * (n - 1);
for (int i = 0; i < j; i += 4) {
for (int k = 0; k < 4; k += step) {
final double tmp = work[i + k];
work[i + k] = work[j - k];
work[j - k] = tmp;
}
j -= 4;
}
return true;
}
return false;
} | src/main/java/org/apache/commons/math/linear/EigenDecompositionImpl.java |
Math-82 | private Integer getPivotRow(final int col, final SimplexTableau tableau) {
double minRatio = Double.MAX_VALUE;
Integer minRatioPos = null;
for (int i = tableau.getNumObjectiveFunctions(); i < tableau.getHeight(); i++) {
final double rhs = tableau.getEntry(i, tableau.getWidth() - 1);
final double entry = tableau.getEntry(i, col);
if (MathUtils.compareTo(entry, 0, epsilon) >= 0) {
final double ratio = rhs / entry;
if (ratio < minRatio) {
minRatio = ratio;
minRatioPos = i;
}
}
}
return minRatioPos;
}
private Integer getPivotRow(final int col, final SimplexTableau tableau) {
double minRatio = Double.MAX_VALUE;
Integer minRatioPos = null;
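// minimum ratio test: among rows whose entry in the pivot column is strictly positive, pick the smallest rhs / entry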
for (int i = tableau.getNumObjectiveFunctions(); i < tableau.getHeight(); i++) {
final double rhs = tableau.getEntry(i, tableau.getWidth() - 1);
final double entry = tableau.getEntry(i, col);
if (MathUtils.compareTo(entry, 0, epsilon) > 0) {
final double ratio = rhs / entry;
if (ratio < minRatio) {
minRatio = ratio;
minRatioPos = i;
}
}
}
return minRatioPos;
} | src/main/java/org/apache/commons/math/optimization/linear/SimplexSolver.java |
Math-84 | protected void iterateSimplex(final Comparator<RealPointValuePair> comparator)
throws FunctionEvaluationException, OptimizationException, IllegalArgumentException {
while (true) {
incrementIterationsCounter();
// save the original vertex
final RealPointValuePair[] original = simplex;
final RealPointValuePair best = original[0];
// perform a reflection step
final RealPointValuePair reflected = evaluateNewSimplex(original, 1.0, comparator);
if (comparator.compare(reflected, best) < 0) {
// compute the expanded simplex
final RealPointValuePair[] reflectedSimplex = simplex;
final RealPointValuePair expanded = evaluateNewSimplex(original, khi, comparator);
if (comparator.compare(reflected, expanded) <= 0) {
// accept the reflected simplex
simplex = reflectedSimplex;
}
return;
}
// compute the contracted simplex
final RealPointValuePair contracted = evaluateNewSimplex(original, gamma, comparator);
if (comparator.compare(contracted, best) < 0) {
// accept the contracted simplex
// check convergence
return;
}
}
}
protected void iterateSimplex(final Comparator<RealPointValuePair> comparator)
throws FunctionEvaluationException, OptimizationException, IllegalArgumentException {
final RealConvergenceChecker checker = getConvergenceChecker();
while (true) {
incrementIterationsCounter();
// save the original vertex
final RealPointValuePair[] original = simplex;
final RealPointValuePair best = original[0];
// perform a reflection step
final RealPointValuePair reflected = evaluateNewSimplex(original, 1.0, comparator);
if (comparator.compare(reflected, best) < 0) {
// compute the expanded simplex
final RealPointValuePair[] reflectedSimplex = simplex;
final RealPointValuePair expanded = evaluateNewSimplex(original, khi, comparator);
if (comparator.compare(reflected, expanded) <= 0) {
// accept the reflected simplex
simplex = reflectedSimplex;
}
return;
}
// compute the contracted simplex
final RealPointValuePair contracted = evaluateNewSimplex(original, gamma, comparator);
if (comparator.compare(contracted, best) < 0) {
// accept the contracted simplex
return;
}
// check convergence
final int iter = getIterations();
boolean converged = true;
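// stop only once every vertex of the new simplex has converged with respect to the previous one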
for (int i = 0; i < simplex.length; ++i) {
converged &= checker.converged(iter, original[i], simplex[i]);
}
if (converged) {
return;
}
}
} | src/main/java/org/apache/commons/math/optimization/direct/MultiDirectional.java |
Math-85 | public static double[] bracket(UnivariateRealFunction function,
double initial, double lowerBound, double upperBound,
int maximumIterations) throws ConvergenceException,
FunctionEvaluationException {
if (function == null) {
throw MathRuntimeException.createIllegalArgumentException("function is null");
}
if (maximumIterations <= 0) {
throw MathRuntimeException.createIllegalArgumentException(
"bad value for maximum iterations number: {0}", maximumIterations);
}
if (initial < lowerBound || initial > upperBound || lowerBound >= upperBound) {
throw MathRuntimeException.createIllegalArgumentException(
"invalid bracketing parameters: lower bound={0}, initial={1}, upper bound={2}",
lowerBound, initial, upperBound);
}
double a = initial;
double b = initial;
double fa;
double fb;
int numIterations = 0 ;
do {
a = Math.max(a - 1.0, lowerBound);
b = Math.min(b + 1.0, upperBound);
fa = function.value(a);
fb = function.value(b);
numIterations++ ;
} while ((fa * fb > 0.0) && (numIterations < maximumIterations) &&
((a > lowerBound) || (b < upperBound)));
if (fa * fb >= 0.0 ) {
throw new ConvergenceException(
"number of iterations={0}, maximum iterations={1}, " +
"initial={2}, lower bound={3}, upper bound={4}, final a value={5}, " +
"final b value={6}, f(a)={7}, f(b)={8}",
numIterations, maximumIterations, initial,
lowerBound, upperBound, a, b, fa, fb);
}
return new double[]{a, b};
}
public static double[] bracket(UnivariateRealFunction function,
double initial, double lowerBound, double upperBound,
int maximumIterations) throws ConvergenceException,
FunctionEvaluationException {
if (function == null) {
throw MathRuntimeException.createIllegalArgumentException("function is null");
}
if (maximumIterations <= 0) {
throw MathRuntimeException.createIllegalArgumentException(
"bad value for maximum iterations number: {0}", maximumIterations);
}
if (initial < lowerBound || initial > upperBound || lowerBound >= upperBound) {
throw MathRuntimeException.createIllegalArgumentException(
"invalid bracketing parameters: lower bound={0}, initial={1}, upper bound={2}",
lowerBound, initial, upperBound);
}
double a = initial;
double b = initial;
double fa;
double fb;
int numIterations = 0 ;
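// widen the search interval by 1.0 on each side per iteration, clamped to the bounds, until the function values at a and b have opposite signs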
do {
a = Math.max(a - 1.0, lowerBound);
b = Math.min(b + 1.0, upperBound);
fa = function.value(a);
fb = function.value(b);
numIterations++ ;
} while ((fa * fb > 0.0) && (numIterations < maximumIterations) &&
((a > lowerBound) || (b < upperBound)));
if (fa * fb > 0.0 ) {
throw new ConvergenceException(
"number of iterations={0}, maximum iterations={1}, " +
"initial={2}, lower bound={3}, upper bound={4}, final a value={5}, " +
"final b value={6}, f(a)={7}, f(b)={8}",
numIterations, maximumIterations, initial,
lowerBound, upperBound, a, b, fa, fb);
}
return new double[]{a, b};
} | src/java/org/apache/commons/math/analysis/solvers/UnivariateRealSolverUtils.java |
Math-86 | public CholeskyDecompositionImpl(final RealMatrix matrix,
final double relativeSymmetryThreshold,
final double absolutePositivityThreshold)
throws NonSquareMatrixException,
NotSymmetricMatrixException, NotPositiveDefiniteMatrixException {
if (!matrix.isSquare()) {
throw new NonSquareMatrixException(matrix.getRowDimension(),
matrix.getColumnDimension());
}
final int order = matrix.getRowDimension();
lTData = matrix.getData();
cachedL = null;
cachedLT = null;
// check the matrix before transformation
for (int i = 0; i < order; ++i) {
final double[] lI = lTData[i];
if (lTData[i][i] < absolutePositivityThreshold) {
throw new NotPositiveDefiniteMatrixException();
}
// check off-diagonal elements (and reset them to 0)
for (int j = i + 1; j < order; ++j) {
final double[] lJ = lTData[j];
final double lIJ = lI[j];
final double lJI = lJ[i];
final double maxDelta =
relativeSymmetryThreshold * Math.max(Math.abs(lIJ), Math.abs(lJI));
if (Math.abs(lIJ - lJI) > maxDelta) {
throw new NotSymmetricMatrixException();
}
lJ[i] = 0;
}
}
// transform the matrix
for (int i = 0; i < order; ++i) {
final double[] ltI = lTData[i];
// check diagonal element
ltI[i] = Math.sqrt(ltI[i]);
final double inverse = 1.0 / ltI[i];
for (int q = order - 1; q > i; --q) {
ltI[q] *= inverse;
final double[] ltQ = lTData[q];
for (int p = q; p < order; ++p) {
ltQ[p] -= ltI[q] * ltI[p];
}
}
}
}
public CholeskyDecompositionImpl(final RealMatrix matrix,
final double relativeSymmetryThreshold,
final double absolutePositivityThreshold)
throws NonSquareMatrixException,
NotSymmetricMatrixException, NotPositiveDefiniteMatrixException {
if (!matrix.isSquare()) {
throw new NonSquareMatrixException(matrix.getRowDimension(),
matrix.getColumnDimension());
}
final int order = matrix.getRowDimension();
lTData = matrix.getData();
cachedL = null;
cachedLT = null;
// check the matrix before transformation
for (int i = 0; i < order; ++i) {
final double[] lI = lTData[i];
// check off-diagonal elements (and reset them to 0)
for (int j = i + 1; j < order; ++j) {
final double[] lJ = lTData[j];
final double lIJ = lI[j];
final double lJI = lJ[i];
final double maxDelta =
relativeSymmetryThreshold * Math.max(Math.abs(lIJ), Math.abs(lJI));
if (Math.abs(lIJ - lJI) > maxDelta) {
throw new NotSymmetricMatrixException();
}
lJ[i] = 0;
}
}
// transform the matrix
for (int i = 0; i < order; ++i) {
final double[] ltI = lTData[i];
// check diagonal element
if (ltI[i] < absolutePositivityThreshold) {
throw new NotPositiveDefiniteMatrixException();
}
ltI[i] = Math.sqrt(ltI[i]);
final double inverse = 1.0 / ltI[i];
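// scale the remainder of row i by the inverse of the diagonal, then subtract its contribution from the trailing rows of the upper triangle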
for (int q = order - 1; q > i; --q) {
ltI[q] *= inverse;
final double[] ltQ = lTData[q];
for (int p = q; p < order; ++p) {
ltQ[p] -= ltI[q] * ltI[p];
}
}
}
} | src/java/org/apache/commons/math/linear/CholeskyDecompositionImpl.java |
Math-87 | private Integer getBasicRow(final int col) {
Integer row = null;
for (int i = getNumObjectiveFunctions(); i < getHeight(); i++) {
if (!MathUtils.equals(getEntry(i, col), 0.0, epsilon)) {
if (row == null) {
row = i;
} else {
return null;
}
}
}
return row;
}
private Integer getBasicRow(final int col) {
Integer row = null;
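// a column is basic only if it contains a single entry equal to 1 and zeros elsewhere; return that entry's row, otherwise null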
for (int i = getNumObjectiveFunctions(); i < getHeight(); i++) {
if (MathUtils.equals(getEntry(i, col), 1.0, epsilon) && (row == null)) {
row = i;
} else if (!MathUtils.equals(getEntry(i, col), 0.0, epsilon)) {
return null;
}
}
return row;
} | src/java/org/apache/commons/math/optimization/linear/SimplexTableau.java |
Math-88 | protected RealPointValuePair getSolution() {
double[] coefficients = new double[getOriginalNumDecisionVariables()];
Integer basicRow =
getBasicRow(getNumObjectiveFunctions() + getOriginalNumDecisionVariables());
double mostNegative = basicRow == null ? 0 : getEntry(basicRow, getRhsOffset());
for (int i = 0; i < coefficients.length; i++) {
basicRow = getBasicRow(getNumObjectiveFunctions() + i);
// if multiple variables can take a given value
// then we choose the first and set the rest equal to 0
coefficients[i] =
(basicRow == null ? 0 : getEntry(basicRow, getRhsOffset())) -
(restrictToNonNegative ? 0 : mostNegative);
if (basicRow != null) {
for (int j = getNumObjectiveFunctions(); j < getNumObjectiveFunctions() + i; j++) {
if (tableau.getEntry(basicRow, j) == 1) {
coefficients[i] = 0;
}
}
}
}
return new RealPointValuePair(coefficients, f.getValue(coefficients));
}
protected RealPointValuePair getSolution() {
double[] coefficients = new double[getOriginalNumDecisionVariables()];
Integer basicRow =
getBasicRow(getNumObjectiveFunctions() + getOriginalNumDecisionVariables());
double mostNegative = basicRow == null ? 0 : getEntry(basicRow, getRhsOffset());
Set<Integer> basicRows = new HashSet<Integer>();
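// remember the rows already used as basic rows so that each row contributes a value to at most one variable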
for (int i = 0; i < coefficients.length; i++) {
basicRow = getBasicRow(getNumObjectiveFunctions() + i);
if (basicRows.contains(basicRow)) {
// if multiple variables can take a given value
// then we choose the first and set the rest equal to 0
coefficients[i] = 0;
} else {
basicRows.add(basicRow);
coefficients[i] =
(basicRow == null ? 0 : getEntry(basicRow, getRhsOffset())) -
(restrictToNonNegative ? 0 : mostNegative);
}
}
return new RealPointValuePair(coefficients, f.getValue(coefficients));
} | src/java/org/apache/commons/math/optimization/linear/SimplexTableau.java |
Math-89 | public void addValue(Object v) {
addValue((Comparable<?>) v);
}
public void addValue(Object v) {
if (v instanceof Comparable<?>){
addValue((Comparable<?>) v);
} else {
throw new IllegalArgumentException("Object must implement Comparable");
}
} | src/java/org/apache/commons/math/stat/Frequency.java |
Math-9 | public Line revert() {
final Line reverted = new Line(zero, zero.subtract(direction));
return reverted;
}
public Line revert() {
final Line reverted = new Line(this);
reverted.direction = reverted.direction.negate();
return reverted;
} | src/main/java/org/apache/commons/math3/geometry/euclidean/threed/Line.java |
Math-90 | /**
 * Adds 1 to the frequency count for v.
 * <p>
 * If other objects have already been added to this Frequency, v must
 * be comparable to those that have already been added.
 * </p>
 *
 * @param v the value to add.
 * @throws IllegalArgumentException if <code>v</code> is not comparable with previous entries
 */
public void addValue(Object v) {
Object obj = v;
if (v instanceof Integer) {
obj = Long.valueOf(((Integer) v).longValue());
}
try {
Long count = (Long) freqTable.get(obj);
if (count == null) {
freqTable.put(obj, Long.valueOf(1));
} else {
freqTable.put(obj, Long.valueOf(count.longValue() + 1));
}
} catch (ClassCastException ex) {
//TreeMap will throw ClassCastException if v is not comparable
throw new IllegalArgumentException("Value not comparable to existing values.");
}
}
public void addValue(Object v) {
addValue((Comparable<?>) v);
} | src/java/org/apache/commons/math/stat/Frequency.java |
Math-91 | public int compareTo(Fraction object) {
double nOd = doubleValue();
double dOn = object.doubleValue();
return (nOd < dOn) ? -1 : ((nOd > dOn) ? +1 : 0);
}
public int compareTo(Fraction object) {
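// cross-multiply in long arithmetic: the products are exact, avoiding floating-point rounding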
long nOd = ((long) numerator) * object.denominator;
long dOn = ((long) denominator) * object.numerator;
return (nOd < dOn) ? -1 : ((nOd > dOn) ? +1 : 0);
} | src/java/org/apache/commons/math/fraction/Fraction.java |
Math-94 | public static int gcd(int u, int v) {
if (u * v == 0) {
return (Math.abs(u) + Math.abs(v));
}
// keep u and v negative, as negative integers range down to
// -2^31, while positive numbers can only be as large as 2^31-1
// (i.e. we can't necessarily negate a negative number without
// overflow)
/* assert u!=0 && v!=0; */
if (u > 0) {
u = -u;
} // make u negative
if (v > 0) {
v = -v;
} // make v negative
// B1. [Find power of 2]
int k = 0;
while ((u & 1) == 0 && (v & 1) == 0 && k < 31) { // while u and v are
// both even...
u /= 2;
v /= 2;
k++; // cast out twos.
}
if (k == 31) {
throw new ArithmeticException("overflow: gcd is 2^31");
}
// B2. Initialize: u and v have been divided by 2^k and at least
// one is odd.
int t = ((u & 1) == 1) ? v : -(u / 2)/* B3 */;
// t negative: u was odd, v may be even (t replaces v)
// t positive: u was even, v is odd (t replaces u)
do {
/* assert u<0 && v<0; */
// B4/B3: cast out twos from t.
while ((t & 1) == 0) { // while t is even..
t /= 2; // cast out twos
}
// B5 [reset max(u,v)]
if (t > 0) {
u = -t;
} else {
v = t;
}
// B6/B3. at this point both u and v should be odd.
t = (v - u) / 2;
// |u| larger: t positive (replace u)
// |v| larger: t negative (replace v)
} while (t != 0);
return -u * (1 << k); // gcd is u*2^k
}
public static int gcd(int u, int v) {
if ((u == 0) || (v == 0)) {
return (Math.abs(u) + Math.abs(v));
}
// keep u and v negative, as negative integers range down to
// -2^31, while positive numbers can only be as large as 2^31-1
// (i.e. we can't necessarily negate a negative number without
// overflow)
/* assert u!=0 && v!=0; */
if (u > 0) {
u = -u;
} // make u negative
if (v > 0) {
v = -v;
} // make v negative
// B1. [Find power of 2]
int k = 0;
while ((u & 1) == 0 && (v & 1) == 0 && k < 31) { // while u and v are
// both even...
u /= 2;
v /= 2;
k++; // cast out twos.
}
if (k == 31) {
throw new ArithmeticException("overflow: gcd is 2^31");
}
// B2. Initialize: u and v have been divided by 2^k and at least
// one is odd.
int t = ((u & 1) == 1) ? v : -(u / 2)/* B3 */;
// t negative: u was odd, v may be even (t replaces v)
// t positive: u was even, v is odd (t replaces u)
do {
/* assert u<0 && v<0; */
// B4/B3: cast out twos from t.
while ((t & 1) == 0) { // while t is even..
t /= 2; // cast out twos
}
// B5 [reset max(u,v)]
if (t > 0) {
u = -t;
} else {
v = t;
}
// B6/B3. at this point both u and v should be odd.
t = (v - u) / 2;
// |u| larger: t positive (replace u)
// |v| larger: t negative (replace v)
} while (t != 0);
return -u * (1 << k); // gcd is u*2^k
} | src/java/org/apache/commons/math/util/MathUtils.java |
Math-95 | protected double getInitialDomain(double p) {
double ret;
double d = getDenominatorDegreesOfFreedom();
// use mean
ret = d / (d - 2.0);
return ret;
}
protected double getInitialDomain(double p) {
double ret = 1.0;
double d = getDenominatorDegreesOfFreedom();
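// the F distribution only has a finite mean, d / (d - 2), when d > 2; otherwise fall back to 1.0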
if (d > 2.0) {
// use mean
ret = d / (d - 2.0);
}
return ret;
} | src/java/org/apache/commons/math/distribution/FDistributionImpl.java |
Math-96 | public boolean equals(Object other) {
boolean ret;
if (this == other) {
ret = true;
} else if (other == null) {
ret = false;
} else {
try {
Complex rhs = (Complex)other;
if (rhs.isNaN()) {
ret = this.isNaN();
} else {
ret = (Double.doubleToRawLongBits(real) == Double.doubleToRawLongBits(rhs.getReal())) && (Double.doubleToRawLongBits(imaginary) == Double.doubleToRawLongBits(rhs.getImaginary()));
}
} catch (ClassCastException ex) {
// ignore exception
ret = false;
}
}
return ret;
}
public boolean equals(Object other) {
boolean ret;
if (this == other) {
ret = true;
} else if (other == null) {
ret = false;
} else {
try {
Complex rhs = (Complex)other;
if (rhs.isNaN()) {
ret = this.isNaN();
} else {
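// compare by numeric value, so +0.0 and -0.0 are considered equal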
ret = (real == rhs.real) && (imaginary == rhs.imaginary);
}
} catch (ClassCastException ex) {
// ignore exception
ret = false;
}
}
return ret;
} | src/java/org/apache/commons/math/complex/Complex.java |
Math-97 | public double solve(double min, double max) throws MaxIterationsExceededException,
FunctionEvaluationException {
clearResult();
verifyInterval(min, max);
double ret = Double.NaN;
double yMin = f.value(min);
double yMax = f.value(max);
// Verify bracketing
double sign = yMin * yMax;
if (sign >= 0) {
// check if either value is close to a zero
// neither value is close to zero and min and max do not bracket root.
throw new IllegalArgumentException
("Function values at endpoints do not have different signs." +
" Endpoints: [" + min + "," + max + "]" +
" Values: [" + yMin + "," + yMax + "]");
} else {
// solve using only the first endpoint as initial guess
ret = solve(min, yMin, max, yMax, min, yMin);
// either min or max is a root
}
return ret;
}
public double solve(double min, double max) throws MaxIterationsExceededException,
FunctionEvaluationException {
clearResult();
verifyInterval(min, max);
double ret = Double.NaN;
double yMin = f.value(min);
double yMax = f.value(max);
// Verify bracketing
double sign = yMin * yMax;
if (sign > 0) {
// check if either value is close to a zero
if (Math.abs(yMin) <= functionValueAccuracy) {
setResult(min, 0);
ret = min;
} else if (Math.abs(yMax) <= functionValueAccuracy) {
setResult(max, 0);
ret = max;
} else {
// neither value is close to zero and min and max do not bracket root.
throw new IllegalArgumentException
("Function values at endpoints do not have different signs." +
" Endpoints: [" + min + "," + max + "]" +
" Values: [" + yMin + "," + yMax + "]");
}
} else if (sign < 0){
// solve using only the first endpoint as initial guess
ret = solve(min, yMin, max, yMax, min, yMin);
} else {
// either min or max is a root
if (yMin == 0.0) {
ret = min;
} else {
ret = max;
}
}
return ret;
} | src/java/org/apache/commons/math/analysis/BrentSolver.java |
Mockito-1 | public void captureArgumentsFrom(Invocation invocation) {
if (invocation.getMethod().isVarArgs()) {
int indexOfVararg = invocation.getRawArguments().length - 1;
throw new UnsupportedOperationException();
} else {
for (int position = 0; position < matchers.size(); position++) {
Matcher m = matchers.get(position);
if (m instanceof CapturesArguments) {
((CapturesArguments) m).captureFrom(invocation.getArgumentAt(position, Object.class));
}
}
}
// for (int position = 0; position < matchers.size(); position++) {
// Matcher m = matchers.get(position);
// if (m instanceof CapturesArguments && invocation.getRawArguments().length > position) {
// //TODO SF - this whole lot can be moved captureFrom implementation
// if(isVariableArgument(invocation, position) && isVarargMatcher(m)) {
// Object array = invocation.getRawArguments()[position];
// for (int i = 0; i < Array.getLength(array); i++) {
// ((CapturesArguments) m).captureFrom(Array.get(array, i));
// }
// //since we've captured all varargs already, it does not make sense to process other matchers.
// return;
// } else {
// ((CapturesArguments) m).captureFrom(invocation.getRawArguments()[position]);
// }
// }
// }
}
public void captureArgumentsFrom(Invocation invocation) {
if (invocation.getMethod().isVarArgs()) {
int indexOfVararg = invocation.getRawArguments().length - 1;
for (int position = 0; position < indexOfVararg; position++) {
Matcher m = matchers.get(position);
if (m instanceof CapturesArguments) {
((CapturesArguments) m).captureFrom(invocation.getArgumentAt(position, Object.class));
}
}
for (int position = indexOfVararg; position < matchers.size(); position++) {
Matcher m = matchers.get(position);
if (m instanceof CapturesArguments) {
((CapturesArguments) m).captureFrom(invocation.getRawArguments()[position - indexOfVararg]);
}
}
} else {
for (int position = 0; position < matchers.size(); position++) {
Matcher m = matchers.get(position);
if (m instanceof CapturesArguments) {
((CapturesArguments) m).captureFrom(invocation.getArgumentAt(position, Object.class));
}
}
}
// for (int position = 0; position < matchers.size(); position++) {
// Matcher m = matchers.get(position);
// if (m instanceof CapturesArguments && invocation.getRawArguments().length > position) {
// //TODO SF - this whole lot can be moved captureFrom implementation
// if(isVariableArgument(invocation, position) && isVarargMatcher(m)) {
// Object array = invocation.getRawArguments()[position];
// for (int i = 0; i < Array.getLength(array); i++) {
// ((CapturesArguments) m).captureFrom(Array.get(array, i));
// }
// //since we've captured all varargs already, it does not make sense to process other matchers.
// return;
// } else {
// ((CapturesArguments) m).captureFrom(invocation.getRawArguments()[position]);
// }
// }
// }
} | src/org/mockito/internal/invocation/InvocationMatcher.java |
Mockito-12 | public Class getGenericType(Field field) {
Type generic = field.getGenericType();
if (generic != null && generic instanceof ParameterizedType) {
Type actual = ((ParameterizedType) generic).getActualTypeArguments()[0];
return (Class) actual;
//in case of nested generics we don't go deep
}
return Object.class;
}
public Class getGenericType(Field field) {
Type generic = field.getGenericType();
if (generic != null && generic instanceof ParameterizedType) {
Type actual = ((ParameterizedType) generic).getActualTypeArguments()[0];
if (actual instanceof Class) {
return (Class) actual;
} else if (actual instanceof ParameterizedType) {
//in case of nested generics we don't go deep
return (Class) ((ParameterizedType) actual).getRawType();
}
}
return Object.class;
} | src/org/mockito/internal/util/reflection/GenericMaster.java |
Mockito-13 | public Object handle(Invocation invocation) throws Throwable {
if (invocationContainerImpl.hasAnswersForStubbing()) {
// stubbing voids with stubVoid() or doAnswer() style
InvocationMatcher invocationMatcher = matchersBinder.bindMatchers(mockingProgress
.getArgumentMatcherStorage(), invocation);
invocationContainerImpl.setMethodForStubbing(invocationMatcher);
return null;
}
VerificationMode verificationMode = mockingProgress.pullVerificationMode();
InvocationMatcher invocationMatcher = matchersBinder.bindMatchers(mockingProgress.getArgumentMatcherStorage(),
invocation);
mockingProgress.validateState();
//if verificationMode is not null then someone is doing verify()
if (verificationMode != null) {
//We need to check if verification was started on the correct mock
// - see VerifyingWithAnExtraCallToADifferentMockTest (bug 138)
if (verificationMode instanceof MockAwareVerificationMode && ((MockAwareVerificationMode) verificationMode).getMock() == invocation.getMock()) {
VerificationDataImpl data = new VerificationDataImpl(invocationContainerImpl, invocationMatcher);
verificationMode.verify(data);
return null;
// this means there is an invocation on a different mock. Re-adding verification mode
// - see VerifyingWithAnExtraCallToADifferentMockTest (bug 138)
}
}
invocationContainerImpl.setInvocationForPotentialStubbing(invocationMatcher);
OngoingStubbingImpl<T> ongoingStubbing = new OngoingStubbingImpl<T>(invocationContainerImpl);
mockingProgress.reportOngoingStubbing(ongoingStubbing);
StubbedInvocationMatcher stubbedInvocation = invocationContainerImpl.findAnswerFor(invocation);
if (stubbedInvocation != null) {
stubbedInvocation.captureArgumentsFrom(invocation);
return stubbedInvocation.answer(invocation);
} else {
Object ret = mockSettings.getDefaultAnswer().answer(invocation);
// redo setting invocation for potential stubbing in case of partial
// mocks / spies.
// Without it, the real method inside 'when' might have delegated
// to other self method and overwrite the intended stubbed method
// with a different one. The reset is required to avoid runtime exception that validates return type with stubbed method signature.
invocationContainerImpl.resetInvocationForPotentialStubbing(invocationMatcher);
return ret;
}
}
public Object handle(Invocation invocation) throws Throwable {
if (invocationContainerImpl.hasAnswersForStubbing()) {
// stubbing voids with stubVoid() or doAnswer() style
InvocationMatcher invocationMatcher = matchersBinder.bindMatchers(mockingProgress
.getArgumentMatcherStorage(), invocation);
invocationContainerImpl.setMethodForStubbing(invocationMatcher);
return null;
}
VerificationMode verificationMode = mockingProgress.pullVerificationMode();
InvocationMatcher invocationMatcher = matchersBinder.bindMatchers(mockingProgress.getArgumentMatcherStorage(),
invocation);
mockingProgress.validateState();
//if verificationMode is not null then someone is doing verify()
if (verificationMode != null) {
//We need to check if verification was started on the correct mock
// - see VerifyingWithAnExtraCallToADifferentMockTest (bug 138)
if (((MockAwareVerificationMode) verificationMode).getMock() == invocation.getMock()) {
VerificationDataImpl data = new VerificationDataImpl(invocationContainerImpl, invocationMatcher);
verificationMode.verify(data);
return null;
} else {
// this means there is an invocation on a different mock. Re-adding verification mode
// - see VerifyingWithAnExtraCallToADifferentMockTest (bug 138)
mockingProgress.verificationStarted(verificationMode);
}
}
invocationContainerImpl.setInvocationForPotentialStubbing(invocationMatcher);
OngoingStubbingImpl<T> ongoingStubbing = new OngoingStubbingImpl<T>(invocationContainerImpl);
mockingProgress.reportOngoingStubbing(ongoingStubbing);
StubbedInvocationMatcher stubbedInvocation = invocationContainerImpl.findAnswerFor(invocation);
if (stubbedInvocation != null) {
stubbedInvocation.captureArgumentsFrom(invocation);
return stubbedInvocation.answer(invocation);
} else {
Object ret = mockSettings.getDefaultAnswer().answer(invocation);
// redo setting invocation for potential stubbing in case of partial
// mocks / spies.
// Without it, the real method inside 'when' might have delegated
// to other self method and overwrite the intended stubbed method
// with a different one. The reset is required to avoid runtime exception that validates return type with stubbed method signature.
invocationContainerImpl.resetInvocationForPotentialStubbing(invocationMatcher);
return ret;
}
} | src/org/mockito/internal/MockHandler.java |
Mockito-18 | Object returnValueFor(Class<?> type) {
if (Primitives.isPrimitiveOrWrapper(type)) {
return Primitives.defaultValueForPrimitiveOrWrapper(type);
//new instances are used instead of Collections.emptyList(), etc.
//to avoid UnsupportedOperationException if code under test modifies returned collection
} else if (type == Collection.class) {
return new LinkedList<Object>();
} else if (type == Set.class) {
return new HashSet<Object>();
} else if (type == HashSet.class) {
return new HashSet<Object>();
} else if (type == SortedSet.class) {
return new TreeSet<Object>();
} else if (type == TreeSet.class) {
return new TreeSet<Object>();
} else if (type == LinkedHashSet.class) {
return new LinkedHashSet<Object>();
} else if (type == List.class) {
return new LinkedList<Object>();
} else if (type == LinkedList.class) {
return new LinkedList<Object>();
} else if (type == ArrayList.class) {
return new ArrayList<Object>();
} else if (type == Map.class) {
return new HashMap<Object, Object>();
} else if (type == HashMap.class) {
return new HashMap<Object, Object>();
} else if (type == SortedMap.class) {
return new TreeMap<Object, Object>();
} else if (type == TreeMap.class) {
return new TreeMap<Object, Object>();
} else if (type == LinkedHashMap.class) {
return new LinkedHashMap<Object, Object>();
}
//Let's not care about the rest of collections.
return null;
}
Object returnValueFor(Class<?> type) {
if (Primitives.isPrimitiveOrWrapper(type)) {
return Primitives.defaultValueForPrimitiveOrWrapper(type);
//new instances are used instead of Collections.emptyList(), etc.
//to avoid UnsupportedOperationException if code under test modifies returned collection
} else if (type == Iterable.class) {
return new ArrayList<Object>(0);
} else if (type == Collection.class) {
return new LinkedList<Object>();
} else if (type == Set.class) {
return new HashSet<Object>();
} else if (type == HashSet.class) {
return new HashSet<Object>();
} else if (type == SortedSet.class) {
return new TreeSet<Object>();
} else if (type == TreeSet.class) {
return new TreeSet<Object>();
} else if (type == LinkedHashSet.class) {
return new LinkedHashSet<Object>();
} else if (type == List.class) {
return new LinkedList<Object>();
} else if (type == LinkedList.class) {
return new LinkedList<Object>();
} else if (type == ArrayList.class) {
return new ArrayList<Object>();
} else if (type == Map.class) {
return new HashMap<Object, Object>();
} else if (type == HashMap.class) {
return new HashMap<Object, Object>();
} else if (type == SortedMap.class) {
return new TreeMap<Object, Object>();
} else if (type == TreeMap.class) {
return new TreeMap<Object, Object>();
} else if (type == LinkedHashMap.class) {
return new LinkedHashMap<Object, Object>();
}
//Let's not care about the rest of collections.
return null;
} | src/org/mockito/internal/stubbing/defaultanswers/ReturnsEmptyValues.java |
Mockito-20 | public <T> T createMock(MockCreationSettings<T> settings, MockHandler handler) {
if (settings.getSerializableMode() == SerializableMode.ACROSS_CLASSLOADERS) {
throw new MockitoException("Serialization across classloaders not yet supported with ByteBuddyMockMaker");
}
Class<? extends T> mockedProxyType = cachingMockBytecodeGenerator.get(
settings.getTypeToMock(),
settings.getExtraInterfaces()
);
T mockInstance = null;
try {
mockInstance = classInstantiator.instantiate(mockedProxyType);
MockMethodInterceptor.MockAccess mockAccess = (MockMethodInterceptor.MockAccess) mockInstance;
mockAccess.setMockitoInterceptor(new MockMethodInterceptor(asInternalMockHandler(handler), settings));
return ensureMockIsAssignableToMockedType(settings, mockInstance);
} catch (ClassCastException cce) {
throw new MockitoException(join(
"ClassCastException occurred while creating the mockito mock :",
" class to mock : " + describeClass(mockedProxyType),
" created class : " + describeClass(settings.getTypeToMock()),
" proxy instance class : " + describeClass(mockInstance),
" instance creation by : " + classInstantiator.getClass().getSimpleName(),
"",
"You might experience classloading issues, please ask the mockito mailing-list.",
""
),cce);
} catch (org.mockito.internal.creation.instance.InstantiationException e) {
throw new MockitoException("Unable to create mock instance of type '" + mockedProxyType.getSuperclass().getSimpleName() + "'", e);
}
}
public <T> T createMock(MockCreationSettings<T> settings, MockHandler handler) {
if (settings.getSerializableMode() == SerializableMode.ACROSS_CLASSLOADERS) {
throw new MockitoException("Serialization across classloaders not yet supported with ByteBuddyMockMaker");
}
Class<? extends T> mockedProxyType = cachingMockBytecodeGenerator.get(
settings.getTypeToMock(),
settings.getExtraInterfaces()
);
Instantiator instantiator = new InstantiatorProvider().getInstantiator(settings);
T mockInstance = null;
try {
mockInstance = instantiator.newInstance(mockedProxyType);
MockMethodInterceptor.MockAccess mockAccess = (MockMethodInterceptor.MockAccess) mockInstance;
mockAccess.setMockitoInterceptor(new MockMethodInterceptor(asInternalMockHandler(handler), settings));
return ensureMockIsAssignableToMockedType(settings, mockInstance);
} catch (ClassCastException cce) {
throw new MockitoException(join(
"ClassCastException occurred while creating the mockito mock :",
" class to mock : " + describeClass(mockedProxyType),
" created class : " + describeClass(settings.getTypeToMock()),
" proxy instance class : " + describeClass(mockInstance),
" instance creation by : " + instantiator.getClass().getSimpleName(),
"",
"You might experience classloading issues, please ask the mockito mailing-list.",
""
),cce);
} catch (org.mockito.internal.creation.instance.InstantiationException e) {
throw new MockitoException("Unable to create mock instance of type '" + mockedProxyType.getSuperclass().getSimpleName() + "'", e);
}
} | src/org/mockito/internal/creation/bytebuddy/ByteBuddyMockMaker.java |
Mockito-22 | public static boolean areEqual(Object o1, Object o2) {
if (o1 == null || o2 == null) {
return o1 == null && o2 == null;
} else if (isArray(o1)) {
return isArray(o2) && areArraysEqual(o1, o2);
} else {
return o1.equals(o2);
}
}
public static boolean areEqual(Object o1, Object o2) {
if (o1 == o2 ) {
return true;
} else if (o1 == null || o2 == null) {
return o1 == null && o2 == null;
} else if (isArray(o1)) {
return isArray(o2) && areArraysEqual(o1, o2);
} else {
return o1.equals(o2);
}
} | src/org/mockito/internal/matchers/Equality.java |
Mockito-24 | public Object answer(InvocationOnMock invocation) {
if (methodsGuru.isToString(invocation.getMethod())) {
Object mock = invocation.getMock();
MockName name = mockUtil.getMockName(mock);
if (name.isDefault()) {
return "Mock for " + mockUtil.getMockSettings(mock).getTypeToMock().getSimpleName() + ", hashCode: " + mock.hashCode();
} else {
return name.toString();
}
} else if (methodsGuru.isCompareToMethod(invocation.getMethod())) {
//see issue 184.
//mocks by default should return 0 if references are the same, otherwise some other value because they are not the same. Hence we return 1 (anything but 0 is good).
//Only for compareTo() method by the Comparable interface
return 1;
}
Class<?> returnType = invocation.getMethod().getReturnType();
return returnValueFor(returnType);
}
public Object answer(InvocationOnMock invocation) {
if (methodsGuru.isToString(invocation.getMethod())) {
Object mock = invocation.getMock();
MockName name = mockUtil.getMockName(mock);
if (name.isDefault()) {
return "Mock for " + mockUtil.getMockSettings(mock).getTypeToMock().getSimpleName() + ", hashCode: " + mock.hashCode();
} else {
return name.toString();
}
} else if (methodsGuru.isCompareToMethod(invocation.getMethod())) {
//see issue 184.
//mocks by default should return 0 if references are the same, otherwise some other value because they are not the same. Hence we return 1 (anything but 0 is good).
//Only for compareTo() method by the Comparable interface
return invocation.getMock() == invocation.getArguments()[0] ? 0 : 1;
}
Class<?> returnType = invocation.getMethod().getReturnType();
return returnValueFor(returnType);
} | src/org/mockito/internal/stubbing/defaultanswers/ReturnsEmptyValues.java |
Mockito-27 | public <T> void resetMock(T mock) {
MockHandlerInterface<T> oldMockHandler = getMockHandler(mock);
MockHandler<T> newMockHandler = new MockHandler<T>(oldMockHandler);
MethodInterceptorFilter newFilter = new MethodInterceptorFilter(newMockHandler, (MockSettingsImpl) org.mockito.Mockito.withSettings().defaultAnswer(org.mockito.Mockito.RETURNS_DEFAULTS));
((Factory) mock).setCallback(0, newFilter);
}
public <T> void resetMock(T mock) {
MockHandlerInterface<T> oldMockHandler = getMockHandler(mock);
MethodInterceptorFilter newFilter = newMethodInterceptorFilter(oldMockHandler.getMockSettings());
((Factory) mock).setCallback(0, newFilter);
} | src/org/mockito/internal/util/MockUtil.java |
Mockito-28 | private void injectMockCandidate(Class<?> awaitingInjectionClazz, Set<Object> mocks, Object fieldInstance) {
for(Field field : orderedInstanceFieldsFrom(awaitingInjectionClazz)) {
mockCandidateFilter.filterCandidate(mocks, field, fieldInstance).thenInject();
}
}
private void injectMockCandidate(Class<?> awaitingInjectionClazz, Set<Object> mocks, Object fieldInstance) {
for(Field field : orderedInstanceFieldsFrom(awaitingInjectionClazz)) {
Object injected = mockCandidateFilter.filterCandidate(mocks, field, fieldInstance).thenInject();
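// remove the injected mock from the candidate set so the same instance is not injected into a later field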
mocks.remove(injected);
}
} | src/org/mockito/internal/configuration/DefaultInjectionEngine.java |
Mockito-29 | public void describeTo(Description description) {
description.appendText("same(");
appendQuoting(description);
description.appendText(wanted.toString());
appendQuoting(description);
description.appendText(")");
}
public void describeTo(Description description) {
description.appendText("same(");
appendQuoting(description);
description.appendText(wanted == null ? "null" : wanted.toString());
appendQuoting(description);
description.appendText(")");
} | src/org/mockito/internal/matchers/Same.java |
Mockito-33 | public boolean hasSameMethod(Invocation candidate) {
//not using method.equals() for 1 good reason:
//sometimes java generates forwarding methods when generics are in play see JavaGenericsForwardingMethodsTest
Method m1 = invocation.getMethod();
Method m2 = candidate.getMethod();
/* Avoid unnecessary cloning */
return m1.equals(m2);
}
public boolean hasSameMethod(Invocation candidate) {
//not using method.equals() for 1 good reason:
//sometimes java generates forwarding methods when generics are in play see JavaGenericsForwardingMethodsTest
Method m1 = invocation.getMethod();
Method m2 = candidate.getMethod();
if (m1.getName() != null && m1.getName().equals(m2.getName())) {
/* Avoid unnecessary cloning */
Class[] params1 = m1.getParameterTypes();
Class[] params2 = m2.getParameterTypes();
if (params1.length == params2.length) {
for (int i = 0; i < params1.length; i++) {
if (params1[i] != params2[i])
return false;
}
return true;
}
}
return false;
} | src/org/mockito/internal/invocation/InvocationMatcher.java |
Mockito-34 | public void captureArgumentsFrom(Invocation i) {
int k = 0;
for (Matcher m : matchers) {
if (m instanceof CapturesArguments) {
((CapturesArguments) m).captureFrom(i.getArguments()[k]);
}
k++;
}
}
public void captureArgumentsFrom(Invocation i) {
int k = 0;
for (Matcher m : matchers) {
if (m instanceof CapturesArguments && i.getArguments().length > k) {
((CapturesArguments) m).captureFrom(i.getArguments()[k]);
}
k++;
}
} | src/org/mockito/internal/invocation/InvocationMatcher.java |
Mockito-38 | private boolean toStringEquals(Matcher m, Object arg) {
return StringDescription.toString(m).equals(arg.toString());
}
private boolean toStringEquals(Matcher m, Object arg) {
return StringDescription.toString(m).equals(arg == null? "null" : arg.toString());
} | src/org/mockito/internal/verification/argumentmatching/ArgumentMatchingTool.java |
Mockito-5 | public void verify(VerificationData data) {
AssertionError error = null;
timer.start();
while (timer.isCounting()) {
try {
delegate.verify(data);
if (returnOnSuccess) {
return;
} else {
error = null;
}
} catch (MockitoAssertionError e) {
error = handleVerifyException(e);
}
catch (org.mockito.exceptions.verification.junit.ArgumentsAreDifferent e) {
error = handleVerifyException(e);
}
}
if (error != null) {
throw error;
}
}
public void verify(VerificationData data) {
AssertionError error = null;
timer.start();
while (timer.isCounting()) {
try {
delegate.verify(data);
if (returnOnSuccess) {
return;
} else {
error = null;
}
} catch (MockitoAssertionError e) {
error = handleVerifyException(e);
}
catch (AssertionError e) {
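// plain AssertionErrors (e.g. JUnit's ArgumentsAreDifferent) are retried in the same way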
error = handleVerifyException(e);
}
}
if (error != null) {
throw error;
}
} | src/org/mockito/internal/verification/VerificationOverTimeImpl.java |
Mockito-7 | private void readTypeVariables() {
for (Type type : typeVariable.getBounds()) {
registerTypeVariablesOn(type);
}
registerTypeVariablesOn(getActualTypeArgumentFor(typeVariable));
}
private void readTypeVariables() {
for (Type type : typeVariable.getBounds()) {
registerTypeVariablesOn(type);
}
registerTypeParametersOn(new TypeVariable[] { typeVariable });
registerTypeVariablesOn(getActualTypeArgumentFor(typeVariable));
} | src/org/mockito/internal/util/reflection/GenericMetadataSupport.java |
Mockito-8 | protected void registerTypeVariablesOn(Type classType) {
if (!(classType instanceof ParameterizedType)) {
return;
}
ParameterizedType parameterizedType = (ParameterizedType) classType;
TypeVariable[] typeParameters = ((Class<?>) parameterizedType.getRawType()).getTypeParameters();
Type[] actualTypeArguments = parameterizedType.getActualTypeArguments();
for (int i = 0; i < actualTypeArguments.length; i++) {
TypeVariable typeParameter = typeParameters[i];
Type actualTypeArgument = actualTypeArguments[i];
if (actualTypeArgument instanceof WildcardType) {
contextualActualTypeParameters.put(typeParameter, boundsOf((WildcardType) actualTypeArgument));
} else {
contextualActualTypeParameters.put(typeParameter, actualTypeArgument);
}
// logger.log("For '" + parameterizedType + "' found type variable : { '" + typeParameter + "(" + System.identityHashCode(typeParameter) + ")" + "' : '" + actualTypeArgument + "(" + System.identityHashCode(typeParameter) + ")" + "' }");
}
}
protected void registerTypeVariablesOn(Type classType) {
if (!(classType instanceof ParameterizedType)) {
return;
}
ParameterizedType parameterizedType = (ParameterizedType) classType;
TypeVariable[] typeParameters = ((Class<?>) parameterizedType.getRawType()).getTypeParameters();
Type[] actualTypeArguments = parameterizedType.getActualTypeArguments();
for (int i = 0; i < actualTypeArguments.length; i++) {
TypeVariable typeParameter = typeParameters[i];
Type actualTypeArgument = actualTypeArguments[i];
if (actualTypeArgument instanceof WildcardType) {
contextualActualTypeParameters.put(typeParameter, boundsOf((WildcardType) actualTypeArgument));
} else if (typeParameter != actualTypeArgument) {
contextualActualTypeParameters.put(typeParameter, actualTypeArgument);
}
// logger.log("For '" + parameterizedType + "' found type variable : { '" + typeParameter + "(" + System.identityHashCode(typeParameter) + ")" + "' : '" + actualTypeArgument + "(" + System.identityHashCode(typeParameter) + ")" + "' }");
}
} | src/org/mockito/internal/util/reflection/GenericMetadataSupport.java |
Time-14 | public int[] add(ReadablePartial partial, int fieldIndex, int[] values, int valueToAdd) {
// overridden as superclass algorithm can't handle
// 2004-02-29 + 48 months -> 2008-02-29 type dates
if (valueToAdd == 0) {
return values;
}
// month is largest field and being added to, such as month-day
if (DateTimeUtils.isContiguous(partial)) {
long instant = 0L;
for (int i = 0, isize = partial.size(); i < isize; i++) {
instant = partial.getFieldType(i).getField(iChronology).set(instant, values[i]);
}
instant = add(instant, valueToAdd);
return iChronology.get(partial, instant);
} else {
return super.add(partial, fieldIndex, values, valueToAdd);
}
}
public int[] add(ReadablePartial partial, int fieldIndex, int[] values, int valueToAdd) {
// overridden as superclass algorithm can't handle
// 2004-02-29 + 48 months -> 2008-02-29 type dates
if (valueToAdd == 0) {
return values;
}
if (partial.size() > 0 && partial.getFieldType(0).equals(DateTimeFieldType.monthOfYear()) && fieldIndex == 0) {
// month is largest field and being added to, such as month-day
int curMonth0 = partial.getValue(0) - 1;
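// zero-based month plus the wrapped delta; adding 12 before the modulo keeps the result non-negative for negative additions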
int newMonth = ((curMonth0 + (valueToAdd % 12) + 12) % 12) + 1;
return set(partial, 0, values, newMonth);
}
if (DateTimeUtils.isContiguous(partial)) {
long instant = 0L;
for (int i = 0, isize = partial.size(); i < isize; i++) {
instant = partial.getFieldType(i).getField(iChronology).set(instant, values[i]);
}
instant = add(instant, valueToAdd);
return iChronology.get(partial, instant);
} else {
return super.add(partial, fieldIndex, values, valueToAdd);
}
} | src/main/java/org/joda/time/chrono/BasicMonthOfYearDateTimeField.java |
Time-15 | public static long safeMultiply(long val1, int val2) {
switch (val2) {
case -1:
return -val1;
case 0:
return 0L;
case 1:
return val1;
}
long total = val1 * val2;
if (total / val2 != val1) {
throw new ArithmeticException("Multiplication overflows a long: " + val1 + " * " + val2);
}
return total;
}
public static long safeMultiply(long val1, int val2) {
switch (val2) {
case -1:
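// negating Long.MIN_VALUE overflows, as its magnitude is one greater than Long.MAX_VALUE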
if (val1 == Long.MIN_VALUE) {
throw new ArithmeticException("Multiplication overflows a long: " + val1 + " * " + val2);
}
return -val1;
case 0:
return 0L;
case 1:
return val1;
}
long total = val1 * val2;
if (total / val2 != val1) {
throw new ArithmeticException("Multiplication overflows a long: " + val1 + " * " + val2);
}
return total;
} | src/main/java/org/joda/time/field/FieldUtils.java |
Time-16 | public int parseInto(ReadWritableInstant instant, String text, int position) {
DateTimeParser parser = requireParser();
if (instant == null) {
throw new IllegalArgumentException("Instant must not be null");
}
long instantMillis = instant.getMillis();
Chronology chrono = instant.getChronology();
long instantLocal = instantMillis + chrono.getZone().getOffset(instantMillis);
chrono = selectChronology(chrono);
DateTimeParserBucket bucket = new DateTimeParserBucket(
instantLocal, chrono, iLocale, iPivotYear, iDefaultYear);
int newPos = parser.parseInto(bucket, text, position);
instant.setMillis(bucket.computeMillis(false, text));
if (iOffsetParsed && bucket.getOffsetInteger() != null) {
int parsedOffset = bucket.getOffsetInteger();
DateTimeZone parsedZone = DateTimeZone.forOffsetMillis(parsedOffset);
chrono = chrono.withZone(parsedZone);
} else if (bucket.getZone() != null) {
chrono = chrono.withZone(bucket.getZone());
}
instant.setChronology(chrono);
if (iZone != null) {
instant.setZone(iZone);
}
return newPos;
}
public int parseInto(ReadWritableInstant instant, String text, int position) {
DateTimeParser parser = requireParser();
if (instant == null) {
throw new IllegalArgumentException("Instant must not be null");
}
long instantMillis = instant.getMillis();
Chronology chrono = instant.getChronology();
long instantLocal = instantMillis + chrono.getZone().getOffset(instantMillis);
chrono = selectChronology(chrono);
DateTimeParserBucket bucket = new DateTimeParserBucket(
instantLocal, chrono, iLocale, iPivotYear, chrono.year().get(instantLocal));
int newPos = parser.parseInto(bucket, text, position);
instant.setMillis(bucket.computeMillis(false, text));
if (iOffsetParsed && bucket.getOffsetInteger() != null) {
int parsedOffset = bucket.getOffsetInteger();
DateTimeZone parsedZone = DateTimeZone.forOffsetMillis(parsedOffset);
chrono = chrono.withZone(parsedZone);
} else if (bucket.getZone() != null) {
chrono = chrono.withZone(bucket.getZone());
}
instant.setChronology(chrono);
if (iZone != null) {
instant.setZone(iZone);
}
return newPos;
} | src/main/java/org/joda/time/format/DateTimeFormatter.java |
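A hypothetical usage sketch, assuming Joda-Time with this patch applied: the default year used by parseInto now comes from the target instant rather than the formatter's own default, so a parsed leap day stays in the instant's year.

import org.joda.time.DateTimeZone;
import org.joda.time.MutableDateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

public class Time16Sketch {
    public static void main(String[] args) {
        // The target sits in the leap year 2004; with the fix the parsed result
        // keeps that year instead of being pulled to the formatter default.
        MutableDateTime target = new MutableDateTime(2004, 1, 9, 0, 0, 0, 0, DateTimeZone.UTC);
        DateTimeFormatter f = DateTimeFormat.forPattern("M d");
        f.parseInto(target, "2 29", 0);
        System.out.println(target);  // expected: 2004-02-29T00:00:00.000Z
    }
}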
Time-17 | public long adjustOffset(long instant, boolean earlierOrLater) {
// a bit messy, but will work in all non-pathological cases
// evaluate 3 hours before and after to work out if anything is happening
long instantBefore = convertUTCToLocal(instant - 3 * DateTimeConstants.MILLIS_PER_HOUR);
long instantAfter = convertUTCToLocal(instant + 3 * DateTimeConstants.MILLIS_PER_HOUR);
if (instantBefore == instantAfter) {
return instant; // not an overlap (less than is a gap, equal is normal case)
}
// work out range of instants that have duplicate local times
long local = convertUTCToLocal(instant);
return convertLocalToUTC(local, false, earlierOrLater ? instantAfter : instantBefore);
// calculate result
// currently in later offset
// currently in earlier offset
}
public long adjustOffset(long instant, boolean earlierOrLater) {
// a bit messy, but will work in all non-pathological cases
// evaluate 3 hours before and after to work out if anything is happening
long instantBefore = instant - 3 * DateTimeConstants.MILLIS_PER_HOUR;
long instantAfter = instant + 3 * DateTimeConstants.MILLIS_PER_HOUR;
long offsetBefore = getOffset(instantBefore);
long offsetAfter = getOffset(instantAfter);
if (offsetBefore <= offsetAfter) {
return instant; // not an overlap (less than is a gap, equal is normal case)
}
// work out range of instants that have duplicate local times
long diff = offsetBefore - offsetAfter;
long transition = nextTransition(instantBefore);
long overlapStart = transition - diff;
long overlapEnd = transition + diff;
if (instant < overlapStart || instant >= overlapEnd) {
return instant; // not an overlap
}
// calculate result
long afterStart = instant - overlapStart;
if (afterStart >= diff) {
// currently in later offset
return earlierOrLater ? instant : instant - diff;
} else {
// currently in earlier offset
return earlierOrLater ? instant + diff : instant;
}
} | src/main/java/org/joda/time/DateTimeZone.java |
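A sketch of the overlap handling the rewritten adjustOffset enables, assuming Joda-Time 2.x; the zone and date are just a convenient autumn DST overlap, and the class name is illustrative.

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class Time17Sketch {
    public static void main(String[] args) {
        // 01:30 local time occurs twice when US clocks fall back on 2011-11-06.
        DateTimeZone newYork = DateTimeZone.forID("America/New_York");
        DateTime ambiguous = new DateTime(2011, 11, 6, 1, 30, newYork);
        System.out.println(ambiguous.withEarlierOffsetAtOverlap());  // expected offset -04:00
        System.out.println(ambiguous.withLaterOffsetAtOverlap());    // expected offset -05:00
    }
}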
Time-18 | public long getDateTimeMillis(int year, int monthOfYear, int dayOfMonth,
int hourOfDay, int minuteOfHour,
int secondOfMinute, int millisOfSecond)
throws IllegalArgumentException
{
Chronology base;
if ((base = getBase()) != null) {
return base.getDateTimeMillis
(year, monthOfYear, dayOfMonth,
hourOfDay, minuteOfHour, secondOfMinute, millisOfSecond);
}
// Assume date is Gregorian.
long instant;
instant = iGregorianChronology.getDateTimeMillis
(year, monthOfYear, dayOfMonth,
hourOfDay, minuteOfHour, secondOfMinute, millisOfSecond);
if (instant < iCutoverMillis) {
// Maybe it's Julian.
instant = iJulianChronology.getDateTimeMillis
(year, monthOfYear, dayOfMonth,
hourOfDay, minuteOfHour, secondOfMinute, millisOfSecond);
if (instant >= iCutoverMillis) {
// Okay, it's in the illegal cutover gap.
throw new IllegalArgumentException("Specified date does not exist");
}
}
return instant;
}
public long getDateTimeMillis(int year, int monthOfYear, int dayOfMonth,
int hourOfDay, int minuteOfHour,
int secondOfMinute, int millisOfSecond)
throws IllegalArgumentException
{
Chronology base;
if ((base = getBase()) != null) {
return base.getDateTimeMillis
(year, monthOfYear, dayOfMonth,
hourOfDay, minuteOfHour, secondOfMinute, millisOfSecond);
}
// Assume date is Gregorian.
long instant;
try {
instant = iGregorianChronology.getDateTimeMillis
(year, monthOfYear, dayOfMonth,
hourOfDay, minuteOfHour, secondOfMinute, millisOfSecond);
} catch (IllegalFieldValueException ex) {
if (monthOfYear != 2 || dayOfMonth != 29) {
throw ex;
}
instant = iGregorianChronology.getDateTimeMillis
(year, monthOfYear, 28,
hourOfDay, minuteOfHour, secondOfMinute, millisOfSecond);
if (instant >= iCutoverMillis) {
throw ex;
}
}
if (instant < iCutoverMillis) {
// Maybe it's Julian.
instant = iJulianChronology.getDateTimeMillis
(year, monthOfYear, dayOfMonth,
hourOfDay, minuteOfHour, secondOfMinute, millisOfSecond);
if (instant >= iCutoverMillis) {
// Okay, it's in the illegal cutover gap.
throw new IllegalArgumentException("Specified date does not exist");
}
}
return instant;
} | src/main/java/org/joda/time/chrono/GJChronology.java |
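A sketch of the pre-cutover leap day this patch makes constructible, assuming Joda-Time's GJChronology; the class name is illustrative.

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.chrono.GJChronology;

public class Time18Sketch {
    public static void main(String[] args) {
        // 1500 is a leap year under the Julian rules in force before the 1582
        // cutover, but not under the proleptic Gregorian rules tried first.
        DateTime d = new DateTime(1500, 2, 29, 0, 0, GJChronology.getInstance(DateTimeZone.UTC));
        System.out.println(d);  // expected: 1500-02-29T00:00:00.000Z
    }
}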
Time-19 | public int getOffsetFromLocal(long instantLocal) {
// get the offset at instantLocal (first estimate)
final int offsetLocal = getOffset(instantLocal);
// adjust instantLocal using the estimate and recalc the offset
final long instantAdjusted = instantLocal - offsetLocal;
final int offsetAdjusted = getOffset(instantAdjusted);
// if the offsets differ, we must be near a DST boundary
if (offsetLocal != offsetAdjusted) {
// we need to ensure that time is always after the DST gap
// this happens naturally for positive offsets, but not for negative
if ((offsetLocal - offsetAdjusted) < 0) {
// if we just return offsetAdjusted then the time is pushed
// back before the transition, whereas it should be
// on or after the transition
long nextLocal = nextTransition(instantAdjusted);
long nextAdjusted = nextTransition(instantLocal - offsetAdjusted);
if (nextLocal != nextAdjusted) {
return offsetLocal;
}
}
} else if (offsetLocal > 0) {
long prev = previousTransition(instantAdjusted);
if (prev < instantAdjusted) {
int offsetPrev = getOffset(prev);
int diff = offsetPrev - offsetLocal;
if (instantAdjusted - prev <= diff) {
return offsetPrev;
}
}
}
return offsetAdjusted;
}
public int getOffsetFromLocal(long instantLocal) {
// get the offset at instantLocal (first estimate)
final int offsetLocal = getOffset(instantLocal);
// adjust instantLocal using the estimate and recalc the offset
final long instantAdjusted = instantLocal - offsetLocal;
final int offsetAdjusted = getOffset(instantAdjusted);
// if the offsets differ, we must be near a DST boundary
if (offsetLocal != offsetAdjusted) {
// we need to ensure that time is always after the DST gap
// this happens naturally for positive offsets, but not for negative
if ((offsetLocal - offsetAdjusted) < 0) {
// if we just return offsetAdjusted then the time is pushed
// back before the transition, whereas it should be
// on or after the transition
long nextLocal = nextTransition(instantAdjusted);
long nextAdjusted = nextTransition(instantLocal - offsetAdjusted);
if (nextLocal != nextAdjusted) {
return offsetLocal;
}
}
} else if (offsetLocal >= 0) {
long prev = previousTransition(instantAdjusted);
if (prev < instantAdjusted) {
int offsetPrev = getOffset(prev);
int diff = offsetPrev - offsetLocal;
if (instantAdjusted - prev <= diff) {
return offsetPrev;
}
}
}
return offsetAdjusted;
} | src/main/java/org/joda/time/DateTimeZone.java |
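A sketch of the zero-offset case the relaxed comparison covers, assuming Joda-Time 2.x; London is chosen because its standard offset is exactly zero.

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class Time19Sketch {
    public static void main(String[] args) {
        // 01:30 local occurs twice when BST ends on 2012-10-28; with offsetLocal == 0
        // the earlier-offset branch now runs, so the summer offset is preferred.
        DateTimeZone london = DateTimeZone.forID("Europe/London");
        DateTime d = new DateTime(2012, 10, 28, 1, 30, london);
        System.out.println(d);  // expected to resolve to the earlier offset, +01:00
    }
}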
Time-20 | public int parseInto(DateTimeParserBucket bucket, String text, int position) {
String str = text.substring(position);
for (String id : ALL_IDS) {
if (str.startsWith(id)) {
bucket.setZone(DateTimeZone.forID(id));
return position + id.length();
}
}
return ~position;
}
public int parseInto(DateTimeParserBucket bucket, String text, int position) {
String str = text.substring(position);
String best = null;
for (String id : ALL_IDS) {
if (str.startsWith(id)) {
if (best == null || id.length() > best.length()) {
best = id;
}
}
}
if (best != null) {
bucket.setZone(DateTimeZone.forID(best));
return position + best.length();
}
return ~position;
} | src/main/java/org/joda/time/format/DateTimeFormatterBuilder.java |
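A sketch of the longest-match behaviour, on the assumption that the "ZZZ" pattern letter maps to this zone-id parser via DateTimeFormat; the ids used are standard tz database entries.

import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

public class Time20Sketch {
    public static void main(String[] args) {
        // "America/Dawson_Creek" shares a prefix with "America/Dawson"; a
        // first-match scan stopped at the shorter id and left "_Creek" unparsed.
        DateTimeFormatter f = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm ZZZ");
        DateTime d = f.parseDateTime("2012-06-01 10:00 America/Dawson_Creek");
        System.out.println(d.getZone());  // expected: America/Dawson_Creek
    }
}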
Time-22 | protected BasePeriod(long duration) {
this(duration, null, null);
// bug [3264409]
}
protected BasePeriod(long duration) {
super();
// bug [3264409]
iType = PeriodType.time();
int[] values = ISOChronology.getInstanceUTC().get(this, duration);
iType = PeriodType.standard();
iValues = new int[8];
System.arraycopy(values, 0, iValues, 4, 4);
} | src/main/java/org/joda/time/base/BasePeriod.java |
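A sketch of the duration constructor's behaviour after bug [3264409], assuming Joda-Time 2.x: only the precise time fields are populated.

import org.joda.time.Period;

public class Time22Sketch {
    public static void main(String[] args) {
        // Two days' worth of milliseconds becomes 48 hours, not 2 days, because
        // days and weeks are not precise across time zones.
        Period p = new Period(2L * 24 * 60 * 60 * 1000);
        System.out.println(p);  // expected: PT48H
    }
}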
Time-23 | private static synchronized String getConvertedId(String id) {
Map<String, String> map = cZoneIdConversion;
if (map == null) {
// Backwards compatibility with TimeZone.
map = new HashMap<String, String>();
map.put("GMT", "UTC");
map.put("MIT", "Pacific/Apia");
map.put("HST", "Pacific/Honolulu"); // JDK 1.1 compatible
map.put("AST", "America/Anchorage");
map.put("PST", "America/Los_Angeles");
map.put("MST", "America/Denver"); // JDK 1.1 compatible
map.put("PNT", "America/Phoenix");
map.put("CST", "America/Chicago");
map.put("EST", "America/New_York"); // JDK 1.1 compatible
map.put("IET", "America/Indianapolis");
map.put("PRT", "America/Puerto_Rico");
map.put("CNT", "America/St_Johns");
map.put("AGT", "America/Buenos_Aires");
map.put("BET", "America/Sao_Paulo");
map.put("WET", "Europe/London");
map.put("ECT", "Europe/Paris");
map.put("ART", "Africa/Cairo");
map.put("CAT", "Africa/Harare");
map.put("EET", "Europe/Bucharest");
map.put("EAT", "Africa/Addis_Ababa");
map.put("MET", "Asia/Tehran");
map.put("NET", "Asia/Yerevan");
map.put("PLT", "Asia/Karachi");
map.put("IST", "Asia/Calcutta");
map.put("BST", "Asia/Dhaka");
map.put("VST", "Asia/Saigon");
map.put("CTT", "Asia/Shanghai");
map.put("JST", "Asia/Tokyo");
map.put("ACT", "Australia/Darwin");
map.put("AET", "Australia/Sydney");
map.put("SST", "Pacific/Guadalcanal");
map.put("NST", "Pacific/Auckland");
cZoneIdConversion = map;
}
return map.get(id);
}
private static synchronized String getConvertedId(String id) {
Map<String, String> map = cZoneIdConversion;
if (map == null) {
// Backwards compatibility with TimeZone.
map = new HashMap<String, String>();
map.put("GMT", "UTC");
map.put("WET", "WET");
map.put("CET", "CET");
map.put("MET", "CET");
map.put("ECT", "CET");
map.put("EET", "EET");
map.put("MIT", "Pacific/Apia");
map.put("HST", "Pacific/Honolulu"); // JDK 1.1 compatible
map.put("AST", "America/Anchorage");
map.put("PST", "America/Los_Angeles");
map.put("MST", "America/Denver"); // JDK 1.1 compatible
map.put("PNT", "America/Phoenix");
map.put("CST", "America/Chicago");
map.put("EST", "America/New_York"); // JDK 1.1 compatible
map.put("IET", "America/Indiana/Indianapolis");
map.put("PRT", "America/Puerto_Rico");
map.put("CNT", "America/St_Johns");
map.put("AGT", "America/Argentina/Buenos_Aires");
map.put("BET", "America/Sao_Paulo");
map.put("ART", "Africa/Cairo");
map.put("CAT", "Africa/Harare");
map.put("EAT", "Africa/Addis_Ababa");
map.put("NET", "Asia/Yerevan");
map.put("PLT", "Asia/Karachi");
map.put("IST", "Asia/Kolkata");
map.put("BST", "Asia/Dhaka");
map.put("VST", "Asia/Ho_Chi_Minh");
map.put("CTT", "Asia/Shanghai");
map.put("JST", "Asia/Tokyo");
map.put("ACT", "Australia/Darwin");
map.put("AET", "Australia/Sydney");
map.put("SST", "Pacific/Guadalcanal");
map.put("NST", "Pacific/Auckland");
cZoneIdConversion = map;
}
return map.get(id);
} | src/main/java/org/joda/time/DateTimeZone.java |
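A sketch of the revised alias table as seen through forTimeZone, assuming the running JDK still recognises the legacy three-letter ids used below.

import java.util.TimeZone;
import org.joda.time.DateTimeZone;

public class Time23Sketch {
    public static void main(String[] args) {
        // "WET" now maps to the Olson zone "WET" rather than "Europe/London",
        // whose daylight-saving rules differ.
        System.out.println(DateTimeZone.forTimeZone(TimeZone.getTimeZone("WET")));
        // Legacy JDK aliases still resolve, but to the modern region ids.
        System.out.println(DateTimeZone.forTimeZone(TimeZone.getTimeZone("AGT")));  // expected: America/Argentina/Buenos_Aires
    }
}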
Time-24 | public long computeMillis(boolean resetFields, String text) {
SavedField[] savedFields = iSavedFields;
int count = iSavedFieldsCount;
if (iSavedFieldsShared) {
iSavedFields = savedFields = (SavedField[])iSavedFields.clone();
iSavedFieldsShared = false;
}
sort(savedFields, count);
if (count > 0) {
// alter base year for parsing if first field is month or day
DurationField months = DurationFieldType.months().getField(iChrono);
DurationField days = DurationFieldType.days().getField(iChrono);
DurationField first = savedFields[0].iField.getDurationField();
if (compareReverse(first, months) >= 0 && compareReverse(first, days) <= 0) {
saveField(DateTimeFieldType.year(), iDefaultYear);
return computeMillis(resetFields, text);
}
}
long millis = iMillis;
try {
for (int i = 0; i < count; i++) {
millis = savedFields[i].set(millis, resetFields);
}
} catch (IllegalFieldValueException e) {
if (text != null) {
e.prependMessage("Cannot parse \"" + text + '"');
}
throw e;
}
if (iZone == null) {
millis -= iOffset;
} else {
int offset = iZone.getOffsetFromLocal(millis);
millis -= offset;
if (offset != iZone.getOffset(millis)) {
String message =
"Illegal instant due to time zone offset transition (" + iZone + ')';
if (text != null) {
message = "Cannot parse \"" + text + "\": " + message;
}
throw new IllegalArgumentException(message);
}
}
return millis;
}
public long computeMillis(boolean resetFields, String text) {
SavedField[] savedFields = iSavedFields;
int count = iSavedFieldsCount;
if (iSavedFieldsShared) {
iSavedFields = savedFields = (SavedField[])iSavedFields.clone();
iSavedFieldsShared = false;
}
sort(savedFields, count);
if (count > 0) {
// alter base year for parsing if first field is month or day
DurationField months = DurationFieldType.months().getField(iChrono);
DurationField days = DurationFieldType.days().getField(iChrono);
DurationField first = savedFields[0].iField.getDurationField();
if (compareReverse(first, months) >= 0 && compareReverse(first, days) <= 0) {
saveField(DateTimeFieldType.year(), iDefaultYear);
return computeMillis(resetFields, text);
}
}
long millis = iMillis;
try {
for (int i = 0; i < count; i++) {
millis = savedFields[i].set(millis, resetFields);
}
if (resetFields) {
for (int i = 0; i < count; i++) {
millis = savedFields[i].set(millis, i == (count - 1));
}
}
} catch (IllegalFieldValueException e) {
if (text != null) {
e.prependMessage("Cannot parse \"" + text + '"');
}
throw e;
}
if (iZone == null) {
millis -= iOffset;
} else {
int offset = iZone.getOffsetFromLocal(millis);
millis -= offset;
if (offset != iZone.getOffset(millis)) {
String message =
"Illegal instant due to time zone offset transition (" + iZone + ')';
if (text != null) {
message = "Cannot parse \"" + text + "\": " + message;
}
throw new IllegalArgumentException(message);
}
}
return millis;
} | src/main/java/org/joda/time/format/DateTimeParserBucket.java |
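A hedged sketch of a parse that reaches the second field-application pass, assuming Joda-Time 2.x; the expected value reflects the regression this patch addresses and may depend on the chronology in use.

import org.joda.time.LocalDate;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

public class Time24Sketch {
    public static void main(String[] args) {
        // Mixing month-of-year with week-of-weekyear is what used to make the
        // result drift into the previous ISO week-year.
        DateTimeFormatter f = DateTimeFormat.forPattern("yyyy-MM-ww");
        LocalDate parsed = f.parseLocalDate("2010-01-01");
        System.out.println(parsed);  // expected to stay in early January 2010 (2010-01-04)
    }
}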
Time-25 | public int getOffsetFromLocal(long instantLocal) {
// get the offset at instantLocal (first estimate)
final int offsetLocal = getOffset(instantLocal);
// adjust instantLocal using the estimate and recalc the offset
final long instantAdjusted = instantLocal - offsetLocal;
final int offsetAdjusted = getOffset(instantAdjusted);
// if the offsets differ, we must be near a DST boundary
if (offsetLocal != offsetAdjusted) {
// we need to ensure that time is always after the DST gap
// this happens naturally for positive offsets, but not for negative
if ((offsetLocal - offsetAdjusted) < 0) {
// if we just return offsetAdjusted then the time is pushed
// back before the transition, whereas it should be
// on or after the transition
long nextLocal = nextTransition(instantAdjusted);
long nextAdjusted = nextTransition(instantLocal - offsetAdjusted);
if (nextLocal != nextAdjusted) {
return offsetLocal;
}
}
}
return offsetAdjusted;
}
public int getOffsetFromLocal(long instantLocal) {
// get the offset at instantLocal (first estimate)
final int offsetLocal = getOffset(instantLocal);
// adjust instantLocal using the estimate and recalc the offset
final long instantAdjusted = instantLocal - offsetLocal;
final int offsetAdjusted = getOffset(instantAdjusted);
// if the offsets differ, we must be near a DST boundary
if (offsetLocal != offsetAdjusted) {
// we need to ensure that time is always after the DST gap
// this happens naturally for positive offsets, but not for negative
if ((offsetLocal - offsetAdjusted) < 0) {
// if we just return offsetAdjusted then the time is pushed
// back before the transition, whereas it should be
// on or after the transition
long nextLocal = nextTransition(instantAdjusted);
long nextAdjusted = nextTransition(instantLocal - offsetAdjusted);
if (nextLocal != nextAdjusted) {
return offsetLocal;
}
}
} else if (offsetLocal > 0) {
long prev = previousTransition(instantAdjusted);
if (prev < instantAdjusted) {
int offsetPrev = getOffset(prev);
int diff = offsetPrev - offsetLocal;
if (instantAdjusted - prev <= diff) {
return offsetPrev;
}
}
}
return offsetAdjusted;
} | src/main/java/org/joda/time/DateTimeZone.java |
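A sketch of the autumn-overlap resolution added by the new branch, assuming Joda-Time and the 2007 Russian DST rules in the bundled tz data.

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class Time25Sketch {
    public static void main(String[] args) {
        // Moscow's clocks fell back on 2007-10-28, so 02:30 local occurred twice;
        // the ambiguous time now resolves to the earlier (summer) offset.
        DateTimeZone moscow = DateTimeZone.forID("Europe/Moscow");
        DateTime d = new DateTime(2007, 10, 28, 2, 30, moscow);
        System.out.println(d);  // expected offset +04:00 rather than +03:00
    }
}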
Time-27 | private static PeriodFormatter toFormatter(List<Object> elementPairs, boolean notPrinter, boolean notParser) {
if (notPrinter && notParser) {
throw new IllegalStateException("Builder has created neither a printer nor a parser");
}
int size = elementPairs.size();
if (size >= 2 && elementPairs.get(0) instanceof Separator) {
Separator sep = (Separator) elementPairs.get(0);
PeriodFormatter f = toFormatter(elementPairs.subList(2, size), notPrinter, notParser);
sep = sep.finish(f.getPrinter(), f.getParser());
return new PeriodFormatter(sep, sep);
}
Object[] comp = createComposite(elementPairs);
if (notPrinter) {
return new PeriodFormatter(null, (PeriodParser) comp[1]);
} else if (notParser) {
return new PeriodFormatter((PeriodPrinter) comp[0], null);
} else {
return new PeriodFormatter((PeriodPrinter) comp[0], (PeriodParser) comp[1]);
}
}
private static PeriodFormatter toFormatter(List<Object> elementPairs, boolean notPrinter, boolean notParser) {
if (notPrinter && notParser) {
throw new IllegalStateException("Builder has created neither a printer nor a parser");
}
int size = elementPairs.size();
if (size >= 2 && elementPairs.get(0) instanceof Separator) {
Separator sep = (Separator) elementPairs.get(0);
if (sep.iAfterParser == null && sep.iAfterPrinter == null) {
PeriodFormatter f = toFormatter(elementPairs.subList(2, size), notPrinter, notParser);
sep = sep.finish(f.getPrinter(), f.getParser());
return new PeriodFormatter(sep, sep);
}
}
Object[] comp = createComposite(elementPairs);
if (notPrinter) {
return new PeriodFormatter(null, (PeriodParser) comp[1]);
} else if (notParser) {
return new PeriodFormatter((PeriodPrinter) comp[0], null);
} else {
return new PeriodFormatter((PeriodPrinter) comp[0], (PeriodParser) comp[1]);
}
} | src/main/java/org/joda/time/format/PeriodFormatterBuilder.java |
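A tentative sketch of one way to reach the guarded branch, on the assumption that ISOPeriodFormat.standard() is backed by an already-finished Separator as the code above suggests; with the fix in place the wrapped formatter should parse cleanly.

import org.joda.time.format.ISOPeriodFormat;
import org.joda.time.format.PeriodFormatter;
import org.joda.time.format.PeriodFormatterBuilder;

public class Time27Sketch {
    public static void main(String[] args) {
        // Wrapping the cached ISO formatter must not re-finish its separator,
        // otherwise the part after "T" is lost and parsing fails.
        PeriodFormatter wrapped = new PeriodFormatterBuilder()
                .append(ISOPeriodFormat.standard())
                .toFormatter();
        System.out.println(wrapped.parsePeriod("PT1H30M"));  // expected: PT1H30M
    }
}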
Time-4 | public Partial with(DateTimeFieldType fieldType, int value) {
if (fieldType == null) {
throw new IllegalArgumentException("The field type must not be null");
}
int index = indexOf(fieldType);
if (index == -1) {
DateTimeFieldType[] newTypes = new DateTimeFieldType[iTypes.length + 1];
int[] newValues = new int[newTypes.length];
// find correct insertion point to keep largest-smallest order
int i = 0;
DurationField unitField = fieldType.getDurationType().getField(iChronology);
if (unitField.isSupported()) {
for (; i < iTypes.length; i++) {
DateTimeFieldType loopType = iTypes[i];
DurationField loopUnitField = loopType.getDurationType().getField(iChronology);
if (loopUnitField.isSupported()) {
int compare = unitField.compareTo(loopUnitField);
if (compare > 0) {
break;
} else if (compare == 0) {
DurationField rangeField = fieldType.getRangeDurationType().getField(iChronology);
DurationField loopRangeField = loopType.getRangeDurationType().getField(iChronology);
if (rangeField.compareTo(loopRangeField) > 0) {
break;
}
}
}
}
}
System.arraycopy(iTypes, 0, newTypes, 0, i);
System.arraycopy(iValues, 0, newValues, 0, i);
newTypes[i] = fieldType;
newValues[i] = value;
System.arraycopy(iTypes, i, newTypes, i + 1, newTypes.length - i - 1);
System.arraycopy(iValues, i, newValues, i + 1, newValues.length - i - 1);
// use public constructor to ensure full validation
// this isn't overly efficient, but is safe
Partial newPartial = new Partial(iChronology, newTypes, newValues);
iChronology.validate(newPartial, newValues);
return newPartial;
}
if (value == getValue(index)) {
return this;
}
int[] newValues = getValues();
newValues = getField(index).set(this, index, newValues, value);
return new Partial(this, newValues);
}
public Partial with(DateTimeFieldType fieldType, int value) {
if (fieldType == null) {
throw new IllegalArgumentException("The field type must not be null");
}
int index = indexOf(fieldType);
if (index == -1) {
DateTimeFieldType[] newTypes = new DateTimeFieldType[iTypes.length + 1];
int[] newValues = new int[newTypes.length];
// find correct insertion point to keep largest-smallest order
int i = 0;
DurationField unitField = fieldType.getDurationType().getField(iChronology);
if (unitField.isSupported()) {
for (; i < iTypes.length; i++) {
DateTimeFieldType loopType = iTypes[i];
DurationField loopUnitField = loopType.getDurationType().getField(iChronology);
if (loopUnitField.isSupported()) {
int compare = unitField.compareTo(loopUnitField);
if (compare > 0) {
break;
} else if (compare == 0) {
DurationField rangeField = fieldType.getRangeDurationType().getField(iChronology);
DurationField loopRangeField = loopType.getRangeDurationType().getField(iChronology);
if (rangeField.compareTo(loopRangeField) > 0) {
break;
}
}
}
}
}
System.arraycopy(iTypes, 0, newTypes, 0, i);
System.arraycopy(iValues, 0, newValues, 0, i);
newTypes[i] = fieldType;
newValues[i] = value;
System.arraycopy(iTypes, i, newTypes, i + 1, newTypes.length - i - 1);
System.arraycopy(iValues, i, newValues, i + 1, newValues.length - i - 1);
// use public constructor to ensure full validation
// this isn't overly efficient, but is safe
Partial newPartial = new Partial(newTypes, newValues, iChronology);
iChronology.validate(newPartial, newValues);
return newPartial;
}
if (value == getValue(index)) {
return this;
}
int[] newValues = getValues();
newValues = getField(index).set(this, index, newValues, value);
return new Partial(this, newValues);
} | src/main/java/org/joda/time/Partial.java |
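A sketch of with() inserting a coarser field, assuming Joda-Time 2.x; it exercises the ordering logic and the validation now performed through the public constructor, and the class name is illustrative.

import org.joda.time.DateTimeFieldType;
import org.joda.time.Partial;

public class Time4Sketch {
    public static void main(String[] args) {
        Partial dayOnly = new Partial(DateTimeFieldType.dayOfMonth(), 15);
        // monthOfYear is coarser than dayOfMonth, so it is inserted first and the
        // combined partial is validated as a whole.
        Partial monthDay = dayOnly.with(DateTimeFieldType.monthOfYear(), 6);
        System.out.println(monthDay.getFieldTypes()[0]);  // expected: monthOfYear
        System.out.println(monthDay);
    }
}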
Time-5 | public Period normalizedStandard(PeriodType type) {
type = DateTimeUtils.getPeriodType(type);
long millis = getMillis(); // no overflow can happen, even with Integer.MAX_VALUEs
millis += (((long) getSeconds()) * ((long) DateTimeConstants.MILLIS_PER_SECOND));
millis += (((long) getMinutes()) * ((long) DateTimeConstants.MILLIS_PER_MINUTE));
millis += (((long) getHours()) * ((long) DateTimeConstants.MILLIS_PER_HOUR));
millis += (((long) getDays()) * ((long) DateTimeConstants.MILLIS_PER_DAY));
millis += (((long) getWeeks()) * ((long) DateTimeConstants.MILLIS_PER_WEEK));
Period result = new Period(millis, type, ISOChronology.getInstanceUTC());
int years = getYears();
int months = getMonths();
if (years != 0 || months != 0) {
years = FieldUtils.safeAdd(years, months / 12);
months = months % 12;
if (years != 0) {
result = result.withYears(years);
}
if (months != 0) {
result = result.withMonths(months);
}
}
return result;
}
public Period normalizedStandard(PeriodType type) {
type = DateTimeUtils.getPeriodType(type);
long millis = getMillis(); // no overflow can happen, even with Integer.MAX_VALUEs
millis += (((long) getSeconds()) * ((long) DateTimeConstants.MILLIS_PER_SECOND));
millis += (((long) getMinutes()) * ((long) DateTimeConstants.MILLIS_PER_MINUTE));
millis += (((long) getHours()) * ((long) DateTimeConstants.MILLIS_PER_HOUR));
millis += (((long) getDays()) * ((long) DateTimeConstants.MILLIS_PER_DAY));
millis += (((long) getWeeks()) * ((long) DateTimeConstants.MILLIS_PER_WEEK));
Period result = new Period(millis, type, ISOChronology.getInstanceUTC());
int years = getYears();
int months = getMonths();
if (years != 0 || months != 0) {
long totalMonths = years * 12L + months;
if (type.isSupported(DurationFieldType.YEARS_TYPE)) {
int normalizedYears = FieldUtils.safeToInt(totalMonths / 12);
result = result.withYears(normalizedYears);
totalMonths = totalMonths - (normalizedYears * 12);
}
if (type.isSupported(DurationFieldType.MONTHS_TYPE)) {
int normalizedMonths = FieldUtils.safeToInt(totalMonths);
result = result.withMonths(normalizedMonths);
totalMonths = totalMonths - normalizedMonths;
}
if (totalMonths != 0) {
throw new UnsupportedOperationException("Unable to normalize as PeriodType is missing either years or months but period has a month/year amount: " + toString());
}
}
return result;
} | src/main/java/org/joda/time/Period.java |
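A sketch of the two normalisation outcomes introduced here, assuming Joda-Time 2.x: month amounts are carried as a long total when the target type supports them, and rejected loudly when it does not.

import org.joda.time.Period;
import org.joda.time.PeriodType;

public class Time5Sketch {
    public static void main(String[] args) {
        // 1 year and 15 months normalises to 2 years and 3 months.
        Period p = new Period(1, 15, 0, 0, 0, 0, 0, 0);
        System.out.println(p.normalizedStandard(PeriodType.yearMonthDay()));  // expected: P2Y3M
        // A type without years or months cannot absorb them, so this now fails
        // instead of silently dropping the month amount.
        try {
            p.normalizedStandard(PeriodType.dayTime());
        } catch (UnsupportedOperationException expected) {
            System.out.println("rejected: " + expected.getMessage());
        }
    }
}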
Time-7 | public int parseInto(ReadWritableInstant instant, String text, int position) {
DateTimeParser parser = requireParser();
if (instant == null) {
throw new IllegalArgumentException("Instant must not be null");
}
long instantMillis = instant.getMillis();
Chronology chrono = instant.getChronology();
long instantLocal = instantMillis + chrono.getZone().getOffset(instantMillis);
chrono = selectChronology(chrono);
int defaultYear = chrono.year().get(instantLocal);
DateTimeParserBucket bucket = new DateTimeParserBucket(
instantLocal, chrono, iLocale, iPivotYear, defaultYear);
int newPos = parser.parseInto(bucket, text, position);
instant.setMillis(bucket.computeMillis(false, text));
if (iOffsetParsed && bucket.getOffsetInteger() != null) {
int parsedOffset = bucket.getOffsetInteger();
DateTimeZone parsedZone = DateTimeZone.forOffsetMillis(parsedOffset);
chrono = chrono.withZone(parsedZone);
} else if (bucket.getZone() != null) {
chrono = chrono.withZone(bucket.getZone());
}
instant.setChronology(chrono);
if (iZone != null) {
instant.setZone(iZone);
}
return newPos;
}
public int parseInto(ReadWritableInstant instant, String text, int position) {
DateTimeParser parser = requireParser();
if (instant == null) {
throw new IllegalArgumentException("Instant must not be null");
}
long instantMillis = instant.getMillis();
Chronology chrono = instant.getChronology();
int defaultYear = DateTimeUtils.getChronology(chrono).year().get(instantMillis);
long instantLocal = instantMillis + chrono.getZone().getOffset(instantMillis);
chrono = selectChronology(chrono);
DateTimeParserBucket bucket = new DateTimeParserBucket(
instantLocal, chrono, iLocale, iPivotYear, defaultYear);
int newPos = parser.parseInto(bucket, text, position);
instant.setMillis(bucket.computeMillis(false, text));
if (iOffsetParsed && bucket.getOffsetInteger() != null) {
int parsedOffset = bucket.getOffsetInteger();
DateTimeZone parsedZone = DateTimeZone.forOffsetMillis(parsedOffset);
chrono = chrono.withZone(parsedZone);
} else if (bucket.getZone() != null) {
chrono = chrono.withZone(bucket.getZone());
}
instant.setChronology(chrono);
if (iZone != null) {
instant.setZone(iZone);
}
return newPos;
} | src/main/java/org/joda/time/format/DateTimeFormatter.java |
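A sketch of the year-boundary case this refinement targets, assuming Joda-Time 2.x: at local midnight on New Year's Day in New York the instant's local and UTC-adjusted years differ, and the default year is now read from the instant itself.

import org.joda.time.DateTimeZone;
import org.joda.time.MutableDateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

public class Time7Sketch {
    public static void main(String[] args) {
        DateTimeZone newYork = DateTimeZone.forID("America/New_York");
        MutableDateTime target = new MutableDateTime(2004, 1, 1, 0, 0, 0, 0, newYork);
        DateTimeFormatter f = DateTimeFormat.forPattern("M d");
        // The leap day parses against 2004; the previous default-year calculation
        // slipped back to 2003 for this instant and rejected February 29.
        f.parseInto(target, "2 29", 0);
        System.out.println(target);  // expected: 2004-02-29T00:00:00.000-05:00
    }
}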
Time-8 | public static DateTimeZone forOffsetHoursMinutes(int hoursOffset, int minutesOffset) throws IllegalArgumentException {
if (hoursOffset == 0 && minutesOffset == 0) {
return DateTimeZone.UTC;
}
if (hoursOffset < -23 || hoursOffset > 23) {
throw new IllegalArgumentException("Hours out of range: " + hoursOffset);
}
if (minutesOffset < 0 || minutesOffset > 59) {
throw new IllegalArgumentException("Minutes out of range: " + minutesOffset);
}
int offset = 0;
try {
int hoursInMinutes = hoursOffset * 60;
if (hoursInMinutes < 0) {
minutesOffset = hoursInMinutes - minutesOffset;
} else {
minutesOffset = hoursInMinutes + minutesOffset;
}
offset = FieldUtils.safeMultiply(minutesOffset, DateTimeConstants.MILLIS_PER_MINUTE);
} catch (ArithmeticException ex) {
throw new IllegalArgumentException("Offset is too large");
}
return forOffsetMillis(offset);
}
public static DateTimeZone forOffsetHoursMinutes(int hoursOffset, int minutesOffset) throws IllegalArgumentException {
if (hoursOffset == 0 && minutesOffset == 0) {
return DateTimeZone.UTC;
}
if (hoursOffset < -23 || hoursOffset > 23) {
throw new IllegalArgumentException("Hours out of range: " + hoursOffset);
}
if (minutesOffset < -59 || minutesOffset > 59) {
throw new IllegalArgumentException("Minutes out of range: " + minutesOffset);
}
if (hoursOffset > 0 && minutesOffset < 0) {
throw new IllegalArgumentException("Positive hours must not have negative minutes: " + minutesOffset);
}
int offset = 0;
try {
int hoursInMinutes = hoursOffset * 60;
if (hoursInMinutes < 0) {
minutesOffset = hoursInMinutes - Math.abs(minutesOffset);
} else {
minutesOffset = hoursInMinutes + minutesOffset;
}
offset = FieldUtils.safeMultiply(minutesOffset, DateTimeConstants.MILLIS_PER_MINUTE);
} catch (ArithmeticException ex) {
throw new IllegalArgumentException("Offset is too large");
}
return forOffsetMillis(offset);
} | src/main/java/org/joda/time/DateTimeZone.java |
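A sketch of the widened argument handling, assuming Joda-Time 2.x; the printed ids are the fixed-offset zone ids produced by forOffsetMillis.

import org.joda.time.DateTimeZone;

public class Time8Sketch {
    public static void main(String[] args) {
        // The minutes argument is treated as a magnitude when hours are negative.
        System.out.println(DateTimeZone.forOffsetHoursMinutes(-3, 30));  // expected: -03:30
        // Negative minutes are now accepted when hours are zero or negative.
        System.out.println(DateTimeZone.forOffsetHoursMinutes(0, -15));  // expected: -00:15
        // Mixing positive hours with negative minutes is rejected.
        try {
            DateTimeZone.forOffsetHoursMinutes(2, -30);
        } catch (IllegalArgumentException expected) {
            System.out.println("rejected: " + expected.getMessage());
        }
    }
}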