// GcvInitCameraMatrix2D takes one 3-by-N matrix and one 2-by-N Matrix as input. // Each column in the input matrix represents a point in real world (objPts) or // in image (imgPts). // Return: the camera matrix. func GcvInitCameraMatrix2D(objPts, imgPts *mat64.Dense, dims [2]int, aspectRatio float64) (camMat *mat64.Dense) { objDim, nObjPts := objPts.Dims() imgDim, nImgPts := imgPts.Dims() if objDim != 3 || imgDim != 2 || nObjPts != nImgPts { panic("Invalid dimensions for objPts and imgPts") } objPtsVec := NewGcvPoint3f32Vector(int64(nObjPts)) imgPtsVec := NewGcvPoint2f32Vector(int64(nObjPts)) for j := 0; j < nObjPts; j++ { objPtsVec.Set(j, NewGcvPoint3f32(mat64.Col(nil, j, objPts)...)) } for j := 0; j < nObjPts; j++ { imgPtsVec.Set(j, NewGcvPoint2f32(mat64.Col(nil, j, imgPts)...)) } _imgSize := NewGcvSize2i(dims[0], dims[1]) camMat = GcvMatToMat64(GcvInitCameraMatrix2D_( objPtsVec, imgPtsVec, _imgSize, aspectRatio)) return camMat }
// TestMarginal checks Normal.MarginalNormal against sample estimates: the
// marginal mean and covariance over the kept indices should match the moments
// estimated from a large number of draws of the full distribution.
func TestMarginal(t *testing.T) {
	for _, test := range []struct {
		mu       []float64
		sigma    *mat64.SymDense
		marginal []int
	}{
		{
			mu:       []float64{2, 3, 4},
			sigma:    mat64.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}),
			marginal: []int{0},
		},
		{
			mu:       []float64{2, 3, 4},
			sigma:    mat64.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}),
			marginal: []int{0, 2},
		},
		{
			mu:       []float64{2, 3, 4, 5},
			sigma:    mat64.NewSymDense(4, []float64{2, 0.5, 3, 0.1, 0.5, 1, 0.6, 0.2, 3, 0.6, 10, 0.3, 0.1, 0.2, 0.3, 3}),
			marginal: []int{0, 3},
		},
	} {
		normal, ok := NewNormal(test.mu, test.sigma, nil)
		if !ok {
			t.Fatalf("Bad test, covariance matrix not positive definite")
		}
		marginal, ok := normal.MarginalNormal(test.marginal, nil)
		if !ok {
			t.Fatalf("Bad test, marginal matrix not positive definite")
		}
		// Draw many samples from the full distribution and estimate moments.
		dim := normal.Dim()
		nSamples := 1000000
		samps := mat64.NewDense(nSamples, dim, nil)
		for i := 0; i < nSamples; i++ {
			normal.Rand(samps.RawRowView(i))
		}
		estMean := make([]float64, dim)
		for i := range estMean {
			estMean[i] = stat.Mean(mat64.Col(nil, i, samps), nil)
		}
		// Element i of the marginal mean should match the sample mean of the
		// kept index v in the full distribution.
		for i, v := range test.marginal {
			if math.Abs(marginal.mu[i]-estMean[v]) > 1e-2 {
				t.Errorf("Mean mismatch: want: %v, got %v", estMean[v], marginal.mu[i])
			}
		}
		marginalCov := marginal.CovarianceMatrix(nil)
		estCov := stat.CovarianceMatrix(nil, samps, nil)
		// Likewise, element (i, j) of the marginal covariance should match
		// element (v1, v2) of the full sample covariance.
		for i, v1 := range test.marginal {
			for j, v2 := range test.marginal {
				c := marginalCov.At(i, j)
				ec := estCov.At(v1, v2)
				if math.Abs(c-ec) > 5e-2 {
					t.Errorf("Cov mismatch element i = %d, j = %d: want: %v, got %v", i, j, c, ec)
				}
			}
		}
	}
}
// findLinearlyIndependent finds a set of linearly independent columns of A, and
// returns the column indexes of the linearly independent columns.
func findLinearlyIndependent(A mat64.Matrix) []int {
	m, n := A.Dims()
	idxs := make([]int, 0, m)
	// columns accumulates the independent columns found so far (at most m).
	columns := mat64.NewDense(m, m, nil)
	newCol := make([]float64, m)
	// Walk in reverse order because slack variables are typically the last columns
	// of A.
	for i := n - 1; i >= 0; i-- {
		if len(idxs) == m {
			// A full set of m independent columns has been found.
			break
		}
		mat64.Col(newCol, i, A)
		if len(idxs) == 0 {
			// A column is linearly independent from the null set.
			// This is what needs to be changed if zero columns are allowed, as
			// a column of all zeros is not linearly independent from itself.
			columns.SetCol(len(idxs), newCol)
			idxs = append(idxs, i)
			continue
		}
		// Skip columns that are dependent on those already collected.
		if linearlyDependent(mat64.NewVector(m, newCol), columns.View(0, 0, m, len(idxs))) {
			continue
		}
		columns.SetCol(len(idxs), newCol)
		idxs = append(idxs, i)
	}
	return idxs
}
// linearlyDependent returns whether the vector is linearly dependent // with the columns of A. It assumes that A is a full-rank matrix. func linearlyDependent(vec *mat64.Vector, A mat64.Matrix) bool { // Add vec to the columns of A, and see if the condition number is reasonable. m, n := A.Dims() aNew := mat64.NewDense(m, n+1, nil) aNew.Copy(A) col := mat64.Col(nil, 0, vec) aNew.SetCol(n, col) cond := mat64.Cond(aNew, 1) return cond > 1e12 }
// extractColumns creates a new matrix out of the columns of A specified by cols. // TODO(btracey): Allow this to take a receiver. func extractColumns(A mat64.Matrix, cols []int) *mat64.Dense { r, _ := A.Dims() sub := mat64.NewDense(r, len(cols), nil) col := make([]float64, r) for j, idx := range cols { mat64.Col(col, idx, A) sub.SetCol(j, col) } return sub }
// TestGcvCalibrateCamera exercises GcvCalibrateCamera on ten fixed 3D/2D
// point correspondences and checks the returned camera matrix, rotation
// vector, and translation vector against reference values (within DELTA).
func TestGcvCalibrateCamera(t *testing.T) {
	// Points are laid out one per row (10x3 / 10x2), then transposed in place
	// via Clone so the Gcv helpers receive the expected 3xN / 2xN layout.
	// (Dense.Clone copies, so cloning a view of the receiver is safe.)
	objPts := mat64.NewDense(10, 3, []float64{
		-1.482676, -1.419348, 1.166475,
		-0.043819, -0.729445, 1.212821,
		0.960825, 1.147328, 0.485541,
		1.738245, 0.597865, 1.026016,
		-0.430206, -1.281281, 0.870726,
		-1.627323, -2.203264, -0.381758,
		0.166347, -0.571246, 0.428893,
		0.376266, 0.213996, -0.299131,
		-0.226950, 0.942377, -0.899869,
		-1.148912, 0.093725, 0.634745,
	})
	objPts.Clone(objPts.T())

	imgPts := mat64.NewDense(10, 2, []float64{
		-0.384281, -0.299055,
		0.361833, 0.087737,
		1.370253, 1.753933,
		1.421390, 0.853312,
		0.107177, -0.443076,
		3.773328, 5.437829,
		0.624914, -0.280949,
		-0.825577, -0.245594,
		0.631444, -0.340257,
		-0.647580, 0.502113,
	})
	imgPts.Clone(imgPts.T())

	// Initial guess for the camera matrix, then refine via calibration with
	// zero initial distortion. 14575 is the flags value passed straight to
	// the binding — TODO confirm which OpenCV flag bits it encodes.
	camMat := GcvInitCameraMatrix2D(objPts, imgPts, [2]int{1920, 1080}, 1)
	distCoeffs := mat64.NewDense(5, 1, []float64{0, 0, 0, 0, 0})

	camMat, rvec, tvec := GcvCalibrateCamera(
		objPts, imgPts, camMat, distCoeffs, [2]int{1920, 1080}, 14575)

	assert.InDeltaSlice(t, []float64{-46.15296606, 0., 959.5}, mat64.Row(nil, 0, camMat), DELTA)
	assert.InDeltaSlice(t, []float64{0., -46.15296606, 539.5}, mat64.Row(nil, 1, camMat), DELTA)
	assert.InDeltaSlice(t, []float64{0., 0., 1.}, mat64.Row(nil, 2, camMat), DELTA)
	assert.InDeltaSlice(t, []float64{-0.98405029, -0.93443411, -0.26304667}, mat64.Col(nil, 0, rvec), DELTA)
	assert.InDeltaSlice(t, []float64{0.6804739, 0.47530207, -0.04833094}, mat64.Col(nil, 0, tvec), DELTA)
}
func getColumnVector(index int, M mat.Matrix) *mat.Vector { rows, _ := M.Dims() var colData []float64 if rows == 0 { colData = []float64{} } else { colData = mat.Col(nil, index, M) } return mat.NewVector(rows, colData) }
func TestNormRand(t *testing.T) { for _, test := range []struct { mean []float64 cov []float64 }{ { mean: []float64{0, 0}, cov: []float64{ 1, 0, 0, 1, }, }, { mean: []float64{0, 0}, cov: []float64{ 1, 0.9, 0.9, 1, }, }, { mean: []float64{6, 7}, cov: []float64{ 5, 0.9, 0.9, 2, }, }, } { dim := len(test.mean) cov := mat64.NewSymDense(dim, test.cov) n, ok := NewNormal(test.mean, cov, nil) if !ok { t.Errorf("bad covariance matrix") } nSamples := 1000000 samps := mat64.NewDense(nSamples, dim, nil) for i := 0; i < nSamples; i++ { n.Rand(samps.RawRowView(i)) } estMean := make([]float64, dim) for i := range estMean { estMean[i] = stat.Mean(mat64.Col(nil, i, samps), nil) } if !floats.EqualApprox(estMean, test.mean, 1e-2) { t.Errorf("Mean mismatch: want: %v, got %v", test.mean, estMean) } estCov := stat.CovarianceMatrix(nil, samps, nil) if !mat64.EqualApprox(estCov, cov, 1e-2) { t.Errorf("Cov mismatch: want: %v, got %v", cov, estCov) } } }
func GcvCalibrateCamera(objPts, imgPts, camMat, distCoeffs *mat64.Dense, dims [2]int, flags int) (calCamMat, rvec, tvec *mat64.Dense) { objDim, nObjPts := objPts.Dims() imgDim, nImgPts := imgPts.Dims() if objDim != 3 || imgDim != 2 || nObjPts != nImgPts { panic("Invalid dimensions for objPts and imgPts") } objPtsVec := NewGcvPoint3f32Vector(int64(nObjPts)) imgPtsVec := NewGcvPoint2f32Vector(int64(nObjPts)) for j := 0; j < nObjPts; j++ { objPtsVec.Set(j, NewGcvPoint3f32(mat64.Col(nil, j, objPts)...)) } for j := 0; j < nObjPts; j++ { imgPtsVec.Set(j, NewGcvPoint2f32(mat64.Col(nil, j, imgPts)...)) } _camMat := Mat64ToGcvMat(camMat) _distCoeffs := Mat64ToGcvMat(distCoeffs) _rvec := NewGcvMat() _tvec := NewGcvMat() _imgSize := NewGcvSize2i(dims[0], dims[1]) GcvCalibrateCamera_( objPtsVec, imgPtsVec, _imgSize, _camMat, _distCoeffs, _rvec, _tvec, flags) calCamMat = GcvMatToMat64(_camMat) rvec = GcvMatToMat64(_rvec) tvec = GcvMatToMat64(_tvec) return calCamMat, rvec, tvec }
func TestMat64(t *testing.T) { fm := readFm() dense := fm.Mat64(false, false) compareCol := func(i int, exp []float64) { col := mat64.Col(nil, i, dense) assert.Equal(t, len(col), len(exp)) for i := range exp { assert.Equal(t, col[i], exp[i]) } } compareCol(1, []float64{0, 0, 0, 0, 0, 1, 1, 1}) compareCol(2, []float64{0, 0, 0, 0, 0, 0, 0, 1}) }
// PrincipalComponents returns the principal component direction vectors and
// the column variances of the principal component scores, vecs * a, computed
// using the singular value decomposition of the input. The input a is an n×d
// matrix where each row is an observation and each column represents a variable.
//
// PrincipalComponents centers the variables but does not scale the variance.
//
// The slice weights is used to weight the observations. If weights is nil,
// each weight is considered to have a value of one, otherwise the length of
// weights must match the number of observations or PrincipalComponents will
// panic.
//
// On successful completion, the principal component direction vectors are
// returned in vecs as a d×min(n, d) matrix, and the variances are returned in
// vars as a min(n, d)-long slice in descending sort order.
//
// If no singular value decomposition is possible, vecs and vars are returned
// nil and ok is returned false.
func PrincipalComponents(a mat64.Matrix, weights []float64) (vecs *mat64.Dense, vars []float64, ok bool) {
	n, d := a.Dims()
	if weights != nil && len(weights) != n {
		panic("stat: len(weights) != observations")
	}

	// Center each column about its (weighted) mean.
	centered := mat64.NewDense(n, d, nil)
	col := make([]float64, n)
	for j := 0; j < d; j++ {
		mat64.Col(col, j, a)
		floats.AddConst(-Mean(col, weights), col)
		centered.SetCol(j, col)
	}
	// Scale each observation by sqrt(w) so the SVD of the scaled matrix
	// reflects the weighted covariance structure. Ranging over a nil weights
	// slice skips this loop entirely (uniform weights).
	for i, w := range weights {
		floats.Scale(math.Sqrt(w), centered.RawRowView(i))
	}

	// Only min(n, d) components exist; the thin SVD is cheaper when n > d.
	kind := matrix.SVDFull
	if n > d {
		kind = matrix.SVDThin
	}
	var svd mat64.SVD
	ok = svd.Factorize(centered, kind)
	if !ok {
		return nil, nil, false
	}

	vecs = &mat64.Dense{}
	vecs.VFromSVD(&svd)
	if n < d {
		// Don't retain columns that are not valid direction vectors.
		vecs.Clone(vecs.View(0, 0, d, n))
	}
	vars = svd.Values(nil)
	// Convert singular values to variances: var_i = f * s_i^2 with
	// f = 1/(sum(weights) - 1).
	var f float64
	if weights == nil {
		f = 1 / float64(n-1)
	} else {
		f = 1 / (floats.Sum(weights) - 1)
	}
	for i, v := range vars {
		vars[i] = f * v * v
	}
	return vecs, vars, true
}
func TestRejection(t *testing.T) { // Test by finding the expected value of a uniform. dim := 3 bounds := make([]distmv.Bound, dim) for i := 0; i < dim; i++ { min := rand.NormFloat64() max := rand.NormFloat64() if min > max { min, max = max, min } bounds[i].Min = min bounds[i].Max = max } target := distmv.NewUniform(bounds, nil) mu := target.Mean(nil) muImp := make([]float64, dim) sigmaImp := mat64.NewSymDense(dim, nil) for i := 0; i < dim; i++ { sigmaImp.SetSym(i, i, 6) } proposal, ok := distmv.NewNormal(muImp, sigmaImp, nil) if !ok { t.Fatal("bad test, sigma not pos def") } nSamples := 1000 batch := mat64.NewDense(nSamples, dim, nil) weights := make([]float64, nSamples) _, ok = Rejection(batch, target, proposal, 1000, nil) if !ok { t.Error("Bad test, nan samples") } for i := 0; i < dim; i++ { col := mat64.Col(nil, i, batch) ev := stat.Mean(col, weights) if math.Abs(ev-mu[i]) > 1e-2 { t.Errorf("Mean mismatch: Want %v, got %v", mu[i], ev) } } }
// computeMove computes how far can be moved replacing each index. The results
// are stored into move. minIdx indexes nonBasicIdx to select the entering
// variable, A is the full constraint matrix, ab holds the current basic
// columns, and xb is the current basic solution (ab^-1 b).
func computeMove(move []float64, minIdx int, A mat64.Matrix, ab *mat64.Dense, xb []float64, nonBasicIdx []int) error {
	// Find ae.
	col := mat64.Col(nil, nonBasicIdx[minIdx], A)
	aCol := mat64.NewVector(len(col), col)

	// d = - Ab^-1 Ae
	nb, _ := ab.Dims()
	d := make([]float64, nb)
	dVec := mat64.NewVector(nb, d)
	err := dVec.SolveVec(ab, aCol)
	if err != nil {
		return ErrLinSolve
	}
	floats.Scale(-1, d)

	// Flush tiny magnitudes to zero so numerical noise does not create
	// spurious movement directions.
	for i, v := range d {
		if math.Abs(v) < dRoundTol {
			d[i] = 0
		}
	}

	// If no di < 0, then problem is unbounded.
	if floats.Min(d) >= 0 {
		return ErrUnbounded
	}

	// move = bhat_i / - d_i, assuming d is negative.
	bHat := xb // ab^-1 b
	for i, v := range d {
		if v >= 0 {
			// Non-negative directions impose no limit on the move.
			move[i] = math.Inf(1)
		} else {
			move[i] = bHat[i] / math.Abs(v)
		}
	}
	return nil
}
func compareNormal(t *testing.T, want *distmv.Normal, batch *mat64.Dense, weights []float64) { dim := want.Dim() mu := want.Mean(nil) sigma := want.CovarianceMatrix(nil) n, _ := batch.Dims() if weights == nil { weights = make([]float64, n) for i := range weights { weights[i] = 1 } } for i := 0; i < dim; i++ { col := mat64.Col(nil, i, batch) ev := stat.Mean(col, weights) if math.Abs(ev-mu[i]) > 1e-2 { t.Errorf("Mean mismatch: Want %v, got %v", mu[i], ev) } } cov := stat.CovarianceMatrix(nil, batch, weights) if !mat64.EqualApprox(cov, sigma, 1.5e-1) { t.Errorf("Covariance matrix mismatch") } }
// TestCorrelationMatrix checks CorrelationMatrix against known answers,
// verifies the inputs are not mutated, cross-checks each element against
// Correlation, and confirms the documented panic conditions.
func TestCorrelationMatrix(t *testing.T) {
	for i, test := range []struct {
		data    *mat64.Dense
		weights []float64
		ans     *mat64.Dense
	}{
		{
			data: mat64.NewDense(3, 3, []float64{
				1, 2, 3,
				3, 4, 5,
				5, 6, 7,
			}),
			weights: nil,
			ans: mat64.NewDense(3, 3, []float64{
				1, 1, 1,
				1, 1, 1,
				1, 1, 1,
			}),
		},
		{
			data: mat64.NewDense(5, 2, []float64{
				-2, -4,
				-1, 2,
				0, 0,
				1, -2,
				2, 4,
			}),
			weights: nil,
			ans: mat64.NewDense(2, 2, []float64{
				1, 0.6,
				0.6, 1,
			}),
		},
		{
			data: mat64.NewDense(3, 2, []float64{
				1, 1,
				2, 4,
				3, 9,
			}),
			weights: []float64{
				1, 1.5, 1,
			},
			ans: mat64.NewDense(2, 2, []float64{
				1, 0.9868703275903379,
				0.9868703275903379, 1,
			}),
		},
	} {
		// Make a copy of the data to check that it isn't changing.
		r := test.data.RawMatrix()
		d := make([]float64, len(r.Data))
		copy(d, r.Data)
		w := make([]float64, len(test.weights))
		if test.weights != nil {
			copy(w, test.weights)
		}
		c := CorrelationMatrix(nil, test.data, test.weights)
		if !mat64.Equal(c, test.ans) {
			t.Errorf("%d: expected corr %v, found %v", i, test.ans, c)
		}
		if !floats.Equal(d, r.Data) {
			t.Errorf("%d: data was modified during execution", i)
		}
		if !floats.Equal(w, test.weights) {
			t.Errorf("%d: weights was modified during execution", i)
		}
		// Compare each element with a direct call to Correlation.
		_, cols := c.Dims()
		for ci := 0; ci < cols; ci++ {
			for cj := 0; cj < cols; cj++ {
				x := mat64.Col(nil, ci, test.data)
				y := mat64.Col(nil, cj, test.data)
				corr := Correlation(x, y, test.weights)
				if math.Abs(corr-c.At(ci, cj)) > 1e-14 {
					t.Errorf("CorrMat does not match at (%v, %v).\nWant %v, got %v.", ci, cj, corr, c.At(ci, cj))
				}
			}
		}
	}
	if !Panics(func() { CorrelationMatrix(nil, mat64.NewDense(5, 2, nil), []float64{}) }) {
		t.Errorf("CorrelationMatrix did not panic with weight size mismatch")
	}
	if !Panics(func() { CorrelationMatrix(mat64.NewDense(1, 1, nil), mat64.NewDense(5, 2, nil), nil) }) {
		t.Errorf("CorrelationMatrix did not panic with preallocation size mismatch")
	}
	if !Panics(func() { CorrelationMatrix(nil, mat64.NewDense(2, 2, []float64{1, 2, 3, 4}), []float64{1, -1}) }) {
		t.Errorf("CorrelationMatrix did not panic with negative weights")
	}
}
func TestConditionNormal(t *testing.T) { // Uncorrelated values shouldn't influence the updated values. for _, test := range []struct { mu []float64 sigma *mat64.SymDense observed []int values []float64 newMu []float64 newSigma *mat64.SymDense }{ { mu: []float64{2, 3}, sigma: mat64.NewSymDense(2, []float64{2, 0, 0, 5}), observed: []int{0}, values: []float64{10}, newMu: []float64{3}, newSigma: mat64.NewSymDense(1, []float64{5}), }, { mu: []float64{2, 3}, sigma: mat64.NewSymDense(2, []float64{2, 0, 0, 5}), observed: []int{1}, values: []float64{10}, newMu: []float64{2}, newSigma: mat64.NewSymDense(1, []float64{2}), }, { mu: []float64{2, 3, 4}, sigma: mat64.NewSymDense(3, []float64{2, 0, 0, 0, 5, 0, 0, 0, 10}), observed: []int{1}, values: []float64{10}, newMu: []float64{2, 4}, newSigma: mat64.NewSymDense(2, []float64{2, 0, 0, 10}), }, { mu: []float64{2, 3, 4}, sigma: mat64.NewSymDense(3, []float64{2, 0, 0, 0, 5, 0, 0, 0, 10}), observed: []int{0, 1}, values: []float64{10, 15}, newMu: []float64{4}, newSigma: mat64.NewSymDense(1, []float64{10}), }, { mu: []float64{2, 3, 4, 5}, sigma: mat64.NewSymDense(4, []float64{2, 0.5, 0, 0, 0.5, 5, 0, 0, 0, 0, 10, 2, 0, 0, 2, 3}), observed: []int{0, 1}, values: []float64{10, 15}, newMu: []float64{4, 5}, newSigma: mat64.NewSymDense(2, []float64{10, 2, 2, 3}), }, } { normal, ok := NewNormal(test.mu, test.sigma, nil) if !ok { t.Fatalf("Bad test, original sigma not positive definite") } newNormal, ok := normal.ConditionNormal(test.observed, test.values, nil) if !ok { t.Fatalf("Bad test, update failure") } if !floats.EqualApprox(test.newMu, newNormal.mu, 1e-12) { t.Errorf("Updated mean mismatch. 
Want %v, got %v.", test.newMu, newNormal.mu) } var sigma mat64.SymDense sigma.FromCholesky(&newNormal.chol) if !mat64.EqualApprox(test.newSigma, &sigma, 1e-12) { t.Errorf("Updated sigma mismatch\n.Want:\n% v\nGot:\n% v\n", test.newSigma, sigma) } } // Test bivariate case where the update rule is analytic for _, test := range []struct { mu []float64 std []float64 rho float64 value float64 }{ { mu: []float64{2, 3}, std: []float64{3, 5}, rho: 0.9, value: 1000, }, { mu: []float64{2, 3}, std: []float64{3, 5}, rho: -0.9, value: 1000, }, } { std := test.std rho := test.rho sigma := mat64.NewSymDense(2, []float64{std[0] * std[0], std[0] * std[1] * rho, std[0] * std[1] * rho, std[1] * std[1]}) normal, ok := NewNormal(test.mu, sigma, nil) if !ok { t.Fatalf("Bad test, original sigma not positive definite") } newNormal, ok := normal.ConditionNormal([]int{1}, []float64{test.value}, nil) if !ok { t.Fatalf("Bad test, update failed") } var newSigma mat64.SymDense newSigma.FromCholesky(&newNormal.chol) trueMean := test.mu[0] + rho*(std[0]/std[1])*(test.value-test.mu[1]) if math.Abs(trueMean-newNormal.mu[0]) > 1e-14 { t.Errorf("Mean mismatch. Want %v, got %v", trueMean, newNormal.mu[0]) } trueVar := (1 - rho*rho) * std[0] * std[0] if math.Abs(trueVar-newSigma.At(0, 0)) > 1e-14 { t.Errorf("Std mismatch. Want %v, got %v", trueMean, newNormal.mu[0]) } } // Test via sampling. for _, test := range []struct { mu []float64 sigma *mat64.SymDense observed []int unobserved []int value []float64 }{ // The indices in unobserved must be in ascending order for this test. 
{ mu: []float64{2, 3, 4}, sigma: mat64.NewSymDense(3, []float64{2, 0.5, 3, 0.5, 1, 0.6, 3, 0.6, 10}), observed: []int{0}, unobserved: []int{1, 2}, value: []float64{1.9}, }, { mu: []float64{2, 3, 4, 5}, sigma: mat64.NewSymDense(4, []float64{2, 0.5, 3, 0.1, 0.5, 1, 0.6, 0.2, 3, 0.6, 10, 0.3, 0.1, 0.2, 0.3, 3}), observed: []int{0, 3}, unobserved: []int{1, 2}, value: []float64{1.9, 2.9}, }, } { totalSamp := 4000000 var nSamp int samples := mat64.NewDense(totalSamp, len(test.mu), nil) normal, ok := NewNormal(test.mu, test.sigma, nil) if !ok { t.Errorf("bad test") } sample := make([]float64, len(test.mu)) for i := 0; i < totalSamp; i++ { normal.Rand(sample) isClose := true for i, v := range test.observed { if math.Abs(sample[v]-test.value[i]) > 1e-1 { isClose = false break } } if isClose { samples.SetRow(nSamp, sample) nSamp++ } } if nSamp < 100 { t.Errorf("bad test, not enough samples") continue } samples = samples.View(0, 0, nSamp, len(test.mu)).(*mat64.Dense) // Compute mean and covariance matrix. estMean := make([]float64, len(test.mu)) for i := range estMean { estMean[i] = stat.Mean(mat64.Col(nil, i, samples), nil) } estCov := stat.CovarianceMatrix(nil, samples, nil) // Compute update rule. newNormal, ok := normal.ConditionNormal(test.observed, test.value, nil) if !ok { t.Fatalf("Bad test, update failure") } var subEstMean []float64 for _, v := range test.unobserved { subEstMean = append(subEstMean, estMean[v]) } subEstCov := mat64.NewSymDense(len(test.unobserved), nil) for i := 0; i < len(test.unobserved); i++ { for j := i; j < len(test.unobserved); j++ { subEstCov.SetSym(i, j, estCov.At(test.unobserved[i], test.unobserved[j])) } } for i, v := range subEstMean { if math.Abs(newNormal.mu[i]-v) > 5e-2 { t.Errorf("Mean mismatch. Want %v, got %v.", newNormal.mu[i], v) } } var sigma mat64.SymDense sigma.FromCholesky(&newNormal.chol) if !mat64.EqualApprox(&sigma, subEstCov, 1e-1) { t.Errorf("Covariance mismatch. Want:\n%0.8v\nGot:\n%0.8v\n", subEstCov, sigma) } } }
// TestCovarianceMatrix checks CovarianceMatrix against known answers,
// verifies the inputs are not mutated, cross-checks each element against
// Covariance, and confirms the documented panic conditions.
func TestCovarianceMatrix(t *testing.T) {
	// An alternative way to test this is to call the Variance and
	// Covariance functions and ensure that the results are identical.
	for i, test := range []struct {
		data    *mat64.Dense
		weights []float64
		ans     *mat64.Dense
	}{
		{
			data: mat64.NewDense(5, 2, []float64{
				-2, -4,
				-1, 2,
				0, 0,
				1, -2,
				2, 4,
			}),
			weights: nil,
			ans: mat64.NewDense(2, 2, []float64{
				2.5, 3,
				3, 10,
			}),
		},
		{
			data: mat64.NewDense(3, 2, []float64{
				1, 1,
				2, 4,
				3, 9,
			}),
			weights: []float64{
				1, 1.5, 1,
			},
			ans: mat64.NewDense(2, 2, []float64{
				.8, 3.2,
				3.2, 13.142857142857146,
			}),
		},
	} {
		// Make a copy of the data to check that it isn't changing.
		r := test.data.RawMatrix()
		d := make([]float64, len(r.Data))
		copy(d, r.Data)
		w := make([]float64, len(test.weights))
		if test.weights != nil {
			copy(w, test.weights)
		}
		c := CovarianceMatrix(nil, test.data, test.weights)
		if !mat64.Equal(c, test.ans) {
			t.Errorf("%d: expected cov %v, found %v", i, test.ans, c)
		}
		if !floats.Equal(d, r.Data) {
			t.Errorf("%d: data was modified during execution", i)
		}
		if !floats.Equal(w, test.weights) {
			t.Errorf("%d: weights was modified during execution", i)
		}
		// compare with call to Covariance
		_, cols := c.Dims()
		for ci := 0; ci < cols; ci++ {
			for cj := 0; cj < cols; cj++ {
				x := mat64.Col(nil, ci, test.data)
				y := mat64.Col(nil, cj, test.data)
				cov := Covariance(x, y, test.weights)
				if math.Abs(cov-c.At(ci, cj)) > 1e-14 {
					t.Errorf("CovMat does not match at (%v, %v).\nWant %v, got %v.", ci, cj, cov, c.At(ci, cj))
				}
			}
		}
	}
	if !Panics(func() { CovarianceMatrix(nil, mat64.NewDense(5, 2, nil), []float64{}) }) {
		t.Errorf("CovarianceMatrix did not panic with weight size mismatch")
	}
	if !Panics(func() { CovarianceMatrix(mat64.NewDense(1, 1, nil), mat64.NewDense(5, 2, nil), nil) }) {
		t.Errorf("CovarianceMatrix did not panic with preallocation size mismatch")
	}
	if !Panics(func() { CovarianceMatrix(nil, mat64.NewDense(2, 2, []float64{1, 2, 3, 4}), []float64{1, -1}) }) {
		t.Errorf("CovarianceMatrix did not panic with negative weights")
	}
}
// simplex solves a linear program in standard form (minimize c^T x subject to
// A x = b, x >= 0) with the Simplex method, returning the optimal value, the
// optimal x, the final basic indices, and any error encountered. If
// initialBasic is non-nil it is used as the starting basic feasible set;
// otherwise a Phase I problem is solved to find one.
func simplex(initialBasic []int, c []float64, A mat64.Matrix, b []float64, tol float64) (float64, []float64, []int, error) {
	err := verifyInputs(initialBasic, c, A, b)
	if err != nil {
		if err == ErrUnbounded {
			return math.Inf(-1), nil, nil, ErrUnbounded
		}
		return math.NaN(), nil, nil, err
	}
	m, n := A.Dims()

	// There is at least one optimal solution to the LP which is at the intersection
	// to a set of constraint boundaries. For a standard form LP with m variables
	// and n equality constraints, at least m-n elements of x must equal zero
	// at optimality. The Simplex algorithm solves the standard-form LP by starting
	// at an initial constraint vertex and successively moving to adjacent constraint
	// vertices. At every vertex, the set of non-zero x values is the "basic
	// feasible solution". The list of non-zero x's are maintained in basicIdxs,
	// the respective columns of A are in ab, and the actual non-zero values of
	// x are in xb.
	//
	// The LP is equality constrained such that A * x = b. This can be expanded
	// to
	//  ab * xb + an * xn = b
	// where ab are the columns of a in the basic set, and an are all of the
	// other columns. Since each element of xn is zero by definition, this means
	// that for all feasible solutions xb = ab^-1 * b.
	//
	// Before the simplex algorithm can start, an initial feasible solution must
	// be found. If initialBasic is non-nil a feasible solution has been supplied.
	// Otherwise the "Phase I" problem must be solved to find an initial feasible
	// solution.

	var basicIdxs []int // The indices of the non-zero x values.
	var ab *mat64.Dense // The subset of columns of A listed in basicIdxs.
	var xb []float64    // The non-zero elements of x. xb = ab^-1 b

	if initialBasic != nil {
		// InitialBasic supplied. Panic if incorrect length or infeasible.
		if len(initialBasic) != m {
			panic("lp: incorrect number of initial vectors")
		}
		ab = extractColumns(A, initialBasic)
		xb, err = initializeFromBasic(ab, b)
		if err != nil {
			panic(err)
		}
		basicIdxs = make([]int, len(initialBasic))
		copy(basicIdxs, initialBasic)
	} else {
		// No inital basis supplied. Solve the PhaseI problem.
		basicIdxs, ab, xb, err = findInitialBasic(A, b)
		if err != nil {
			return math.NaN(), nil, nil, err
		}
	}

	// basicIdxs contains the indexes for an initial feasible solution,
	// ab contains the extracted columns of A, and xb contains the feasible
	// solution. All x not in the basic set are 0 by construction.

	// nonBasicIdx is the set of nonbasic variables.
	nonBasicIdx := make([]int, 0, n-m)
	inBasic := make(map[int]struct{})
	for _, v := range basicIdxs {
		inBasic[v] = struct{}{}
	}
	for i := 0; i < n; i++ {
		_, ok := inBasic[i]
		if !ok {
			nonBasicIdx = append(nonBasicIdx, i)
		}
	}

	// cb is the subset of c for the basic variables. an and cn
	// are the equivalents to ab and cb but for the nonbasic variables.
	cb := make([]float64, len(basicIdxs))
	for i, idx := range basicIdxs {
		cb[i] = c[idx]
	}
	cn := make([]float64, len(nonBasicIdx))
	for i, idx := range nonBasicIdx {
		cn[i] = c[idx]
	}
	an := extractColumns(A, nonBasicIdx)

	bVec := mat64.NewVector(len(b), b)
	cbVec := mat64.NewVector(len(cb), cb)

	// Temporary data needed each iteration. (Described later)
	r := make([]float64, n-m)
	move := make([]float64, m)

	// Solve the linear program starting from the initial feasible set. This is
	// the "Phase 2" problem.
	//
	// Algorithm:
	// 1) Compute the "reduced costs" for the non-basic variables. The reduced
	// costs are the lagrange multipliers of the constraints.
	//  r = cn - an^T * ab^-T * cb
	// 2) If all of the reduced costs are positive, no improvement is possible,
	// and the solution is optimal (xn can only increase because of
	// non-negativity constraints). Otherwise, the solution can be improved and
	// one element will be exchanged in the basic set.
	// 3) Choose the x_n with the most negative value of r. Call this value xe.
	// This variable will be swapped into the basic set.
	// 4) Increase xe until the next constraint boundary is met. This will happen
	// when the first element in xb becomes 0. The distance xe can increase before
	// a given element in xb becomes negative can be found from
	//  xb = Ab^-1 b - Ab^-1 An xn
	//     = Ab^-1 b - Ab^-1 Ae xe
	//     = bhat + d x_e
	//  xe = bhat_i / - d_i
	// where Ae is the column of A corresponding to xe.
	// The constraining basic index is the first index for which this is true,
	// so remove the element which is min_i (bhat_i / -d_i), assuming d_i is negative.
	// If no d_i is less than 0, then the problem is unbounded.
	// 5) If the new xe is 0 (that is, bhat_i == 0), then this location is at
	// the intersection of several constraints. Use the Bland rule instead
	// of the rule in step 4 to avoid cycling.
	for {
		// Compute reduced costs -- r = cn - an^T ab^-T cb
		var tmp mat64.Vector
		err = tmp.SolveVec(ab.T(), cbVec)
		if err != nil {
			break
		}
		data := make([]float64, n-m)
		tmp2 := mat64.NewVector(n-m, data)
		tmp2.MulVec(an.T(), &tmp)
		floats.SubTo(r, cn, data)

		// Replace the most negative element in the simplex. If there are no
		// negative entries then the optimal solution has been found.
		minIdx := floats.MinIdx(r)
		if r[minIdx] >= -tol {
			break
		}

		// Flush numerically tiny reduced costs to zero.
		for i, v := range r {
			if math.Abs(v) < rRoundTol {
				r[i] = 0
			}
		}

		// Compute the moving distance.
		err = computeMove(move, minIdx, A, ab, xb, nonBasicIdx)
		if err != nil {
			if err == ErrUnbounded {
				return math.Inf(-1), nil, nil, ErrUnbounded
			}
			break
		}

		// Replace the basic index along the tightest constraint.
		replace := floats.MinIdx(move)
		if move[replace] <= 0 {
			// Degenerate vertex: fall back to the Bland rule (step 5).
			replace, minIdx, err = replaceBland(A, ab, xb, basicIdxs, nonBasicIdx, r, move)
			if err != nil {
				if err == ErrUnbounded {
					return math.Inf(-1), nil, nil, ErrUnbounded
				}
				break
			}
		}

		// Replace the constrained basicIdx with the newIdx.
		basicIdxs[replace], nonBasicIdx[minIdx] = nonBasicIdx[minIdx], basicIdxs[replace]
		cb[replace], cn[minIdx] = cn[minIdx], cb[replace]
		tmpCol1 := mat64.Col(nil, replace, ab)
		tmpCol2 := mat64.Col(nil, minIdx, an)
		ab.SetCol(replace, tmpCol2)
		an.SetCol(minIdx, tmpCol1)

		// Compute the new xb.
		xbVec := mat64.NewVector(len(xb), xb)
		err = xbVec.SolveVec(ab, bVec)
		if err != nil {
			break
		}
	}
	// Found the optimum successfully or died trying. The basic variables get
	// their values, and the non-basic variables are all zero.
	opt := floats.Dot(cb, xb)
	xopt := make([]float64, n)
	for i, v := range basicIdxs {
		xopt[v] = xb[i]
	}
	return opt, xopt, basicIdxs, err
}
// findInitialBasic finds an initial basic solution, and returns the basic // indices, ab, and xb. func findInitialBasic(A mat64.Matrix, b []float64) ([]int, *mat64.Dense, []float64, error) { m, n := A.Dims() basicIdxs := findLinearlyIndependent(A) if len(basicIdxs) != m { return nil, nil, nil, ErrSingular } // It may be that this linearly independent basis is also a feasible set. If // so, the Phase I problem can be avoided. ab := extractColumns(A, basicIdxs) xb, err := initializeFromBasic(ab, b) if err == nil { return basicIdxs, ab, xb, nil } // This set was not feasible. Instead the "Phase I" problem must be solved // to find an initial feasible set of basis. // // Method: Construct an LP whose optimal solution is a feasible solution // to the original LP. // 1) Introduce an artificial variable x_{n+1}. // 2) Let x_j be the most negative element of x_b (largest constraint violation). // 3) Add the artificial variable to A with: // a_{n+1} = b - \sum_{i in basicIdxs} a_i + a_j // swap j with n+1 in the basicIdxs. // 4) Define a new LP: // minimize x_{n+1} // subject to [A A_{n+1}][x_1 ... x_{n+1}] = b // x, x_{n+1} >= 0 // 5) Solve this LP. If x_{n+1} != 0, then the problem is infeasible, otherwise // the found basis can be used as an initial basis for phase II. // // The extra column in Step 3 is defined such that the vector of 1s is an // initial feasible solution. // Find the largest constraint violator. // Compute a_{n+1} = b - \sum{i in basicIdxs}a_i + a_j. j is in basicIDx, so // instead just subtract the basicIdx columns that are not minIDx. minIdx := floats.MinIdx(xb) aX1 := make([]float64, m) copy(aX1, b) col := make([]float64, m) for i, v := range basicIdxs { if i == minIdx { continue } mat64.Col(col, v, A) floats.Sub(aX1, col) } // Construct the new LP. // aNew = [A, a_{n+1}] // bNew = b // cNew = 1 for x_{n+1} aNew := mat64.NewDense(m, n+1, nil) aNew.Copy(A) aNew.SetCol(n, aX1) basicIdxs[minIdx] = n // swap minIdx with n in the basic set. 
c := make([]float64, n+1) c[n] = 1 // Solve the Phase 2 linear program. _, xOpt, newBasic, err := simplex(basicIdxs, c, aNew, b, 1e-10) if err != nil { return nil, nil, nil, errors.New(fmt.Sprintf("lp: error finding feasible basis: %s", err)) } // If n+1 is part of the solution basis then the problem is infeasible. If // not, then the problem is feasible and newBasic is an initial feasible // solution. if math.Abs(xOpt[n]) > phaseIZeroTol { return nil, nil, nil, ErrInfeasible } // The value is zero. First, see if it's not in the basis (feasible solution). basicIdx := -1 basicMap := make(map[int]struct{}) for i, v := range newBasic { if v == n { basicIdx = i } basicMap[v] = struct{}{} xb[i] = xOpt[v] } if basicIdx == -1 { // Not in the basis. ab = extractColumns(A, newBasic) return newBasic, ab, xb, nil } // The value is zero, but it's in the basis. See if swapping in another column // finds a feasible solution. for i := range xOpt { if _, inBasic := basicMap[i]; inBasic { continue } newBasic[basicIdx] = i ab := extractColumns(A, newBasic) xb, err := initializeFromBasic(ab, b) if err == nil { return newBasic, ab, xb, nil } } return nil, nil, nil, ErrInfeasible }