func (g *matrixFs) Gf(x, y *matrix.FloatMatrix, alpha, beta float64, trans linalg.Option) error {

    minor := 0
    if !checkpnt.MinorEmpty() {
        minor = checkpnt.MinorTop()
    } else {
        loopg += 1
        minor = loopg
    }
    checkpnt.Check("00-Gfunc", minor)

    m, n := g.A.Size()
    y.Scale(beta)

    // x_n = x[:n]
    //x_n := matrix.FloatVector(x.FloatArray()[:n])
    x_n := x.SubMatrix(0, 0, n, 1).Copy()

    // x_n_2n = x[n:2*n]
    //x_n_2n := matrix.FloatVector(x.FloatArray()[n : 2*n])
    x_n_2n := x.SubMatrix(n, 0, n, 1).Copy()

    if linalg.Equal(trans, linalg.OptNoTrans) {
        // y += alpha * G * x

        // y[:n] += alpha * (x[:n] - x[n:2*n])
        y_n := matrix.Minus(x_n, x_n_2n).Scale(alpha)
        y.SubMatrix(0, 0, n, 1).Plus(y_n)
        //y.AddIndexes(matrix.Indexes(n), y_n.FloatArray())

        // y[n:2*n] += alpha * (-x[:n] - x[n:2*n]) = -alpha * (x[:n]+x[n:2*n])
        y_n = matrix.Plus(x_n, x_n_2n).Scale(-alpha)
        y.SubMatrix(n, 0, n, 1).Plus(y_n)
        //y.AddIndexes(matrix.Indexes(n, 2*n), y_n.FloatArray())

        // y[2*n+1:] += -alpha * A * x[:n]
        y_2n := matrix.Times(g.A, x_n).Scale(-alpha)
        //y.AddIndexes(matrix.Indexes(2*n+1, y.NumElements()), y_2n.FloatArray())
        y.SubMatrix(2*n+1, 0, y_2n.NumElements(), 1).Plus(y_2n)

    } else {
        // x_m = x[-m:]
        //x_m := matrix.FloatVector(x.FloatArray()[x.NumElements()-m:])
        x_m := x.SubMatrix(x.NumElements()-m, 0)

        // x_tmp = (x[:n] - x[n:2*n] - A.T * x[-m:])
        x_tmp := matrix.Minus(x_n, x_n_2n, matrix.Times(g.A.Transpose(), x_m))

        // y[:n] += alpha * (x[:n] - x[n:2*n] - A.T * x[-m:])
        //y.AddIndexes(matrix.Indexes(n), x_tmp.Scale(alpha).FloatArray())
        y.SubMatrix(0, 0, n, 1).Plus(x_tmp.Scale(alpha))

        x_tmp = matrix.Plus(x_n, x_n_2n).Scale(-alpha)
        //y.AddIndexes(matrix.Indexes(n, y.NumElements()), x_tmp.FloatArray())
        y.SubMatrix(n, 0).Plus(x_tmp)
    }
    checkpnt.Check("10-Gfunc", minor)
    return nil
}
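Gf is a method on the custom G-operator passed to cvx.ConeLpCustomMatrix in qcl1 below. The receiver type matrixFs is not shown in this section; the following is a minimal sketch of what it presumably looks like, assuming it only wraps the dense matrix accessed as g.A above and constructed as &matrixFs{A} in qcl1.

// Minimal sketch (assumption): the receiver type only needs to carry the
// dense m-by-n matrix A that Gf multiplies with. The field name A matches
// the g.A accesses in Gf and the &matrixFs{A} literal in qcl1.
type matrixFs struct {
    A *matrix.FloatMatrix
}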
func TestAcent(t *testing.T) {
    // matrix data in row-order presentation
    Adata := [][]float64{
        []float64{-7.44e-01, 1.11e-01, 1.29e+00, 2.62e+00, -1.82e+00},
        []float64{4.59e-01, 7.06e-01, 3.16e-01, -1.06e-01, 7.80e-01},
        []float64{-2.95e-02, -2.22e-01, -2.07e-01, -9.11e-01, -3.92e-01},
        []float64{-7.75e-01, 1.03e-01, -1.22e+00, -5.74e-01, -3.32e-01},
        []float64{-1.80e+00, 1.24e+00, -2.61e+00, -9.31e-01, -6.38e-01}}

    bdata := []float64{
        8.38e-01, 9.92e-01, 9.56e-01, 6.14e-01, 6.56e-01,
        3.57e-01, 6.36e-01, 5.08e-01, 8.81e-03, 7.08e-02}

    // these are the solutions obtained from running cvxopt acent.py with the above data
    solData := []float64{-11.59728373909344512, -1.35196389161339936, 7.21894899350256303,
        -3.29159917142051528, 4.90454147385329176}

    ntData := []float64{
        1.5163484265903457, 1.2433928210771914, 1.0562922103520955, 0.8816246051011607,
        0.7271128861543598, 0.42725003346248974, 0.0816777301914883, 0.0005458037072843131,
        1.6259980735305693e-10}

    b := matrix.FloatVector(bdata)
    Al := matrix.FloatMatrixFromTable(Adata, matrix.RowOrder)
    Au := matrix.Scale(Al, -1.0)
    A := matrix.FloatZeros(2*Al.Rows(), Al.Cols())
    A.SetSubMatrix(0, 0, Al)
    A.SetSubMatrix(Al.Rows(), 0, Au)

    x, nt, err := acent(A, b, 10)
    if err != nil {
        t.Logf("Acent error: %s", err)
        t.Fail()
        return
    }
    solref := matrix.FloatVector(solData)
    ntref := matrix.FloatVector(ntData)
    soldf := matrix.Minus(x, solref)
    ntdf := matrix.Minus(matrix.FloatVector(nt), ntref)
    solNrm := blas.Nrm2Float(soldf)
    ntNrm := blas.Nrm2Float(ntdf)
    t.Logf("x [diff=%.2e]:\n%v\n", solNrm, x)
    t.Logf("nt [diff=%.2e]:\n%v\n", ntNrm, nt)

    if solNrm > TOL {
        t.Log("solution deviates too much from expected\n")
        t.Fail()
    }
}
func TestUpdateTrmMV(t *testing.T) {
    //bM := 5
    bN := 8
    //bP := 4
    nb := 4

    X := matrix.FloatNormal(bN, 1)
    //B := matrix.FloatNormal(bP, bN)
    Y := X.Copy()
    C0 := matrix.FloatZeros(bN, bN)
    C2 := matrix.FloatZeros(bN, bN)
    C1 := matrix.FloatZeros(bN, bN)

    Xr := X.FloatArray()
    Yr := Y.FloatArray()
    C1r := C1.FloatArray()
    C0r := C0.FloatArray()
    C2r := C2.FloatArray()

    // no transpose
    DRankMV(C1r, Xr, Yr, 1.0, C1.LeadingIndex(), 1, 1, 0, bN, 0, bN, nb, nb)
    DTrmUpdMV(C0r, Xr, Yr, 1.0, LOWER, C0.LeadingIndex(), 1, 1, 0, bN, nb)
    DTrmUpdMV(C2r, Xr, Yr, 1.0, UPPER, C2.LeadingIndex(), 1, 1, 0, bN, nb)
    t.Logf("C1:\n%v\nC0:\n%v\nC2:\n%v\n", C1, C0, C2)

    // C0 == C2.T
    t.Logf("C0 == C2.T: %v\n", C0.AllClose(C2.Transpose()))

    // C1 == C1 - C2 + C0.T
    Cn := matrix.Minus(C1, C2)
    Cn.Plus(C0.Transpose())
    t.Logf("C1 == C1 - C2 + C0.T: %v\n", Cn.AllClose(C1))
}
func (m *mVariable) Verify(dataline string) float64 {
    refdata, err := matrix.FloatParse(dataline)
    if err != nil {
        fmt.Printf("parse error: %s", err)
        return 0.0
    }
    return blas.Nrm2Float(matrix.Minus(m.mtx, refdata))
}
func (m *mVariable) ShowError(dataline string) {
    refdata, err := matrix.FloatParse(dataline)
    if err != nil {
        fmt.Printf("parse error: %s", err)
        return
    }
    df := matrix.Minus(m.mtx, refdata)
    emtx, _ := matrix.FloatMatrixStacked(matrix.StackRight, m.mtx, refdata, df)
    fmt.Printf("my data | ref.data | diff \n%v\n", emtx.ToString(spformat))
}
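Verify and ShowError are methods on mVariable, whose definition is not part of this section. A minimal sketch of the assumed type follows; only the mtx field is implied by the code above, and spformat is assumed to be a package-level format string used by ToString.

// Minimal sketch (assumption): mVariable only needs the matrix that is
// being compared against parsed reference data in Verify and ShowError.
type mVariable struct {
    mtx *matrix.FloatMatrix
}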
func (p *floorPlan) F1(x *matrix.FloatMatrix) (f, Df *matrix.FloatMatrix, err error) {
    err = nil
    mn := x.Min(-1, -2, -3, -4, -5)
    if mn <= 0.0 {
        f, Df = nil, nil
        return
    }
    zeros := matrix.FloatZeros(5, 12)
    dk1 := matrix.FloatDiagonal(5, -1.0)
    dk2 := matrix.FloatZeros(5, 5)
    x17 := matrix.FloatVector(x.FloatArray()[17:])

    // -( Amin ./ (x17 .* x17) )
    diag := matrix.Div(p.Amin, matrix.Mul(x17, x17)).Scale(-1.0)
    dk2.SetIndexesFromArray(diag.FloatArray(), matrix.MakeDiagonalSet(5)...)
    Df, _ = matrix.FloatMatrixStacked(matrix.StackRight, zeros, dk1, dk2)

    x12 := matrix.FloatVector(x.FloatArray()[12:17])
    // f = -x[12:17] + div(Amin, x[17:]) == div(Amin, x[17:]) - x[12:17]
    f = matrix.Minus(matrix.Div(p.Amin, x17), x12)
    return
}
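F1 is a method on floorPlan, which is also not defined in this section. The sketch below covers only what the code above implies: a field Amin, element-wise divided by x[17:], presumably the vector of minimum areas for the floor-planning problem. Anything beyond that field is an assumption.

// Minimal sketch (assumption): the only field F1 touches is Amin.
type floorPlan struct {
    Amin *matrix.FloatMatrix
}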
func TestUpdateTrmLower(t *testing.T) {
    //bM := 5
    bN := 8
    bP := 4
    nb := 4

    A := matrix.FloatNormal(bN, bP)
    //B := matrix.FloatNormal(bP, bN)
    B := A.Transpose()
    C0 := matrix.FloatZeros(bN, bN)
    C2 := matrix.FloatZeros(bN, bN)
    C1 := matrix.FloatZeros(bN, bN)

    Ar := A.FloatArray()
    Br := B.FloatArray()
    C1r := C1.FloatArray()
    C0r := C0.FloatArray()
    C2r := C2.FloatArray()

    // no transpose
    DMult(C1r, Ar, Br, 2.0, 1.0, NOTRANS, C1.LeadingIndex(), A.LeadingIndex(), B.LeadingIndex(),
        bP, 0, bN, 0, bN, nb, nb, nb)
    DTrmUpdBlk(C0r, Ar, Br, 2.0, 1.0, LOWER, C0.LeadingIndex(), A.LeadingIndex(), B.LeadingIndex(),
        bP, 0, bN, nb, nb)
    DTrmUpdBlk(C2r, Ar, Br, 2.0, 1.0, UPPER, C2.LeadingIndex(), A.LeadingIndex(), B.LeadingIndex(),
        bP, 0, bN, nb, nb)
    //t.Logf("C1:\n%v\nC0:\n%v\nC2:\n%v\n", C1, C0, C2)

    // C0 == C2.T
    t.Logf("C0 == C2.T: %v\n", C0.AllClose(C2.Transpose()))

    // C1 == C1 - C2 + C0.T
    Cn := matrix.Minus(C1, C2)
    Cn.Plus(C0.Transpose())
    t.Logf("C1 == C1 - C2 + C0.T: %v\n", Cn.AllClose(C1))

    // B == A.T
    DMult(C1r, Ar, Ar, 2.0, 0.0, TRANSB, C1.LeadingIndex(), A.LeadingIndex(), A.LeadingIndex(),
        bP, 0, bN, 0, bN, nb, nb, nb)
    DTrmUpdBlk(C0r, Ar, Ar, 2.0, 0.0, LOWER|TRANSB, C0.LeadingIndex(), A.LeadingIndex(), A.LeadingIndex(),
        bP, 0, bN, nb, nb)
    DTrmUpdBlk(C2r, Ar, Ar, 2.0, 0.0, UPPER|TRANSB, C2.LeadingIndex(), A.LeadingIndex(), A.LeadingIndex(),
        bP, 0, bN, nb, nb)
    //t.Logf("TRANSB:\nC1:\n%v\nC0:\n%v\nC2:\n%v\n", C1, C0, C2)

    // C0 == C2.T
    t.Logf("B.T: C0 == C2.T: %v\n", C0.AllClose(C2.Transpose()))

    // C1 == C1 - C2 + C0.T
    Cn = matrix.Minus(C1, C2)
    Cn.Plus(C0.Transpose())
    t.Logf("B.T: C1 == C1 - C2 + C0.T: %v\n", Cn.AllClose(C1))

    // A == B.T
    DMult(C1r, Br, Br, 2.0, 0.0, TRANSA, C1.LeadingIndex(), B.LeadingIndex(), B.LeadingIndex(),
        bP, 0, bN, 0, bN, nb, nb, nb)
    DTrmUpdBlk(C0r, Br, Br, 2.0, 0.0, LOWER|TRANSA, C0.LeadingIndex(), B.LeadingIndex(), B.LeadingIndex(),
        bP, 0, bN, nb, nb)
    DTrmUpdBlk(C2r, Br, Br, 2.0, 0.0, UPPER|TRANSA, C2.LeadingIndex(), B.LeadingIndex(), B.LeadingIndex(),
        bP, 0, bN, nb, nb)
    //t.Logf("TRANSA:\nC1:\n%v\nC0:\n%v\nC2:\n%v\n", C1, C0, C2)

    // C0 == C2.T
    t.Logf("A.T: C0 == C2.T: %v\n", C0.AllClose(C2.Transpose()))

    // C1 == C1 - C2 + C0.T
    Cn = matrix.Minus(C1, C2)
    Cn.Plus(C0.Transpose())
    t.Logf("A.T: C1 == C1 - C2 + C0.T: %v\n", Cn.AllClose(C1))

    // A == B.T, B == A.T
    DMult(C1r, Br, Ar, 2.0, 0.0, TRANSA|TRANSB, C1.LeadingIndex(), B.LeadingIndex(), A.LeadingIndex(),
        bP, 0, bN, 0, bN, nb, nb, nb)
    DTrmUpdBlk(C0r, Br, Ar, 2.0, 0.0, LOWER|TRANSA|TRANSB, C0.LeadingIndex(), B.LeadingIndex(), A.LeadingIndex(),
        bP, 0, bN, nb, nb)
    DTrmUpdBlk(C2r, Br, Ar, 2.0, 0.0, UPPER|TRANSA|TRANSB, C2.LeadingIndex(), B.LeadingIndex(), A.LeadingIndex(),
        bP, 0, bN, nb, nb)
    //t.Logf("TRANSA|TRANSB:\nC1:\n%v\nC0:\n%v\nC2:\n%v\n", C1, C0, C2)

    // C0 == C2.T
    t.Logf("A.T, B.T: C0 == C2.T: %v\n", C0.AllClose(C2.Transpose()))

    // C1 == C1 - C2 + C0.T
    Cn = matrix.Minus(C1, C2)
    Cn.Plus(C0.Transpose())
    t.Logf("A.T, B.T: C1 == C1 - C2 + C0.T: %v\n", Cn.AllClose(C1))
}
// Computes the analytic center of A*x <= b with A m by n of rank n.
// We assume that b > 0 and the feasible set is bounded.
func Acent(A, b *matrix.FloatMatrix, niters int) (*matrix.FloatMatrix, []float64) {

    if niters <= 0 {
        niters = MAXITERS
    }
    ntdecrs := make([]float64, 0, niters)

    if A.Rows() != b.Rows() {
        return nil, nil
    }

    m, n := A.Size()
    x := matrix.FloatZeros(n, 1)
    H := matrix.FloatZeros(n, n)
    // Helper m*n matrix
    Dmn := matrix.FloatZeros(m, n)

    for i := 0; i < niters; i++ {

        // Gradient is g = A^T * (1.0/(b - A*x)). d = 1.0/(b - A*x)
        // d is an m*1 matrix, g is an n*1 matrix
        d := matrix.Minus(b, matrix.Times(A, x)).Inv()
        g := matrix.Times(A.Transpose(), d)

        // Hessian is H = A^T * diag(1./(b-A*x))^2 * A.
        // In the original Python code the expression d[:,n*[0]] creates
        // an m*n matrix where each column is a copy of column 0.
        // We do it here manually.
        for j := 0; j < n; j++ {
            Dmn.SetColumn(j, d)
        }

        // Function Mul computes the element-wise product of matrices.
        Asc := matrix.Mul(Dmn, A)
        blas.SyrkFloat(Asc, H, 1.0, 0.0, linalg.OptTrans)

        // Newton step is v = H^-1 * g.
        v := g.Copy().Scale(-1.0)
        lapack.PosvFloat(H, v)

        // Directional derivative and Newton decrement.
        lam := blas.DotFloat(g, v)
        ntdecrs = append(ntdecrs, math.Sqrt(-lam))
        if ntdecrs[len(ntdecrs)-1] < TOL {
            fmt.Printf("last Newton decrement < TOL(%v)\n", TOL)
            return x, ntdecrs
        }

        // Backtracking line search.
        // y = d .* A*v
        y := d.Mul(A.Times(v))
        step := 1.0
        for 1-step*y.Max() < 0 {
            step *= BETA
        }

    search:
        for {
            // t = -step*y
            t := y.Copy().Scale(-step)
            // t = (1 + t) [e.g. t = 1 - step*y]
            t.Add(1.0)
            // ts = sum(log(1-step*y))
            ts := t.Log().Sum()
            if -ts < ALPHA*step*lam {
                break search
            }
            step *= BETA
        }
        v.Scale(step)
        x = x.Plus(v)
    }

    // no solution !!
    fmt.Printf("Iteration %d exhausted\n", niters)
    return x, ntdecrs
}
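A minimal sketch of how Acent might be driven, mirroring the setup in TestAcent above: A is stacked as [Al; -Al] so the feasible set is bounded, and the iteration limit of 10 comes from that test. The wrapper function name is hypothetical, and the package constants MAXITERS, TOL, ALPHA and BETA are assumed to be defined elsewhere in the package.

// runAcentExample is a hypothetical driver for Acent, reusing the matrix
// construction pattern from TestAcent. Adata and bdata are caller-supplied
// row-order data and right-hand side values with bdata > 0.
func runAcentExample(Adata [][]float64, bdata []float64) {
    Al := matrix.FloatMatrixFromTable(Adata, matrix.RowOrder)
    Au := matrix.Scale(Al, -1.0)
    // Stack [Al; -Al] so that {x : A*x <= b} is bounded.
    A := matrix.FloatZeros(2*Al.Rows(), Al.Cols())
    A.SetSubMatrix(0, 0, Al)
    A.SetSubMatrix(Al.Rows(), 0, Au)
    b := matrix.FloatVector(bdata)

    x, ntdecrs := Acent(A, b, 10)
    fmt.Printf("x:\n%v\nNewton decrements: %v\n", x, ntdecrs)
}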
func main() {
    m := 6
    Vdata := [][]float64{
        []float64{1.0, -1.0, -2.0, -2.0, 0.0, 1.5, 1.0},
        []float64{1.0, 2.0, 1.0, -1.0, -2.0, -1.0, 1.0}}

    V := matrix.FloatMatrixFromTable(Vdata, matrix.RowOrder)

    // V[1, :m] - V[1,1:]
    a0 := matrix.Minus(V.GetSubMatrix(1, 0, 1, m), V.GetSubMatrix(1, 1, 1))
    // V[0, :m] - V[0,1:]
    a1 := matrix.Minus(V.GetSubMatrix(0, 0, 1, m), V.GetSubMatrix(0, 1, 1))

    A0, _ := matrix.FloatMatrixStacked(matrix.StackDown, a0.Scale(-1.0), a1)
    A0 = A0.Transpose()

    b0 := matrix.Mul(A0, V.GetSubMatrix(0, 0, 2, m).Transpose())
    b0 = matrix.Times(b0, matrix.FloatWithValue(2, 1, 1.0))

    A := make([]*matrix.FloatMatrix, 0)
    b := make([]*matrix.FloatMatrix, 0)
    A = append(A, A0)
    b = append(b, b0)

    // List of symbols
    C := make([]*matrix.FloatMatrix, 0)
    C = append(C, matrix.FloatZeros(2, 1))
    var row *matrix.FloatMatrix = nil
    for k := 0; k < m; k++ {
        row = A0.GetRow(k, row)
        nrm := blas.Nrm2Float(row)
        row.Scale(2.0 * b0.GetIndex(k) / (nrm * nrm))
        C = append(C, row.Transpose())
    }

    // Voronoi set around C[1]
    A1 := matrix.FloatZeros(3, 2)
    A1.SetSubMatrix(0, 0, A0.GetSubMatrix(0, 0, 1).Scale(-1.0))
    A1.SetSubMatrix(1, 0, matrix.Minus(C[m], C[1]).Transpose())
    A1.SetSubMatrix(2, 0, matrix.Minus(C[2], C[1]).Transpose())

    b1 := matrix.FloatZeros(3, 1)
    b1.SetIndex(0, -b0.GetIndex(0))
    v := matrix.Times(A1.GetRow(1, nil), matrix.Plus(C[m], C[1])).Float() * 0.5
    b1.SetIndex(1, v)
    v = matrix.Times(A1.GetRow(2, nil), matrix.Plus(C[2], C[1])).Float() * 0.5
    b1.SetIndex(2, v)

    A = append(A, A1)
    b = append(b, b1)

    // Voronoi set around C[2] ... C[5]
    for k := 2; k < 6; k++ {
        A1 = matrix.FloatZeros(3, 2)
        A1.SetSubMatrix(0, 0, A0.GetSubMatrix(k-1, 0, 1).Scale(-1.0))
        A1.SetSubMatrix(1, 0, matrix.Minus(C[k-1], C[k]).Transpose())
        A1.SetSubMatrix(2, 0, matrix.Minus(C[k+1], C[k]).Transpose())

        b1 = matrix.FloatZeros(3, 1)
        b1.SetIndex(0, -b0.GetIndex(k-1))
        v := matrix.Times(A1.GetRow(1, nil), matrix.Plus(C[k-1], C[k])).Float() * 0.5
        b1.SetIndex(1, v)
        v = matrix.Times(A1.GetRow(2, nil), matrix.Plus(C[k+1], C[k])).Float() * 0.5
        b1.SetIndex(2, v)

        A = append(A, A1)
        b = append(b, b1)
    }

    // Voronoi set around C[6]
    A1 = matrix.FloatZeros(3, 2)
    A1.SetSubMatrix(0, 0, A0.GetSubMatrix(5, 0, 1).Scale(-1.0))
    A1.SetSubMatrix(1, 0, matrix.Minus(C[1], C[6]).Transpose())
    A1.SetSubMatrix(2, 0, matrix.Minus(C[5], C[6]).Transpose())

    b1 = matrix.FloatZeros(3, 1)
    b1.SetIndex(0, -b0.GetIndex(5))
    v = matrix.Times(A1.GetRow(1, nil), matrix.Plus(C[1], C[6])).Float() * 0.5
    b1.SetIndex(1, v)
    v = matrix.Times(A1.GetRow(2, nil), matrix.Plus(C[5], C[6])).Float() * 0.5
    b1.SetIndex(2, v)

    A = append(A, A1)
    b = append(b, b1)

    P := matrix.FloatIdentity(2)
    q := matrix.FloatZeros(2, 1)

    solopts := &cvx.SolverOptions{ShowProgress: false, MaxIter: 30}
    ovals := make([]float64, 0)
    for k := 1; k < 7; k++ {
        sol, err := cvx.Qp(P, q, A[k], b[k], nil, nil, solopts, nil)
        _ = err
        x := sol.Result.At("x")[0]
        ovals = append(ovals, math.Pow(blas.Nrm2Float(x), 2.0))
    }

    optvals := matrix.FloatVector(ovals)
    //fmt.Printf("optvals=\n%v\n", optvals)

    rangeFunc := func(n int) []float64 {
        r := make([]float64, 0)
        for i := 0; i < n; i++ {
            r = append(r, float64(i))
        }
        return r
    }
    nopts := 200
    sigmas := matrix.FloatVector(rangeFunc(nopts))
    sigmas.Scale((0.5 - 0.2) / float64(nopts)).Add(0.2)

    bndsVal := func(sigma float64) float64 {
        // 1.0 - sum(exp( -optvals/(2*sigma**2)))
        return 1.0 - matrix.Exp(matrix.Scale(optvals, -1.0/(2*sigma*sigma))).Sum()
    }
    bnds := matrix.FloatZeros(sigmas.NumElements(), 1)
    for j, v := range sigmas.FloatArray() {
        bnds.SetIndex(j, bndsVal(v))
    }
    plotData("plot.png", sigmas.FloatArray(), bnds.FloatArray())
}
func errorToRef(ref, val *matrix.FloatMatrix) (nrm float64, diff *matrix.FloatMatrix) {
    diff = matrix.Minus(ref, val)
    nrm = blas.Nrm2(diff).Float()
    return
}
func qcl1(A, b *matrix.FloatMatrix) (*cvx.Solution, error) {

    // Returns the solution u, z of
    //
    //   (primal) minimize    || u ||_1
    //            subject to  || A * u - b ||_2 <= 1
    //
    //   (dual)   maximize    b^T z - ||z||_2
    //            subject to  || A'*z ||_inf <= 1.
    //
    // Exploits structure, assuming A is m by n with m >= n.

    m, n := A.Size()
    Fkkt := func(W *sets.FloatMatrixSet) (f cvx.KKTFunc, err error) {

        minor := 0
        if !checkpnt.MinorEmpty() {
            minor = checkpnt.MinorTop()
        }

        err = nil
        f = nil
        beta := W.At("beta")[0].GetIndex(0)
        v := W.At("v")[0]

        // As = 2 * v *(v[1:].T * A)
        //v_1 := matrix.FloatNew(1, v.NumElements()-1, v.FloatArray()[1:])
        v_1 := v.SubMatrix(1, 0).Transpose()

        As := matrix.Times(v, matrix.Times(v_1, A)).Scale(2.0)

        //As_1 := As.GetSubMatrix(1, 0, m, n)
        //As_1.Scale(-1.0)
        //As.SetSubMatrix(1, 0, matrix.Minus(As_1, A))
        As_1 := As.SubMatrix(1, 0, m, n)
        As_1.Scale(-1.0)
        As_1.Minus(A)
        As.Scale(1.0 / beta)

        S := matrix.Times(As.Transpose(), As)
        checkpnt.AddMatrixVar("S", S)

        d1 := W.At("d")[0].SubMatrix(0, 0, n, 1).Copy()
        d2 := W.At("d")[0].SubMatrix(n, 0).Copy()

        // D = 4.0 * (d1**2 + d2**2)**-1
        d := matrix.Plus(matrix.Mul(d1, d1), matrix.Mul(d2, d2)).Inv().Scale(4.0)
        // S[::n+1] += d
        S.Diag().Plus(d.Transpose())

        err = lapack.Potrf(S)
        checkpnt.Check("00-Fkkt", minor)
        if err != nil {
            return
        }

        f = func(x, y, z *matrix.FloatMatrix) (err error) {

            minor := 0
            if !checkpnt.MinorEmpty() {
                minor = checkpnt.MinorTop()
            } else {
                loopf += 1
                minor = loopf
            }
            checkpnt.Check("00-f", minor)

            // -- z := - W**-T * z
            // z[:n] = -div( z[:n], d1 )
            z_val := z.SubMatrix(0, 0, n, 1)
            z_res := matrix.Div(z_val, d1).Scale(-1.0)
            z.SubMatrix(0, 0, n, 1).Set(z_res)

            // z[n:2*n] = -div( z[n:2*n], d2 )
            z_val = z.SubMatrix(n, 0, n, 1)
            z_res = matrix.Div(z_val, d2).Scale(-1.0)
            z.SubMatrix(n, 0, n, 1).Set(z_res)

            // z[2*n:] -= 2.0*v*( v[0]*z[2*n] - blas.dot(v[1:], z[2*n+1:]) )
            v0_z2n := v.GetIndex(0) * z.GetIndex(2*n)
            v1_z2n := blas.DotFloat(v, z, &linalg.IOpt{"offsetx", 1}, &linalg.IOpt{"offsety", 2*n + 1})
            z_res = matrix.Scale(v, -2.0*(v0_z2n-v1_z2n))
            z.SubMatrix(2*n, 0, z_res.NumElements(), 1).Plus(z_res)

            // z[2*n+1:] *= -1.0
            z.SubMatrix(2*n+1, 0).Scale(-1.0)

            // z[2*n:] /= beta
            z.SubMatrix(2*n, 0).Scale(1.0 / beta)

            // -- x := x - G' * W**-1 * z
            // z_n = z[:n], z_2n = z[n:2*n], z_m = z[-(m+1):]
            z_n := z.SubMatrix(0, 0, n, 1)
            z_2n := z.SubMatrix(n, 0, n, 1)
            z_m := z.SubMatrix(z.NumElements()-(m+1), 0)

            // x[:n] -= div(z[:n], d1) - div(z[n:2*n], d2) + As.T * z[-(m+1):]
            z_res = matrix.Minus(matrix.Div(z_n, d1), matrix.Div(z_2n, d2))
            a_res := matrix.Times(As.Transpose(), z_m)
            z_res.Plus(a_res).Scale(-1.0)
            x.SubMatrix(0, 0, n, 1).Plus(z_res)

            // x[n:] += div(z[:n], d1) + div(z[n:2*n], d2)
            z_res = matrix.Plus(matrix.Div(z_n, d1), matrix.Div(z_2n, d2))
            x.SubMatrix(n, 0, z_res.NumElements(), 1).Plus(z_res)
            checkpnt.Check("15-f", minor)

            // Solve for x[:n]:
            //
            //    S*x[:n] = x[:n] - (W1**2 - W2**2)(W1**2 + W2**2)^-1 * x[n:]

            // w1 = (d1**2 - d2**2), w2 = (d1**2 + d2**2)
            w1 := matrix.Minus(matrix.Mul(d1, d1), matrix.Mul(d2, d2))
            w2 := matrix.Plus(matrix.Mul(d1, d1), matrix.Mul(d2, d2))

            // x[:n] += -mul( div(w1, w2), x[n:])
            x_n := x.SubMatrix(n, 0)
            x_val := matrix.Mul(matrix.Div(w1, w2), x_n).Scale(-1.0)
            x.SubMatrix(0, 0, n, 1).Plus(x_val)
            checkpnt.Check("25-f", minor)

            // Solve S*x[:n] = rhs using the Cholesky factor computed in Fkkt.
            err = lapack.Potrs(S, x)
            if err != nil {
                fmt.Printf("Potrs error: %s\n", err)
            }
            checkpnt.Check("30-f", minor)

            // Solve for x[n:]:
            //
            //    (d1**-2 + d2**-2) * x[n:] = x[n:] + (d1**-2 - d2**-2)*x[:n]

            // w1 = (d1**-2 - d2**-2), w2 = (d1**-2 + d2**-2)
            w1 = matrix.Minus(matrix.Mul(d1, d1).Inv(), matrix.Mul(d2, d2).Inv())
            w2 = matrix.Plus(matrix.Mul(d1, d1).Inv(), matrix.Mul(d2, d2).Inv())
            x_n = x.SubMatrix(0, 0, n, 1)

            // x[n:] += mul( d1**-2 - d2**-2, x[:n])
            x_val = matrix.Mul(w1, x_n)
            x.SubMatrix(n, 0, x_val.NumElements(), 1).Plus(x_val)
            checkpnt.Check("35-f", minor)

            // x[n:] = div( x[n:], d1**-2 + d2**-2)
            x_n = x.SubMatrix(n, 0)
            x_val = matrix.Div(x_n, w2)
            x.SubMatrix(n, 0, x_val.NumElements(), 1).Set(x_val)
            checkpnt.Check("40-f", minor)

            // x_n = x[:n], x_2n = x[n:2*n]
            x_n = x.SubMatrix(0, 0, n, 1)
            x_2n := x.SubMatrix(n, 0, n, 1)

            // z := z + W^-T * G*x
            // z[:n] += div( x[:n] - x[n:2*n], d1)
            x_val = matrix.Div(matrix.Minus(x_n, x_2n), d1)
            z.SubMatrix(0, 0, n, 1).Plus(x_val)
            checkpnt.Check("44-f", minor)

            // z[n:2*n] += div( -x[:n] - x[n:2*n], d2)
            x_val = matrix.Div(matrix.Plus(x_n, x_2n).Scale(-1.0), d2)
            z.SubMatrix(n, 0, n, 1).Plus(x_val)
            checkpnt.Check("48-f", minor)

            // z[2*n:] += As*x[:n]
            x_val = matrix.Times(As, x_n)
            z.SubMatrix(2*n, 0, x_val.NumElements(), 1).Plus(x_val)
            checkpnt.Check("50-f", minor)

            return nil
        }
        return
    }

    // matrix(n*[0.0] + n*[1.0])
    c := matrix.FloatZeros(2*n, 1)
    c.SubMatrix(n, 0).SetIndexes(1.0)

    h := matrix.FloatZeros(2*n+m+1, 1)
    h.SetIndexes(1.0, 2*n)

    // h[2*n+1:] = -b
    h.SubMatrix(2*n+1, 0).Plus(b).Scale(-1.0)

    G := &matrixFs{A}

    dims := sets.DSetNew("l", "q", "s")
    dims.Set("l", []int{2 * n})
    dims.Set("q", []int{m + 1})

    var solopts cvx.SolverOptions
    solopts.ShowProgress = true
    if maxIter > 0 {
        solopts.MaxIter = maxIter
    }
    if len(solver) > 0 {
        solopts.KKTSolverName = solver
    }

    return cvx.ConeLpCustomMatrix(c, G, h, nil, nil, dims, Fkkt, &solopts, nil, nil)
}
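A minimal sketch of how qcl1 might be called. The random data and the result lookup via sol.Result.At follow patterns already used in this section; the function name runQcl1Example and the problem sizes are hypothetical, and the package-level maxIter and solver variables consulted inside qcl1 are assumed to be set elsewhere.

// runQcl1Example is a hypothetical caller for qcl1: random m-by-n data with
// m >= n, as the function's own comment requires. b is scaled below unit
// norm so that u = 0 is feasible for || A*u - b ||_2 <= 1.
func runQcl1Example() {
    m, n := 20, 10
    A := matrix.FloatNormal(m, n)
    b := matrix.FloatNormal(m, 1)
    b.Scale(1.0 / (1.1 * blas.Nrm2Float(b)))

    sol, err := qcl1(A, b)
    if err != nil {
        fmt.Printf("qcl1 error: %s\n", err)
        return
    }
    // The primal variable returned by the solver stacks u and the auxiliary
    // variables; access follows the sol.Result.At("x") pattern used above.
    x := sol.Result.At("x")[0]
    fmt.Printf("x:\n%v\n", x)
}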