func (p *acenterProg) F2(x, z *matrix.FloatMatrix) (f, Df, H *matrix.FloatMatrix, err error) {
	f, Df, err = p.F1(x)
	// u = 1 - x**2
	u := matrix.Pow(x, 2.0).Scale(-1.0).Add(1.0)
	z0 := z.GetIndex(0)
	u2 := matrix.Pow(u, 2.0)
	// diagonal of the Hessian: 2*z[0] * (1 + u**2) ./ u**2
	hd := matrix.Div(matrix.Add(u2, 1.0), u2).Scale(2 * z0)
	H = matrix.FloatDiagonal(hd.NumElements(), hd.FloatArray()...)
	return
}
func (p *floorPlan) F2(x, z *matrix.FloatMatrix) (f, Df, H *matrix.FloatMatrix, err error) {
	f, Df, err = p.F1(x)
	x17 := matrix.FloatVector(x.FloatArray()[17:])
	// diagonal of the nonzero Hessian block: 2*z .* Amin ./ x[17:]**3
	tmp := matrix.Div(p.Amin, matrix.Pow(x17, 3.0))
	tmp = matrix.Mul(z, tmp).Scale(2.0)
	diag := matrix.FloatDiagonal(5, tmp.FloatArray()...)
	H = matrix.FloatZeros(22, 22)
	H.SetSubMatrix(17, 17, diag)
	return
}
func (p *floorPlan) F1(x *matrix.FloatMatrix) (f, Df *matrix.FloatMatrix, err error) {
	err = nil
	mn := x.Min(-1, -2, -3, -4, -5)
	if mn <= 0.0 {
		f, Df = nil, nil
		return
	}
	zeros := matrix.FloatZeros(5, 12)
	dk1 := matrix.FloatDiagonal(5, -1.0)
	dk2 := matrix.FloatZeros(5, 5)
	x17 := matrix.FloatVector(x.FloatArray()[17:])

	// -( Amin ./ (x17 .* x17) )
	diag := matrix.Div(p.Amin, matrix.Mul(x17, x17)).Scale(-1.0)
	dk2.SetIndexesFromArray(diag.FloatArray(), matrix.MakeDiagonalSet(5)...)
	Df, _ = matrix.FloatMatrixStacked(matrix.StackRight, zeros, dk1, dk2)

	x12 := matrix.FloatVector(x.FloatArray()[12:17])
	// f = -x[12:17] + div(Amin, x[17:]) == div(Amin, x[17:]) - x[12:17]
	f = matrix.Minus(matrix.Div(p.Amin, x17), x12)
	return
}
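// The nonlinear constraints in the floor-planning example have the form
// f_k(x) = Amin_k/x_{17+k} - x_{12+k}, so each row of Df carries -1 in the
// x_{12+k} column (dk1) and -Amin_k/x_{17+k}^2 in the x_{17+k} column (dk2),
// and the z-weighted Hessian block in F2 is diagonal with entries
// 2*z_k*Amin_k/x_{17+k}^3.  The scalar sketch below is illustrative only and
// not part of the example program; it uses just fmt and math from the
// standard library, and the sample values amin, t are hypothetical.
func checkFloorPlanDerivatives() {
	amin, t := 100.0, 7.5 // hypothetical Amin_k and x_{17+k}
	h := 1e-5
	g := func(u float64) float64 { return amin / u } // nonlinear part of f_k
	d1 := (g(t+h) - g(t-h)) / (2 * h)                // numeric first derivative
	d2 := (g(t+h) - 2*g(t) + g(t-h)) / (h * h)       // numeric second derivative
	fmt.Printf("analytic: %.6f %.6f\n", -amin/(t*t), 2*amin/(t*t*t))
	fmt.Printf("numeric : %.6f %.6f\n", d1, d2)
}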
func (p *acenterProg) F1(x *matrix.FloatMatrix) (f, Df *matrix.FloatMatrix, err error) {
	f = nil
	Df = nil
	err = nil
	max := matrix.Abs(x).Max()
	if max >= 1.0 {
		err = errors.New("max(abs(x)) >= 1.0")
		return
	}
	// u = 1 - x**2
	u := matrix.Pow(x, 2.0).Scale(-1.0).Add(1.0)
	val := -matrix.Log(u).Sum()
	f = matrix.FloatValue(val)
	Df = matrix.Div(matrix.Scale(x, 2.0), u).Transpose()
	return
}
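// For the analytic-centering objective f(x) = -sum(log(1 - x_i^2)) used by
// acenterProg, the gradient returned by F1 is 2*x_i/(1 - x_i^2).  The sketch
// below is a standalone finite-difference check of that formula; it is not
// part of the example, needs only fmt and math, and the point xs is
// hypothetical.
func checkAcenterGradient() {
	xs := []float64{0.3, -0.5, 0.1} // any point with |x_i| < 1
	obj := func(p []float64) float64 {
		val := 0.0
		for _, xi := range p {
			val -= math.Log(1.0 - xi*xi)
		}
		return val
	}
	h := 1e-6
	for i, xi := range xs {
		xp := append([]float64{}, xs...)
		xm := append([]float64{}, xs...)
		xp[i], xm[i] = xi+h, xi-h
		numeric := (obj(xp) - obj(xm)) / (2 * h)
		analytic := 2 * xi / (1 - xi*xi)
		fmt.Printf("Df[%d]: analytic=%.6f numeric=%.6f\n", i, analytic, numeric)
	}
}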
func qcl1(A, b *matrix.FloatMatrix) (*cvx.Solution, error) {

	// Returns the solution u, z of
	//
	//   (primal)  minimize    || u ||_1
	//             subject to  || A * u - b ||_2 <= 1
	//
	//   (dual)    maximize    b^T z - ||z||_2
	//             subject to  || A'*z ||_inf <= 1.
	//
	// Exploits structure, assuming A is m by n with m >= n.

	m, n := A.Size()

	Fkkt := func(W *sets.FloatMatrixSet) (f cvx.KKTFunc, err error) {

		minor := 0
		if !checkpnt.MinorEmpty() {
			minor = checkpnt.MinorTop()
		}

		err = nil
		f = nil
		beta := W.At("beta")[0].GetIndex(0)
		v := W.At("v")[0]

		// As = 2 * v * (v[1:].T * A)
		v_1 := v.SubMatrix(1, 0).Transpose()
		As := matrix.Times(v, matrix.Times(v_1, A)).Scale(2.0)

		// As[1:,:] = -As[1:,:] - A
		As_1 := As.SubMatrix(1, 0, m, n)
		As_1.Scale(-1.0)
		As_1.Minus(A)
		As.Scale(1.0 / beta)

		S := matrix.Times(As.Transpose(), As)
		checkpnt.AddMatrixVar("S", S)

		d1 := W.At("d")[0].SubMatrix(0, 0, n, 1).Copy()
		d2 := W.At("d")[0].SubMatrix(n, 0).Copy()

		// D = 4.0 * (d1**2 + d2**2)**-1
		d := matrix.Plus(matrix.Mul(d1, d1), matrix.Mul(d2, d2)).Inv().Scale(4.0)
		// S[::n+1] += d
		S.Diag().Plus(d.Transpose())

		err = lapack.Potrf(S)
		checkpnt.Check("00-Fkkt", minor)
		if err != nil {
			return
		}

		f = func(x, y, z *matrix.FloatMatrix) (err error) {

			minor := 0
			if !checkpnt.MinorEmpty() {
				minor = checkpnt.MinorTop()
			} else {
				loopf += 1
				minor = loopf
			}
			checkpnt.Check("00-f", minor)

			// -- z := - W**-T * z

			// z[:n] = -div( z[:n], d1 )
			z_val := z.SubMatrix(0, 0, n, 1)
			z_res := matrix.Div(z_val, d1).Scale(-1.0)
			z.SubMatrix(0, 0, n, 1).Set(z_res)

			// z[n:2*n] = -div( z[n:2*n], d2 )
			z_val = z.SubMatrix(n, 0, n, 1)
			z_res = matrix.Div(z_val, d2).Scale(-1.0)
			z.SubMatrix(n, 0, n, 1).Set(z_res)

			// z[2*n:] -= 2.0*v*( v[0]*z[2*n] - blas.dot(v[1:], z[2*n+1:]) )
			v0_z2n := v.GetIndex(0) * z.GetIndex(2*n)
			v1_z2n := blas.DotFloat(v, z, &linalg.IOpt{"offsetx", 1}, &linalg.IOpt{"offsety", 2*n + 1})
			z_res = matrix.Scale(v, -2.0*(v0_z2n-v1_z2n))
			z.SubMatrix(2*n, 0, z_res.NumElements(), 1).Plus(z_res)

			// z[2*n+1:] *= -1.0
			z.SubMatrix(2*n+1, 0).Scale(-1.0)

			// z[2*n:] /= beta
			z.SubMatrix(2*n, 0).Scale(1.0 / beta)

			// -- x := x - G' * W**-1 * z

			// z_n = z[:n], z_2n = z[n:2*n], z_m = z[-(m+1):]
			z_n := z.SubMatrix(0, 0, n, 1)
			z_2n := z.SubMatrix(n, 0, n, 1)
			z_m := z.SubMatrix(z.NumElements()-(m+1), 0)

			// x[:n] -= div(z[:n], d1) - div(z[n:2*n], d2) + As.T * z[-(m+1):]
			z_res = matrix.Minus(matrix.Div(z_n, d1), matrix.Div(z_2n, d2))
			a_res := matrix.Times(As.Transpose(), z_m)
			z_res.Plus(a_res).Scale(-1.0)
			x.SubMatrix(0, 0, n, 1).Plus(z_res)

			// x[n:] += div(z[:n], d1) + div(z[n:2*n], d2)
			z_res = matrix.Plus(matrix.Div(z_n, d1), matrix.Div(z_2n, d2))
			x.SubMatrix(n, 0, z_res.NumElements(), 1).Plus(z_res)
			checkpnt.Check("15-f", minor)

			// Solve for x[:n]:
			//
			//    S*x[:n] = x[:n] - (W1**2 - W2**2)(W1**2 + W2**2)^-1 * x[n:]

			// w1 = (d1**2 - d2**2), w2 = (d1**2 + d2**2)
			w1 := matrix.Minus(matrix.Mul(d1, d1), matrix.Mul(d2, d2))
			w2 := matrix.Plus(matrix.Mul(d1, d1), matrix.Mul(d2, d2))

			// x[:n] += -mul( div(w1, w2), x[n:] )
			x_n := x.SubMatrix(n, 0)
			x_val := matrix.Mul(matrix.Div(w1, w2), x_n).Scale(-1.0)
			x.SubMatrix(0, 0, n, 1).Plus(x_val)
			checkpnt.Check("25-f", minor)

			// x[:n] := S^-1 * x[:n], using the Cholesky factor of S from Fkkt.
			err = lapack.Potrs(S, x)
			if err != nil {
				fmt.Printf("Potrs error: %s\n", err)
			}
			checkpnt.Check("30-f", minor)

			// Solve for x[n:]:
			//
			//    (d1**-2 + d2**-2) * x[n:] = x[n:] + (d1**-2 - d2**-2)*x[:n]

			// w1 = (d1**-2 - d2**-2), w2 = (d1**-2 + d2**-2)
			w1 = matrix.Minus(matrix.Mul(d1, d1).Inv(), matrix.Mul(d2, d2).Inv())
			w2 = matrix.Plus(matrix.Mul(d1, d1).Inv(), matrix.Mul(d2, d2).Inv())
			x_n = x.SubMatrix(0, 0, n, 1)

			// x[n:] += mul( d1**-2 - d2**-2, x[:n] )
			x_val = matrix.Mul(w1, x_n)
			x.SubMatrix(n, 0, x_val.NumElements(), 1).Plus(x_val)
			checkpnt.Check("35-f", minor)

			// x[n:] = div( x[n:], d1**-2 + d2**-2 )
			x_n = x.SubMatrix(n, 0)
			x_val = matrix.Div(x_n, w2)
			x.SubMatrix(n, 0, x_val.NumElements(), 1).Set(x_val)
			checkpnt.Check("40-f", minor)

			// x_n = x[:n], x_2n = x[n:2*n]
			x_n = x.SubMatrix(0, 0, n, 1)
			x_2n := x.SubMatrix(n, 0, n, 1)

			// z := z + W^-T * G*x

			// z[:n] += div( x[:n] - x[n:2*n], d1 )
			x_val = matrix.Div(matrix.Minus(x_n, x_2n), d1)
			z.SubMatrix(0, 0, n, 1).Plus(x_val)
			checkpnt.Check("44-f", minor)

			// z[n:2*n] += div( -x[:n] - x[n:2*n], d2 )
			x_val = matrix.Div(matrix.Plus(x_n, x_2n).Scale(-1.0), d2)
			z.SubMatrix(n, 0, n, 1).Plus(x_val)
			checkpnt.Check("48-f", minor)

			// z[2*n:] += As*x[:n]
			x_val = matrix.Times(As, x_n)
			z.SubMatrix(2*n, 0, x_val.NumElements(), 1).Plus(x_val)
			checkpnt.Check("50-f", minor)

			return nil
		}
		return
	}

	// c = matrix(n*[0.0] + n*[1.0])
	c := matrix.FloatZeros(2*n, 1)
	c.SubMatrix(n, 0).SetIndexes(1.0)

	h := matrix.FloatZeros(2*n+m+1, 1)
	h.SetIndexes(1.0, 2*n)
	// h[2*n+1:] = -b
	h.SubMatrix(2*n+1, 0).Plus(b).Scale(-1.0)

	G := &matrixFs{A}

	dims := sets.DSetNew("l", "q", "s")
	dims.Set("l", []int{2 * n})
	dims.Set("q", []int{m + 1})

	var solopts cvx.SolverOptions
	solopts.ShowProgress = true
	if maxIter > 0 {
		solopts.MaxIter = maxIter
	}
	if len(solver) > 0 {
		solopts.KKTSolverName = solver
	}

	return cvx.ConeLpCustomMatrix(c, G, h, nil, nil, dims, Fkkt, &solopts, nil, nil)
}
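// A minimal driver sketch for qcl1, assuming the package-level maxIter and
// solver variables referenced inside qcl1 are declared elsewhere in this file
// (e.g. set from command-line flags).  The matrix values and the helper name
// runQcl1 are hypothetical, and how to read the primal/dual variables out of
// the returned cvx.Solution is not shown in this listing, so consult the cvx
// package documentation for that.
func runQcl1() {
	// A is m-by-n with m >= n, b is m-by-1; the data slices are passed in the
	// element order expected by matrix.FloatNew.
	A := matrix.FloatNew(3, 2, []float64{1.0, 0.4, -0.2, 0.5, 1.1, 0.3})
	b := matrix.FloatNew(3, 1, []float64{0.6, -0.1, 0.3})

	sol, err := qcl1(A, b)
	if err != nil {
		fmt.Printf("qcl1 error: %v\n", err)
		return
	}
	fmt.Printf("solver finished, got solution: %v\n", sol != nil)
}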
/*
   Returns the Nesterov-Todd scaling W at points s and z, and stores the
   scaled variable in lmbda.

       W * z = W^{-T} * s = lmbda.

   W is a FloatMatrixSet with entries:

   - W['dnl']:  positive vector
   - W['dnli']: componentwise inverse of W['dnl']
   - W['d']:    positive vector
   - W['di']:   componentwise inverse of W['d']
   - W['v']:    list of second-order cone vectors with unit hyperbolic norms
   - W['beta']: list of positive numbers
   - W['r']:    list of square matrices
   - W['rti']:  list of square matrices; rti[k] is the inverse transpose of r[k].
*/
func computeScaling(s, z, lmbda *matrix.FloatMatrix, dims *sets.DimensionSet, mnl int) (W *sets.FloatMatrixSet, err error) {
	err = nil
	W = sets.NewFloatSet("dnl", "dnli", "d", "di", "v", "beta", "r", "rti")

	// For the nonlinear block:
	//
	//     W['dnl']     = sqrt( s[:mnl] ./ z[:mnl] )
	//     W['dnli']    = sqrt( z[:mnl] ./ s[:mnl] )
	//     lambda[:mnl] = sqrt( s[:mnl] .* z[:mnl] )

	var stmp, ztmp, lmd *matrix.FloatMatrix
	if mnl > 0 {
		stmp = matrix.FloatVector(s.FloatArray()[:mnl])
		ztmp = matrix.FloatVector(z.FloatArray()[:mnl])
		dnl := matrix.Sqrt(matrix.Div(stmp, ztmp))
		dnli := matrix.Inv(dnl)
		W.Set("dnl", dnl)
		W.Set("dnli", dnli)
		lmd = matrix.Sqrt(matrix.Mul(stmp, ztmp))
		lmbda.SetIndexesFromArray(lmd.FloatArray(), matrix.MakeIndexSet(0, mnl, 1)...)
	} else {
		// no nonlinear block; nothing to store in W
		mnl = 0
	}

	// For the 'l' block:
	//
	//     W['d']   = sqrt( sk ./ zk )
	//     W['di']  = sqrt( zk ./ sk )
	//     lambda_k = sqrt( sk .* zk )
	//
	// where sk and zk are the first dims['l'] entries of s and z.
	// lambda_k is stored in the first dims['l'] positions of lmbda.

	m := dims.At("l")[0]
	stmp = matrix.FloatVector(s.FloatArray()[mnl : mnl+m])
	ztmp = matrix.FloatVector(z.FloatArray()[mnl : mnl+m])
	d := matrix.Div(stmp, ztmp).Sqrt()
	di := matrix.Inv(d)
	W.Set("d", d)
	W.Set("di", di)
	lmd = matrix.Mul(stmp, ztmp).Sqrt()
	// lmd covers indexes mnl:mnl+m of lmbda and has length m
	lmbda.SetIndexesFromArray(lmd.FloatArray(), matrix.MakeIndexSet(mnl, mnl+m, 1)...)

	/*
	   For the 'q' blocks, compute lists 'v', 'beta'.

	   The vector v[k] has unit hyperbolic norm:

	       sqrt( v[k]' * J * v[k] ) = 1  with  J = [1, 0; 0, -I].

	   beta[k] is a positive scalar.

	   The hyperbolic Householder matrix H = 2*v[k]*v[k]' - J defined by
	   v[k] satisfies

	       (beta[k] * H) * zk = (beta[k] * H) \ sk = lambda_k

	   where sk = s[indq[k]:indq[k+1]], zk = z[indq[k]:indq[k+1]].

	   lambda_k is stored in lmbda[indq[k]:indq[k+1]].
	*/
	ind := mnl + dims.At("l")[0]
	var beta *matrix.FloatMatrix

	for _, k := range dims.At("q") {
		W.Append("v", matrix.FloatZeros(k, 1))
	}
	beta = matrix.FloatZeros(len(dims.At("q")), 1)
	W.Set("beta", beta)

	vset := W.At("v")
	for k, m := range dims.At("q") {
		v := vset[k]

		// a = sqrt( sk' * J * sk ) where J = [1, 0; 0, -I]
		aa := jnrm2(s, m, ind)
		// b = sqrt( zk' * J * zk )
		bb := jnrm2(z, m, ind)
		// beta[k] = ( a / b )**1/2
		beta.SetIndex(k, math.Sqrt(aa/bb))
		// c = sqrt( (sk/a)' * (zk/b) + 1 ) / sqrt(2)
		c0 := blas.DotFloat(s, z, &la_.IOpt{"n", m}, &la_.IOpt{"offsetx", ind},
			&la_.IOpt{"offsety", ind})
		cc := math.Sqrt((c0/aa/bb + 1.0) / 2.0)

		// vk = 1/(2*c) * ( (sk/a) + J * (zk/b) )
		blas.CopyFloat(z, v, &la_.IOpt{"offsetx", ind}, &la_.IOpt{"n", m})
		blas.ScalFloat(v, -1.0/bb)
		v.SetIndex(0, -1.0*v.GetIndex(0))
		blas.AxpyFloat(s, v, 1.0/aa, &la_.IOpt{"offsetx", ind}, &la_.IOpt{"n", m})
		blas.ScalFloat(v, 1.0/2.0/cc)

		// v[k] = 1/sqrt(2*(vk0 + 1)) * ( vk + e ),  e = [1; 0]
		v.SetIndex(0, v.GetIndex(0)+1.0)
		blas.ScalFloat(v, (1.0 / math.Sqrt(2.0*v.GetIndex(0))))

		/*
		   To get the scaled variable lambda_k

		       d = sk0/a + zk0/b + 2*c
		       lambda_k = [ c;
		                    (c + zk0/b)/d * sk1/a + (c + sk0/a)/d * zk1/b ]
		       lambda_k *= sqrt(a * b)
		*/
		lmbda.SetIndex(ind, cc)
		dd := 2*cc + s.GetIndex(ind)/aa + z.GetIndex(ind)/bb
		blas.CopyFloat(s, lmbda, &la_.IOpt{"offsetx", ind + 1},
			&la_.IOpt{"offsety", ind + 1}, &la_.IOpt{"n", m - 1})
		zz := (cc + z.GetIndex(ind)/bb) / dd / aa
		ss := (cc + s.GetIndex(ind)/aa) / dd / bb
		blas.ScalFloat(lmbda, zz, &la_.IOpt{"offset", ind + 1}, &la_.IOpt{"n", m - 1})
		blas.AxpyFloat(z, lmbda, ss, &la_.IOpt{"offsetx", ind + 1},
			&la_.IOpt{"offsety", ind + 1}, &la_.IOpt{"n", m - 1})
		blas.ScalFloat(lmbda, math.Sqrt(aa*bb), &la_.IOpt{"offset", ind}, &la_.IOpt{"n", m})

		ind += m
	}

	/*
	   For the 's' blocks: compute two lists 'r' and 'rti'.

	       r[k]' * sk^{-1} * r[k] = diag(lambda_k)^{-1}
	       r[k]' * zk * r[k]      = diag(lambda_k)

	   where sk and zk are the entries inds[k] : inds[k+1] of s and z,
	   reshaped into symmetric matrices.

	   rti[k] is the inverse of r[k]', so

	       rti[k]' * sk * rti[k]      = diag(lambda_k)^{-1}
	       rti[k]' * zk^{-1} * rti[k] = diag(lambda_k).

	   The vectors lambda_k are stored in

	       lmbda[ dims['l'] + sum(dims['q']) : -1 ]
	*/
	for _, k := range dims.At("s") {
		W.Append("r", matrix.FloatZeros(k, k))
		W.Append("rti", matrix.FloatZeros(k, k))
	}
	maxs := maxdim(dims.At("s"))
	work := matrix.FloatZeros(maxs*maxs, 1)
	Ls := matrix.FloatZeros(maxs*maxs, 1)
	Lz := matrix.FloatZeros(maxs*maxs, 1)
	ind2 := ind

	for k, m := range dims.At("s") {
		r := W.At("r")[k]
		rti := W.At("rti")[k]

		// Factor sk = Ls*Ls'; store the factor in Ls.
		blas.CopyFloat(s, Ls, &la_.IOpt{"offsetx", ind2}, &la_.IOpt{"n", m * m})
		lapack.PotrfFloat(Ls, &la_.IOpt{"n", m}, &la_.IOpt{"lda", m})

		// Factor zk = Lz*Lz'; store the factor in Lz.
		blas.CopyFloat(z, Lz, &la_.IOpt{"offsetx", ind2}, &la_.IOpt{"n", m * m})
		lapack.PotrfFloat(Lz, &la_.IOpt{"n", m}, &la_.IOpt{"lda", m})

		// SVD Lz'*Ls = U*diag(lambda_k)*V'.  Keep U in work.
		// First zero out the strict upper triangle of Ls.
		for i := 0; i < m; i++ {
			blas.ScalFloat(Ls, 0.0, &la_.IOpt{"offset", i * m}, &la_.IOpt{"n", i})
		}
		blas.CopyFloat(Ls, work, &la_.IOpt{"n", m * m})
		blas.TrmmFloat(Lz, work, 1.0, la_.OptTransA, &la_.IOpt{"lda", m},
			&la_.IOpt{"ldb", m}, &la_.IOpt{"n", m}, &la_.IOpt{"m", m})
		lapack.GesvdFloat(work, lmbda, nil, nil, la_.OptJobuO, &la_.IOpt{"lda", m},
			&la_.IOpt{"offsetS", ind}, &la_.IOpt{"n", m}, &la_.IOpt{"m", m})

		// r = Lz^{-T} * U
		blas.CopyFloat(work, r, &la_.IOpt{"n", m * m})
		blas.TrsmFloat(Lz, r, 1.0, la_.OptTransA, &la_.IOpt{"lda", m},
			&la_.IOpt{"n", m}, &la_.IOpt{"m", m})

		// rti = Lz * U
		blas.CopyFloat(work, rti, &la_.IOpt{"n", m * m})
		blas.TrmmFloat(Lz, rti, 1.0, &la_.IOpt{"lda", m}, &la_.IOpt{"n", m},
			&la_.IOpt{"m", m})

		// r   := r   * diag(sqrt(lambda_k))
		// rti := rti * diag(1 ./ sqrt(lambda_k))
		for i := 0; i < m; i++ {
			a := math.Sqrt(lmbda.GetIndex(ind + i))
			blas.ScalFloat(r, a, &la_.IOpt{"offset", m * i}, &la_.IOpt{"n", m})
			blas.ScalFloat(rti, 1.0/a, &la_.IOpt{"offset", m * i}, &la_.IOpt{"n", m})
		}

		ind += m
		ind2 += m * m
	}
	return
}
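// For the componentwise ('l') block the Nesterov-Todd scaling reduces to
// scalars: with d_i = sqrt(s_i/z_i), the identities d_i*z_i = s_i/d_i =
// sqrt(s_i*z_i) = lambda_i hold, which is exactly what computeScaling stores
// in W["d"], W["di"] and lmbda.  The snippet below is a plain-Go sanity check
// of that identity (illustrative only, needs just fmt and math); the sample
// values are hypothetical.
func checkNTScalingScalar() {
	s, z := 2.5, 0.4 // any positive pair
	d := math.Sqrt(s / z)
	lambda := math.Sqrt(s * z)
	fmt.Printf("d*z=%.6f  s/d=%.6f  lambda=%.6f\n", d*z, s/d, lambda)
}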