func TestSymm(t *testing.T) {
    x := matrix.FloatNew(3, 3, []float64{1, 2, 3, 4, 5, 6, 7, 8, 9})
    r := matrix.FloatNew(3, 3, []float64{1, 2, 3, 2, 5, 6, 3, 6, 9})
    fmt.Printf("x=\n%v\n", x)
    cvx.Symm(x, 3, 0)
    fmt.Printf("x=\n%v\n", x)
    if x.Equal(r) {
        fmt.Printf("OK\n")
    } else {
        t.Errorf("FAILED\n")
    }
}
func TestDGetrf(t *testing.T) {
    ipiv := make([]int32, 3)
    A := matrix.FloatNew(3, 2, []float64{1, 2, 3, 4, 5, 6})
    fmt.Printf("pre A:\n%s\n", A)
    err := Getrf(A, ipiv)
    fmt.Printf("err=%v, ipiv=%v\n", err, ipiv)
    fmt.Printf("post A:\n%s\n", A)
}
func TestDgemv(t *testing.T) {
    fmt.Printf("* L2 * test gemv: Y = alpha * A * X + beta * Y\n")
    A := matrix.FloatNew(3, 2, []float64{1, 1, 1, 2, 2, 2})
    X := matrix.FloatVector([]float64{1, 1})
    Y := matrix.FloatVector([]float64{0, 0, 0})
    alpha := matrix.FScalar(1.0)
    beta := matrix.FScalar(0.0)
    fmt.Printf("before: alpha=1.0, beta=0.0\nA=\n%v\nX=\n%v\nY=\n%v\n", A, X, Y)
    err := Gemv(A, X, Y, alpha, beta)
    if err != nil {
        fmt.Printf("error: %s\n", err)
    }
    fmt.Printf("after:\nA=\n%v\nX=\n%v\nY=\n%v\n", A, X, Y)

    fmt.Printf("* L2 * test gemv: X = alpha * A.T * Y + beta * X\n")
    err = Gemv(A, Y, X, alpha, beta, linalg.OptTrans)
    if err != nil {
        fmt.Printf("error: %s\n", err)
    }
    fmt.Printf("after:\nA=\n%v\nX=\n%v\nY=\n%v\n", A, X, Y)
}
func main() {
    Sdata := [][]float64{
        []float64{4e-2, 6e-3, -4e-3, 0.0},
        []float64{6e-3, 1e-2, 0.0, 0.0},
        []float64{-4e-3, 0.0, 2.5e-3, 0.0},
        []float64{0.0, 0.0, 0.0, 0.0}}

    pbar := matrix.FloatVector([]float64{.12, .10, .07, .03})
    S := matrix.FloatMatrixStacked(Sdata)
    n := pbar.Rows()
    G := matrix.FloatDiagonal(n, -1.0)
    h := matrix.FloatZeros(n, 1)
    A := matrix.FloatWithValue(1, n, 1.0)
    b := matrix.FloatNew(1, 1, []float64{1.0})

    var solopts cvx.SolverOptions
    solopts.MaxIter = 30
    solopts.ShowProgress = true

    mu := 1.0
    Smu := S.Copy().Scale(mu)
    pbarNeg := pbar.Copy().Scale(-1.0)
    fmt.Printf("Smu=\n%v\n", Smu.String())
    fmt.Printf("-pbar=\n%v\n", pbarNeg.String())

    sol, err := cvx.Qp(Smu, pbarNeg, G, h, A, b, &solopts, nil)
    fmt.Printf("status: %v\n", err)
    if sol != nil && sol.Status == cvx.Optimal {
        x := sol.Result.At("x")[0]
        ret := blas.DotFloat(x, pbar)
        risk := math.Sqrt(blas.DotFloat(x, S.Times(x)))
        fmt.Printf("ret=%.3f, risk=%.3f\n", ret, risk)
        fmt.Printf("x=\n%v\n", x)
    }
}
// Solves a pair of primal and dual SDPs
//
//     minimize    c'*x
//     subject to  Gl*x + sl = hl
//                 mat(Gs[k]*x) + ss[k] = hs[k], k = 0, ..., N-1
//                 A*x = b
//                 sl >= 0,  ss[k] >= 0, k = 0, ..., N-1
//
//     maximize    -hl'*zl - sum_k trace(hs[k]*zs[k]) - b'*y
//     subject to  Gl'*zl + sum_k Gs[k]'*vec(zs[k]) + A'*y + c = 0
//                 zl >= 0,  zs[k] >= 0, k = 0, ..., N-1.
//
// The inequalities sl >= 0 and zl >= 0 are elementwise vector
// inequalities. The inequalities ss[k] >= 0, zs[k] >= 0 are matrix
// inequalities, i.e. the symmetric matrices ss[k] and zs[k] must be
// positive semidefinite. mat(Gs[k]*x) is the symmetric matrix X with
// X[:] = Gs[k]*x. For a symmetric matrix zs[k], vec(zs[k]) is the
// vector zs[k][:].
//
func Sdp(c, Gl, hl, A, b *matrix.FloatMatrix, Ghs *FloatMatrixSet, solopts *SolverOptions, primalstart, dualstart *FloatMatrixSet) (sol *Solution, err error) {

    if c == nil {
        err = errors.New("'c' must be a column matrix")
        return
    }
    n := c.Rows()
    if n < 1 {
        err = errors.New("Number of variables must be at least 1")
        return
    }
    if Gl == nil {
        Gl = matrix.FloatZeros(0, n)
    }
    if Gl.Cols() != n {
        err = errors.New(fmt.Sprintf("'Gl' must be a matrix with %d columns", n))
        return
    }
    ml := Gl.Rows()
    if hl == nil {
        hl = matrix.FloatZeros(0, 1)
    }
    if !hl.SizeMatch(ml, 1) {
        err = errors.New(fmt.Sprintf("'hl' must be a matrix of size (%d,1)", ml))
        return
    }

    Gsset := Ghs.At("Gs")
    ms := make([]int, 0)
    for i, Gs := range Gsset {
        if Gs.Cols() != n {
            err = errors.New(fmt.Sprintf("'Gs' must be a list of matrices with %d columns", n))
            return
        }
        sz := int(math.Sqrt(float64(Gs.Rows())))
        if Gs.Rows() != sz*sz {
            err = errors.New(fmt.Sprintf("the square root of the number of rows of 'Gs[%d]' is not an integer", i))
            return
        }
        ms = append(ms, sz)
    }

    hsset := Ghs.At("hs")
    if len(Gsset) != len(hsset) {
        err = errors.New(fmt.Sprintf("'hs' must be a list of %d matrices", len(Gsset)))
        return
    }
    for i, hs := range hsset {
        if !hs.SizeMatch(ms[i], ms[i]) {
            s := fmt.Sprintf("hs[%d] has size (%d,%d). Expected size is (%d,%d)",
                i, hs.Rows(), hs.Cols(), ms[i], ms[i])
            err = errors.New(s)
            return
        }
    }

    if A == nil {
        A = matrix.FloatZeros(0, n)
    }
    if A.Cols() != n {
        err = errors.New(fmt.Sprintf("'A' must be a matrix with %d columns", n))
        return
    }
    p := A.Rows()
    if b == nil {
        b = matrix.FloatZeros(0, 1)
    }
    if !b.SizeMatch(p, 1) {
        err = errors.New(fmt.Sprintf("'b' must be a matrix of size (%d,1)", p))
        return
    }

    dims := DSetNew("l", "q", "s")
    dims.Set("l", []int{ml})
    dims.Set("s", ms)
    N := dims.Sum("l") + dims.SumSquared("s")

    // Map hl and the hs matrices to a single stacked h vector.
    h := matrix.FloatZeros(N, 1)
    h.SetIndexes(matrix.MakeIndexSet(0, ml, 1), hl.FloatArray()[:ml])
    ind := ml
    for k, hs := range hsset {
        h.SetIndexes(matrix.MakeIndexSet(ind, ind+ms[k]*ms[k], 1), hs.FloatArray())
        ind += ms[k] * ms[k]
    }

    // Stack Gl and the Gs matrices into a single G matrix.
    Gargs := make([]*matrix.FloatMatrix, 0)
    Gargs = append(Gargs, Gl)
    Gargs = append(Gargs, Gsset...)
    G, sizeg := matrix.FloatMatrixCombined(matrix.StackDown, Gargs...)

    var pstart, dstart *FloatMatrixSet = nil, nil
    if primalstart != nil {
        pstart = FloatSetNew("x", "s")
        pstart.Set("x", primalstart.At("x")[0])
        slset := primalstart.At("sl")
        margs := make([]*matrix.FloatMatrix, 0, len(slset)+1)
        margs = append(margs, primalstart.At("s")[0])
        margs = append(margs, slset...)
        sl, _ := matrix.FloatMatrixCombined(matrix.StackDown, margs...)
        pstart.Set("s", sl)
    }
    if dualstart != nil {
        dstart = FloatSetNew("y", "z")
        dstart.Set("y", dualstart.At("y")[0])
        zlset := dualstart.At("zl")
        margs := make([]*matrix.FloatMatrix, 0, len(zlset)+1)
        margs = append(margs, dualstart.At("z")[0])
        margs = append(margs, zlset...)
        zl, _ := matrix.FloatMatrixCombined(matrix.StackDown, margs...)
        dstart.Set("z", zl)
    }

    sol, err = ConeLp(c, G, h, A, b, dims, solopts, pstart, dstart)

    // Unpack sol.Result: split the stacked s and z vectors back into the
    // linear parts (sl, zl) and the semidefinite blocks (ss, zs).
    if err == nil {
        s := sol.Result.At("s")[0]
        sl := matrix.FloatVector(s.FloatArray()[:ml])
        sol.Result.Append("sl", sl)
        ind := ml
        for _, m := range ms {
            sk := matrix.FloatNew(m, m, s.FloatArray()[ind:ind+m*m])
            sol.Result.Append("ss", sk)
            ind += m * m
        }

        z := sol.Result.At("z")[0]
        zl := matrix.FloatVector(z.FloatArray()[:ml])
        sol.Result.Append("zl", zl)
        ind = ml
        for i, k := range sizeg[1:] {
            zk := matrix.FloatNew(ms[i], ms[i], z.FloatArray()[ind:ind+k])
            sol.Result.Append("zs", zk)
            ind += k
        }
        sol.Result.Remove("s")
        sol.Result.Remove("z")
    }
    return
}