Example #1
File: testqp.go  Project: hrautila/go.opt
func main() {

	Sdata := [][]float64{
		[]float64{4e-2, 6e-3, -4e-3, 0.0},
		[]float64{6e-3, 1e-2, 0.0, 0.0},
		[]float64{-4e-3, 0.0, 2.5e-3, 0.0},
		[]float64{0.0, 0.0, 0.0, 0.0}}

	pbar := matrix.FloatVector([]float64{.12, .10, .07, .03})
	S := matrix.FloatMatrixFromTable(Sdata)
	n := pbar.Rows()
	G := matrix.FloatDiagonal(n, -1.0)
	h := matrix.FloatZeros(n, 1)
	A := matrix.FloatWithValue(1, n, 1.0)
	b := matrix.FloatNew(1, 1, []float64{1.0})

	var solopts cvx.SolverOptions
	solopts.MaxIter = 30
	solopts.ShowProgress = true

	mu := 1.0
	Smu := matrix.Scale(S, mu)
	pbarNeg := matrix.Scale(pbar, -1.0)
	fmt.Printf("Smu=\n%v\n", Smu.String())
	fmt.Printf("-pbar=\n%v\n", pbarNeg.String())

	sol, err := cvx.Qp(Smu, pbarNeg, G, h, A, b, &solopts, nil)

	fmt.Printf("status: %v\n", err)
	if sol != nil && sol.Status == cvx.Optimal {
		x := sol.Result.At("x")[0]
		ret := blas.DotFloat(x, pbar)
		risk := math.Sqrt(blas.DotFloat(x, S.Times(x)))
		fmt.Printf("ret=%.3f, risk=%.3f\n", ret, risk)
		fmt.Printf("x=\n%v\n", x)
	}
}
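The call cvx.Qp(Smu, pbarNeg, G, h, A, b, ...) sets up the classic Markowitz portfolio problem: minimize (1/2) x'(mu*S)x - pbar'x subject to x >= 0 (G = -I, h = 0) and sum(x) = 1 (A = ones(1,n), b = 1), following the CVXOPT convention of minimizing (1/2) x'Px + q'x. A minimal sketch, reusing only calls already shown above and assuming S, pbar, G, h, A, b and solopts are still in scope, of how one might sweep the risk-aversion parameter mu to trace out the return/risk trade-off:

	// Hypothetical sweep over mu; each value re-weights the risk term mu*S
	// against the expected-return term -pbar in the QP objective.
	for _, mu := range []float64{0.1, 1.0, 10.0} {
		sol, err := cvx.Qp(matrix.Scale(S, mu), matrix.Scale(pbar, -1.0), G, h, A, b, &solopts, nil)
		if err != nil || sol == nil || sol.Status != cvx.Optimal {
			continue
		}
		x := sol.Result.At("x")[0]
		ret := blas.DotFloat(x, pbar)
		risk := math.Sqrt(blas.DotFloat(x, S.Times(x)))
		fmt.Printf("mu=%6.2f  ret=%.3f  risk=%.3f\n", mu, ret, risk)
	}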
Example #2
func TestAcent(t *testing.T) {
	// matrix data given in row-order presentation
	Adata := [][]float64{
		[]float64{-7.44e-01, 1.11e-01, 1.29e+00, 2.62e+00, -1.82e+00},
		[]float64{4.59e-01, 7.06e-01, 3.16e-01, -1.06e-01, 7.80e-01},
		[]float64{-2.95e-02, -2.22e-01, -2.07e-01, -9.11e-01, -3.92e-01},
		[]float64{-7.75e-01, 1.03e-01, -1.22e+00, -5.74e-01, -3.32e-01},
		[]float64{-1.80e+00, 1.24e+00, -2.61e+00, -9.31e-01, -6.38e-01}}

	bdata := []float64{
		8.38e-01, 9.92e-01, 9.56e-01, 6.14e-01, 6.56e-01,
		3.57e-01, 6.36e-01, 5.08e-01, 8.81e-03, 7.08e-02}

	// this is the solution obtained by running cvxopt's acent.py on the data above
	solData := []float64{-11.59728373909344512, -1.35196389161339936,
		7.21894899350256303, -3.29159917142051528, 4.90454147385329176}

	ntData := []float64{
		1.5163484265903457, 1.2433928210771914, 1.0562922103520955, 0.8816246051011607,
		0.7271128861543598, 0.42725003346248974, 0.0816777301914883, 0.0005458037072843131,
		1.6259980735305693e-10}

	b := matrix.FloatVector(bdata)
	Al := matrix.FloatMatrixFromTable(Adata, matrix.RowOrder)
	Au := matrix.Scale(Al, -1.0)
	A := matrix.FloatZeros(2*Al.Rows(), Al.Cols())
	A.SetSubMatrix(0, 0, Al)
	A.SetSubMatrix(Al.Rows(), 0, Au)

	x, nt, err := acent(A, b, 10)
	if err != nil {
		t.Logf("Acent error: %s", err)
		t.Fail()
	}
	solref := matrix.FloatVector(solData)
	ntref := matrix.FloatVector(ntData)
	soldf := matrix.Minus(x, solref)
	ntdf := matrix.Minus(matrix.FloatVector(nt), ntref)
	solNrm := blas.Nrm2Float(soldf)
	ntNrm := blas.Nrm2Float(ntdf)
	t.Logf("x  [diff=%.2e]:\n%v\n", solNrm, x)
	t.Logf("nt [diff=%.2e]:\n%v\n", ntNrm, nt)

	if solNrm > TOL {
		t.Log("solution deviates too much from expected\n")
		t.Fail()
	}
}
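The constraint matrix assembled above stacks Al on top of -Al, so the polytope fed to acent is bounded on both sides of each hyperplane. Restating what the code builds, with b split into its first and last five entries:

	A = \begin{bmatrix} A_l \\ -A_l \end{bmatrix},
	\qquad
	A x \le b \;\Longleftrightarrow\; -b_{5:9} \le A_l x \le b_{0:4}.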
Example #3
File: cp_test.go  Project: hrautila/cvx
func (p *acenterProg) F1(x *matrix.FloatMatrix) (f, Df *matrix.FloatMatrix, err error) {
	f = nil
	Df = nil
	err = nil
	max := matrix.Abs(x).Max()
	if max >= 1.0 {
		err = errors.New("max(abs(x)) >= 1.0")
		return
	}
	// u = 1 - x**2
	u := matrix.Pow(x, 2.0).Scale(-1.0).Add(1.0)
	val := -matrix.Log(u).Sum()
	f = matrix.FloatValue(val)
	Df = matrix.Div(matrix.Scale(x, 2.0), u).Transpose()
	return
}
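F1 evaluates the log-barrier objective and its gradient; written out, this is a direct transcription of the code above:

	f(x) = -\sum_{i} \log(1 - x_i^2),
	\qquad
	\frac{\partial f}{\partial x_i} = \frac{2 x_i}{1 - x_i^2},

so with u = 1 - x**2 the returned Df is the row vector matrix.Div(matrix.Scale(x, 2.0), u).Transpose(), and the domain check max(abs(x)) < 1 keeps every logarithm defined.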
Example #4
File: chernoff.go  Project: hrautila/go.opt
func main() {
	m := 6
	Vdata := [][]float64{
		[]float64{1.0, -1.0, -2.0, -2.0, 0.0, 1.5, 1.0},
		[]float64{1.0, 2.0, 1.0, -1.0, -2.0, -1.0, 1.0}}

	V := matrix.FloatMatrixFromTable(Vdata, matrix.RowOrder)

	// V[1, :m] - V[1,1:]
	a0 := matrix.Minus(V.GetSubMatrix(1, 0, 1, m), V.GetSubMatrix(1, 1, 1))
	// V[0, :m] - V[0,1:]
	a1 := matrix.Minus(V.GetSubMatrix(0, 0, 1, m), V.GetSubMatrix(0, 1, 1))
	A0, _ := matrix.FloatMatrixStacked(matrix.StackDown, a0.Scale(-1.0), a1)
	A0 = A0.Transpose()
	b0 := matrix.Mul(A0, V.GetSubMatrix(0, 0, 2, m).Transpose())
	b0 = matrix.Times(b0, matrix.FloatWithValue(2, 1, 1.0))

	A := make([]*matrix.FloatMatrix, 0)
	b := make([]*matrix.FloatMatrix, 0)
	A = append(A, A0)
	b = append(b, b0)

	// List of symbols
	C := make([]*matrix.FloatMatrix, 0)
	C = append(C, matrix.FloatZeros(2, 1))
	var row *matrix.FloatMatrix = nil
	for k := 0; k < m; k++ {
		row = A0.GetRow(k, row)
		nrm := blas.Nrm2Float(row)
		row.Scale(2.0 * b0.GetIndex(k) / (nrm * nrm))
		C = append(C, row.Transpose())
	}

	// Voronoi set around C[1]
	A1 := matrix.FloatZeros(3, 2)
	A1.SetSubMatrix(0, 0, A0.GetSubMatrix(0, 0, 1).Scale(-1.0))
	A1.SetSubMatrix(1, 0, matrix.Minus(C[m], C[1]).Transpose())
	A1.SetSubMatrix(2, 0, matrix.Minus(C[2], C[1]).Transpose())
	b1 := matrix.FloatZeros(3, 1)
	b1.SetIndex(0, -b0.GetIndex(0))
	v := matrix.Times(A1.GetRow(1, nil), matrix.Plus(C[m], C[1])).Float() * 0.5
	b1.SetIndex(1, v)
	v = matrix.Times(A1.GetRow(2, nil), matrix.Plus(C[2], C[1])).Float() * 0.5
	b1.SetIndex(2, v)
	A = append(A, A1)
	b = append(b, b1)

	// Voronoi set around C[2] ... C[5]
	for k := 2; k < 6; k++ {
		A1 = matrix.FloatZeros(3, 2)
		A1.SetSubMatrix(0, 0, A0.GetSubMatrix(k-1, 0, 1).Scale(-1.0))
		A1.SetSubMatrix(1, 0, matrix.Minus(C[k-1], C[k]).Transpose())
		A1.SetSubMatrix(2, 0, matrix.Minus(C[k+1], C[k]).Transpose())
		b1 = matrix.FloatZeros(3, 1)
		b1.SetIndex(0, -b0.GetIndex(k-1))
		v := matrix.Times(A1.GetRow(1, nil), matrix.Plus(C[k-1], C[k])).Float() * 0.5
		b1.SetIndex(1, v)
		v = matrix.Times(A1.GetRow(2, nil), matrix.Plus(C[k+1], C[k])).Float() * 0.5
		b1.SetIndex(2, v)
		A = append(A, A1)
		b = append(b, b1)
	}

	// Voronoi set around C[6]
	A1 = matrix.FloatZeros(3, 2)
	A1.SetSubMatrix(0, 0, A0.GetSubMatrix(5, 0, 1).Scale(-1.0))
	A1.SetSubMatrix(1, 0, matrix.Minus(C[1], C[6]).Transpose())
	A1.SetSubMatrix(2, 0, matrix.Minus(C[5], C[6]).Transpose())
	b1 = matrix.FloatZeros(3, 1)
	b1.SetIndex(0, -b0.GetIndex(5))
	v = matrix.Times(A1.GetRow(1, nil), matrix.Plus(C[1], C[6])).Float() * 0.5
	b1.SetIndex(1, v)
	v = matrix.Times(A1.GetRow(2, nil), matrix.Plus(C[5], C[6])).Float() * 0.5
	b1.SetIndex(2, v)

	A = append(A, A1)
	b = append(b, b1)

	P := matrix.FloatIdentity(2)
	q := matrix.FloatZeros(2, 1)
	solopts := &cvx.SolverOptions{ShowProgress: false, MaxIter: 30}
	ovals := make([]float64, 0)
	for k := 1; k < 7; k++ {
		sol, err := cvx.Qp(P, q, A[k], b[k], nil, nil, solopts, nil)
		_ = err
		x := sol.Result.At("x")[0]
		ovals = append(ovals, math.Pow(blas.Nrm2Float(x), 2.0))
	}

	optvals := matrix.FloatVector(ovals)
	//fmt.Printf("optvals=\n%v\n", optvals)

	rangeFunc := func(n int) []float64 {
		r := make([]float64, 0)
		for i := 0; i < n; i++ {
			r = append(r, float64(i))
		}
		return r
	}

	nopts := 200
	sigmas := matrix.FloatVector(rangeFunc(nopts))
	sigmas.Scale((0.5 - 0.2) / float64(nopts)).Add(0.2)

	bndsVal := func(sigma float64) float64 {
		// 1.0 - sum(exp( -optvals/(2*sigma**2)))
		return 1.0 - matrix.Exp(matrix.Scale(optvals, -1.0/(2*sigma*sigma))).Sum()
	}
	bnds := matrix.FloatZeros(sigmas.NumElements(), 1)
	for j, v := range sigmas.FloatArray() {
		bnds.SetIndex(j, bndsVal(v))
	}
	plotData("plot.png", sigmas.FloatArray(), bnds.FloatArray())
}
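Each of the six QPs above minimizes (1/2)||x||^2 over the Voronoi region built for C[k], so ovals[k-1] is the squared distance d_k^2 from the origin (the symbol C[0] = 0) to that region. The curve plotted against sigma in [0.2, 0.5) is the quantity already given in the bndsVal comment, a Chernoff-style detection bound as the file name suggests:

	1 - \sum_{k=1}^{6} \exp\!\Big(-\frac{d_k^2}{2\sigma^2}\Big),
	\qquad
	d_k^2 = \lVert x_k^\star \rVert_2^2 = \texttt{ovals}[k-1].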
Example #5
File: cvxfit.go  Project: hrautila/go.opt
func main() {
	flag.Parse()

	m := len(udata)
	nvars := 2 * m
	u := matrix.FloatVector(udata[:m])
	y := matrix.FloatVector(ydata[:m])

	// minimize    (1/2) * || yhat - y ||_2^2
	// subject to  yhat[j] >= yhat[i] + g[i]' * (u[j] - u[i]), j, i = 0,...,m-1
	//
	// Variables  yhat (m), g (m).

	P := matrix.FloatZeros(nvars, nvars)
	// set m first diagonal indexes to 1.0
	//P.SetIndexes(1.0, matrix.DiagonalIndexes(P)[:m]...)
	P.Diag().SubMatrix(0, 0, 1, m).SetIndexes(1.0)
	q := matrix.FloatZeros(nvars, 1)
	q.SubMatrix(0, 0, y.NumElements(), 1).Plus(matrix.Scale(y, -1.0))

	// m blocks (i = 0,...,m-1) of linear inequalities
	//
	//     yhat[i] + g[i]' * (u[j] - u[i]) <= yhat[j], j = 0,...,m-1.

	G := matrix.FloatZeros(m*m, nvars)
	I := matrix.FloatDiagonal(m, 1.0)

	for i := 0; i < m; i++ {
		// coefficients of yhat[i] (column i)
		//G.Set(1.0, matrix.ColumnIndexes(G, i)[i*m:(i+1)*m]...)
		column(G, i).SetIndexes(1.0)

		// coefficients of gi[i] (column i, rows i*m ... (i+1)*m)
		//rows := matrix.Indexes(i*m, (i+1)*m)
		//G.SetAtColumnArray(m+i, rows, matrix.Add(u, -u.GetIndex(i)).FloatArray())

		// coefficients of gi[i] (column i, rows i*m ... (i+1)*m)
		// from column m+i, starting at row i*m, select m rows and one column
		G.SubMatrix(i*m, m+i, m, 1).Plus(matrix.Add(u, -u.GetIndex(i)))

		// coefficients of yhat[j], rows i*m ... (i+1)*m, cols 0 ... m
		//G.SetSubMatrix(i*m, 0, matrix.Minus(G.GetSubMatrix(i*m, 0, m, m), I))
		G.SubMatrix(i*m, 0, m, m).Minus(I)
	}

	h := matrix.FloatZeros(m*m, 1)
	var A, b *matrix.FloatMatrix = nil, nil
	var solopts cvx.SolverOptions
	solopts.ShowProgress = true
	solopts.KKTSolverName = solver

	sol, err := cvx.Qp(P, q, G, h, A, b, &solopts, nil)
	if err != nil {
		fmt.Printf("error: %v\n", err)
		return
	}
	if sol != nil && sol.Status != cvx.Optimal {
		fmt.Printf("status not optimal\n")
		return
	}
	x := sol.Result.At("x")[0]
	//yhat := matrix.FloatVector(x.FloatArray()[:m])
	//g := matrix.FloatVector(x.FloatArray()[m:])
	yhat := x.SubMatrix(0, 0, m, 1).Copy()
	g := x.SubMatrix(m, 0).Copy()

	rangeFunc := func(n int) []float64 {
		r := make([]float64, 0)
		for i := 0; i < n; i++ {
			r = append(r, float64(i)*2.2/float64(n))
		}
		return r
	}
	ts := rangeFunc(1000)
	fitFunc := func(points []float64) []float64 {
		res := make([]float64, len(points))
		for k, t := range points {
			res[k] = matrix.Plus(yhat, matrix.Mul(g, matrix.Scale(u, -1.0).Add(t))).Max()
		}
		return res
	}
	fs := fitFunc(ts)
	plotData("cvxfit.png", u.FloatArray(), y.FloatArray(), ts, fs)
}
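The optimal x packs the fitted values yhat (first m entries) and the subgradients g (last m entries); fitFunc then evaluates the resulting piecewise-linear convex fit. Restating the expression inside fitFunc:

	\hat f(t) = \max_{i=0,\dots,m-1}\big(\hat y_i + g_i\,(t - u_i)\big),

i.e. the pointwise maximum of the m supporting lines recovered by the QP, which is what gets plotted against the raw (u, y) data.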
Example #6
File: testqcl1.go  Project: hrautila/go.opt
func qcl1(A, b *matrix.FloatMatrix) (*cvx.Solution, error) {

	// Returns the solution u, z of
	//
	//   (primal)  minimize    || u ||_1
	//             subject to  || A * u - b ||_2  <= 1
	//
	//   (dual)    maximize    b^T z - ||z||_2
	//             subject to  || A'*z ||_inf <= 1.
	//
	// Exploits structure, assuming A is m by n with m >= n.

	m, n := A.Size()
	Fkkt := func(W *sets.FloatMatrixSet) (f cvx.KKTFunc, err error) {

		minor := 0
		if !checkpnt.MinorEmpty() {
			minor = checkpnt.MinorTop()
		}

		err = nil
		f = nil
		beta := W.At("beta")[0].GetIndex(0)
		v := W.At("v")[0]

		// As = 2 * v *(v[1:].T * A)
		//v_1 := matrix.FloatNew(1, v.NumElements()-1, v.FloatArray()[1:])
		v_1 := v.SubMatrix(1, 0).Transpose()

		As := matrix.Times(v, matrix.Times(v_1, A)).Scale(2.0)

		//As_1 := As.GetSubMatrix(1, 0, m, n)
		//As_1.Scale(-1.0)
		//As.SetSubMatrix(1, 0, matrix.Minus(As_1, A))
		As_1 := As.SubMatrix(1, 0, m, n)
		As_1.Scale(-1.0)
		As_1.Minus(A)
		As.Scale(1.0 / beta)

		S := matrix.Times(As.Transpose(), As)
		checkpnt.AddMatrixVar("S", S)

		d1 := W.At("d")[0].SubMatrix(0, 0, n, 1).Copy()
		d2 := W.At("d")[0].SubMatrix(n, 0).Copy()

		// D = 4.0 * (d1**2 + d2**2)**-1
		d := matrix.Plus(matrix.Mul(d1, d1), matrix.Mul(d2, d2)).Inv().Scale(4.0)
		// S[::n+1] += d
		S.Diag().Plus(d.Transpose())

		err = lapack.Potrf(S)
		checkpnt.Check("00-Fkkt", minor)
		if err != nil {
			return
		}

		f = func(x, y, z *matrix.FloatMatrix) (err error) {

			minor := 0
			if !checkpnt.MinorEmpty() {
				minor = checkpnt.MinorTop()
			} else {
				loopf += 1
				minor = loopf
			}
			checkpnt.Check("00-f", minor)

			// -- z := - W**-T * z
			// z[:n] = -div( z[:n], d1 )
			z_val := z.SubMatrix(0, 0, n, 1)
			z_res := matrix.Div(z_val, d1).Scale(-1.0)
			z.SubMatrix(0, 0, n, 1).Set(z_res)

			// z[n:2*n] = -div( z[n:2*n], d2 )
			z_val = z.SubMatrix(n, 0, n, 1)
			z_res = matrix.Div(z_val, d2).Scale(-1.0)
			z.SubMatrix(n, 0, n, 1).Set(z_res)

			// z[2*n:] -= 2.0*v*( v[0]*z[2*n] - blas.dot(v[1:], z[2*n+1:]) )
			v0_z2n := v.GetIndex(0) * z.GetIndex(2*n)
			v1_z2n := blas.DotFloat(v, z, &linalg.IOpt{"offsetx", 1}, &linalg.IOpt{"offsety", 2*n + 1})
			z_res = matrix.Scale(v, -2.0*(v0_z2n-v1_z2n))
			z.SubMatrix(2*n, 0, z_res.NumElements(), 1).Plus(z_res)

			// z[2*n+1:] *= -1.0
			z.SubMatrix(2*n+1, 0).Scale(-1.0)

			// z[2*n:] /= beta
			z.SubMatrix(2*n, 0).Scale(1.0 / beta)

			// -- x := x - G' * W**-1 * z

			// z_n = z[:n], z_2n = z[n:2*n], z_m = z[-(m+1):],
			z_n := z.SubMatrix(0, 0, n, 1)
			z_2n := z.SubMatrix(n, 0, n, 1)
			z_m := z.SubMatrix(z.NumElements()-(m+1), 0)

			// x[:n] -= div(z[:n], d1) - div(z[n:2*n], d2) + As.T * z[-(m+1):]
			z_res = matrix.Minus(matrix.Div(z_n, d1), matrix.Div(z_2n, d2))
			a_res := matrix.Times(As.Transpose(), z_m)
			z_res.Plus(a_res).Scale(-1.0)
			x.SubMatrix(0, 0, n, 1).Plus(z_res)

			// x[n:] += div(z[:n], d1) + div(z[n:2*n], d2)
			z_res = matrix.Plus(matrix.Div(z_n, d1), matrix.Div(z_2n, d2))
			x.SubMatrix(n, 0, z_res.NumElements(), 1).Plus(z_res)
			checkpnt.Check("15-f", minor)

			// Solve for x[:n]:
			//
			//    S*x[:n] = x[:n] - (W1**2 - W2**2)(W1**2 + W2**2)^-1 * x[n:]

			// w1 = (d1**2 - d2**2), w2 = (d1**2 + d2**2)
			w1 := matrix.Minus(matrix.Mul(d1, d1), matrix.Mul(d2, d2))
			w2 := matrix.Plus(matrix.Mul(d1, d1), matrix.Mul(d2, d2))

			// x[:n] += -mul( div(w1, w2), x[n:])
			x_n := x.SubMatrix(n, 0)
			x_val := matrix.Mul(matrix.Div(w1, w2), x_n).Scale(-1.0)
			x.SubMatrix(0, 0, n, 1).Plus(x_val)
			checkpnt.Check("25-f", minor)

			// Solve for x[n:]:
			//
			//    (d1**-2 + d2**-2) * x[n:] = x[n:] + (d1**-2 - d2**-2)*x[:n]

			err = lapack.Potrs(S, x)
			if err != nil {
				fmt.Printf("Potrs error: %s\n", err)
			}
			checkpnt.Check("30-f", minor)

			// Solve for x[n:]:
			//
			//    (d1**-2 + d2**-2) * x[n:] = x[n:] + (d1**-2 - d2**-2)*x[:n]

			// w1 = (d1**-2 - d2**-2), w2 = (d1**-2 + d2**-2)
			w1 = matrix.Minus(matrix.Mul(d1, d1).Inv(), matrix.Mul(d2, d2).Inv())
			w2 = matrix.Plus(matrix.Mul(d1, d1).Inv(), matrix.Mul(d2, d2).Inv())
			x_n = x.SubMatrix(0, 0, n, 1)

			// x[n:] += mul( d1**-2 - d2**-2, x[:n])
			x_val = matrix.Mul(w1, x_n)
			x.SubMatrix(n, 0, x_val.NumElements(), 1).Plus(x_val)
			checkpnt.Check("35-f", minor)

			// x[n:] = div( x[n:], d1**-2 + d2**-2)
			x_n = x.SubMatrix(n, 0)
			x_val = matrix.Div(x_n, w2)
			x.SubMatrix(n, 0, x_val.NumElements(), 1).Set(x_val)
			checkpnt.Check("40-f", minor)

			// x_n = x[:n], x-2n = x[n:2*n]
			x_n = x.SubMatrix(0, 0, n, 1)
			x_2n := x.SubMatrix(n, 0, n, 1)

			// z := z + W^-T * G*x
			// z[:n] += div( x[:n] - x[n:2*n], d1)
			x_val = matrix.Div(matrix.Minus(x_n, x_2n), d1)
			z.SubMatrix(0, 0, n, 1).Plus(x_val)
			checkpnt.Check("44-f", minor)

			// z[n:2*n] += div( -x[:n] - x[n:2*n], d2)
			x_val = matrix.Div(matrix.Plus(x_n, x_2n).Scale(-1.0), d2)
			z.SubMatrix(n, 0, n, 1).Plus(x_val)
			checkpnt.Check("48-f", minor)

			// z[2*n:] += As*x[:n]
			x_val = matrix.Times(As, x_n)
			z.SubMatrix(2*n, 0, x_val.NumElements(), 1).Plus(x_val)

			checkpnt.Check("50-f", minor)

			return nil
		}
		return
	}

	// matrix(n*[0.0] + n*[1.0])
	c := matrix.FloatZeros(2*n, 1)
	c.SubMatrix(n, 0).SetIndexes(1.0)

	h := matrix.FloatZeros(2*n+m+1, 1)
	h.SetIndexes(1.0, 2*n)
	// h[2*n+1:] = -b
	h.SubMatrix(2*n+1, 0).Plus(b).Scale(-1.0)
	G := &matrixFs{A}

	dims := sets.DSetNew("l", "q", "s")
	dims.Set("l", []int{2 * n})
	dims.Set("q", []int{m + 1})

	var solopts cvx.SolverOptions
	solopts.ShowProgress = true
	if maxIter > 0 {
		solopts.MaxIter = maxIter
	}
	if len(solver) > 0 {
		solopts.KKTSolverName = solver
	}
	return cvx.ConeLpCustomMatrix(c, G, h, nil, nil, dims, Fkkt, &solopts, nil, nil)
}
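Setting the custom KKT solver aside, the cone program handed to ConeLpCustomMatrix is the standard epigraph reformulation of the l1/SOC problem described in the opening comment. With variables (u, t) in R^{2n}, c = (0,...,0, 1,...,1) as built above, a linear cone of size 2n and one second-order cone of size m+1, it reads:

	\begin{array}{ll}
	\text{minimize}   & \mathbf{1}^T t \\
	\text{subject to} & -t \preceq u \preceq t \\
	                  & \lVert A u - b \rVert_2 \le 1,
	\end{array}

so that 1'*t equals ||u||_1 at the optimum; h carries the entry 1 and the vector -b for the second-order-cone block, matching the assignments to h above.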
Example #7
// Computes analytic center of A*x <= b with A m by n of rank n.
// We assume that b > 0 and the feasible set is bounded.
func acent(A, b *matrix.FloatMatrix, niters int) (x *matrix.FloatMatrix, ntdecrs []float64, err error) {

	err = nil
	if niters <= 0 {
		niters = MAXITERS
	}
	ntdecrs = make([]float64, 0, niters)

	if A.Rows() != b.Rows() {
		return nil, nil, errors.New("A.Rows() != b.Rows()")
	}

	m, n := A.Size()
	x = matrix.FloatZeros(n, 1)
	H := matrix.FloatZeros(n, n)
	// Helper m*n matrix
	Dmn := matrix.FloatZeros(m, n)

	for i := 0; i < niters; i++ {

		// Gradient is g = A^T * (1.0/(b - A*x)). d = 1.0/(b - A*x)
		// d is m*1 matrix, g is n*1 matrix
		d := matrix.Minus(b, matrix.Times(A, x)).Inv()
		g := matrix.Times(A.Transpose(), d)

		// Hessian is H = A^T * diag(1./(b-A*x))^2 * A.
		// in the original python code expression d[:,n*[0]] creates
		// a m*n matrix where each column is copy of column 0.
		// We do it here manually.
		for j := 0; j < n; j++ {
			Dmn.SetColumn(j, d)
		}

		// Function mul creates element wise product of matrices.
		Asc := matrix.Mul(Dmn, A)
		blas.SyrkFloat(Asc, H, 1.0, 0.0, linalg.OptTrans)

		// Newton step is v = H^-1 * g.
		v := matrix.Scale(g, -1.0)
		PosvFloat(H, v)

		// Directional derivative and Newton decrement.
		lam := blas.DotFloat(g, v)
		ntdecrs = append(ntdecrs, math.Sqrt(-lam))
		if ntdecrs[len(ntdecrs)-1] < TOL {
			return x, ntdecrs, err
		}

		// Backtracking line search.
		// y = d .* A*v
		y := matrix.Mul(d, matrix.Times(A, v))
		step := 1.0
		for 1-step*y.Max() < 0 {
			step *= BETA
		}

	search:
		for {
			// t = -step*y + 1 [e.g. t = 1 - step*y]
			t := matrix.Scale(y, -step).Add(1.0)

			// ts = sum(log(1-step*y))
			ts := t.Log().Sum()
			if -ts < ALPHA*step*lam {
				break search
			}
			step *= BETA
		}
		v.Scale(step)
		x.Plus(v)
	}
	// no solution within the allotted number of iterations
	err = fmt.Errorf("iteration limit %d exhausted", niters)
	return x, ntdecrs, err
}
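Each pass of the loop is one damped Newton step for the analytic-centering objective phi(x) = -sum(log(b - A*x)). In the notation of the code, with d = 1/(b - A*x) taken element-wise, the computed quantities are:

	g = \nabla\phi(x) = A^T d,
	\qquad
	H = \nabla^2\phi(x) = A^T \,\mathrm{diag}(d)^2\, A,
	\qquad
	v = -H^{-1} g,

	\lambda(x) = \sqrt{g^T H^{-1} g} = \sqrt{-g^T v},

and the iteration stops once the Newton decrement lambda(x) drops below TOL; otherwise the backtracking search with parameters ALPHA and BETA fixes the step length before x is updated.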