Example #1
func GradientDescent(X *mat64.Dense, y *mat64.Vector, alpha, tolerance float64, maxIters int) *mat64.Vector {
	// m = number of training examples
	// n = number of features
	m, n := X.Dims()
	h := mat64.NewVector(m, nil)
	partials := mat64.NewVector(n, nil)
	newTheta := mat64.NewVector(n, nil)

	for i := 0; i < maxIters; i++ {
		// Calculate the partial derivatives of the cost function.
		h.MulVec(X, newTheta)
		for el := 0; el < m; el++ {
			val := (h.At(el, 0) - y.At(el, 0)) / float64(m)
			h.SetVec(el, val)
		}
		partials.MulVec(X.T(), h)

		// Update the theta values.
		for el := 0; el < n; el++ {
			newVal := newTheta.At(el, 0) - (alpha * partials.At(el, 0))
			newTheta.SetVec(el, newVal)
		}

		// Check the "distance" to the local minimum.
		dist := math.Sqrt(mat64.Dot(partials, partials))

		if dist <= tolerance {
			break
		}
	}
	return newTheta
}
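A minimal usage sketch for GradientDescent (the dataset, learning rate, and tolerance below are illustrative assumptions; it relies only on gonum's mat64 and the usual fmt import):

func ExampleGradientDescent() {
	// Four training examples with a leading ones column for the
	// intercept; the targets follow y = 1 + 2*x.
	X := mat64.NewDense(4, 2, []float64{
		1, 0,
		1, 1,
		1, 2,
		1, 3,
	})
	y := mat64.NewVector(4, []float64{1, 3, 5, 7})

	theta := GradientDescent(X, y, 0.1, 1e-8, 100000)
	// theta converges toward (1, 2).
	fmt.Printf("theta = %0.3v\n", mat64.Formatted(theta.T()))
}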
Example #2
// onesDotUnitary performs the equivalent of a Ddot of v with
// a ones vector of equal length. v must have a unitary
// vector increment.
func onesDotUnitary(alpha float64, v *mat64.Vector) float64 {
	var sum float64
	for _, f := range v.RawVector().Data {
		sum += alpha * f
	}
	return sum
}
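In other words, onesDotUnitary(alpha, v) computes alpha times the sum of the elements of v. A quick check (assuming mat64 as above):

func ExampleOnesDotUnitary() {
	v := mat64.NewVector(3, []float64{1, 2, 3})
	fmt.Println(onesDotUnitary(2, v))
	// Output:
	// 12
}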
Example #3
// dotUnitary performs a simplified scatter-based Ddot operation on
// v and the receiver. v must have a unitary vector increment.
func (r compressedRow) dotUnitary(v *mat64.Vector) float64 {
	var sum float64
	vec := v.RawVector().Data
	for _, e := range r {
		sum += vec[e.index] * e.value
	}
	return sum
}
Example #4
func ExampleCholesky() {
	// Construct a symmetric positive definite matrix.
	tmp := mat64.NewDense(4, 4, []float64{
		2, 6, 8, -4,
		1, 8, 7, -2,
		2, 2, 1, 7,
		8, -2, -2, 1,
	})
	var a mat64.SymDense
	a.SymOuterK(1, tmp)

	fmt.Printf("a = %0.4v\n", mat64.Formatted(&a, mat64.Prefix("    ")))

	// Compute the Cholesky factorization.
	var chol mat64.Cholesky
	if ok := chol.Factorize(&a); !ok {
		fmt.Println("a matrix is not positive semi-definite.")
	}

	// Find the determinant.
	fmt.Printf("\nThe determinant of a is %0.4g\n\n", chol.Det())

	// Use the factorization to solve the system of equations a * x = b.
	b := mat64.NewVector(4, []float64{1, 2, 3, 4})
	var x mat64.Vector
	if err := x.SolveCholeskyVec(&chol, b); err != nil {
		fmt.Println("Matrix is near singular: ", err)
	}
	fmt.Println("Solve a * x = b")
	fmt.Printf("x = %0.4v\n", mat64.Formatted(&x, mat64.Prefix("    ")))

	// Extract the factorization and check that it equals the original matrix.
	var t mat64.TriDense
	t.LFromCholesky(&chol)
	var test mat64.Dense
	test.Mul(&t, t.T())
	fmt.Println()
	fmt.Printf("L * L^T = %0.4v\n", mat64.Formatted(&a, mat64.Prefix("          ")))

	// Output:
	// a = ⎡120  114   -4  -16⎤
	//     ⎢114  118   11  -24⎥
	//     ⎢ -4   11   58   17⎥
	//     ⎣-16  -24   17   73⎦
	//
	// The determinant of a is 1.543e+06
	//
	// Solve a * x = b
	// x = ⎡  -0.239⎤
	//     ⎢  0.2732⎥
	//     ⎢-0.04681⎥
	//     ⎣  0.1031⎦
	//
	// L * L^T = ⎡120  114   -4  -16⎤
	//           ⎢114  118   11  -24⎥
	//           ⎢ -4   11   58   17⎥
	//           ⎣-16  -24   17   73⎦
}
Example #5
// vectorDistance returns the squared Euclidean distance between vec1 and vec2.
func vectorDistance(vec1, vec2 *mat.Vector) float64 {
	result := mat.NewVector(vec1.Len(), nil)

	result.SubVec(vec1, vec2)
	result.MulElemVec(result, result)

	return mat.Sum(result)
}
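Note that vectorDistance returns the squared distance; take math.Sqrt for the distance itself. A short usage sketch (mirroring the mat.NewVector constructor used above):

func ExampleVectorDistance() {
	v1 := mat.NewVector(2, []float64{0, 0})
	v2 := mat.NewVector(2, []float64{3, 4})
	d2 := vectorDistance(v1, v2) // 3*3 + 4*4 = 25
	fmt.Println(d2, math.Sqrt(d2))
	// Output:
	// 25 5
}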
Example #6
// Scatter copies the values of x into the corresponding locations in the dense
// vector y. Both vectors must have the same dimension.
func Scatter(y *mat64.Vector, x *Vector) {
	if x.N != y.Len() {
		panic("sparse: vector dimension mismatch")
	}

	raw := y.RawVector()
	for i, index := range x.Indices {
		raw.Data[index*raw.Inc] = x.Data[i]
	}
}
Example #7
// findIn returns, as a vector, the indices of the entries in vec that equal scalar.
func findIn(scalar float64, vec *mat.Vector) *mat.Vector {
	var result []float64

	for i := 0; i < vec.Len(); i++ {
		if scalar == vec.At(i, 0) {
			result = append(result, float64(i))
		}
	}

	return mat.NewVector(len(result), result)
}
Example #8
// Dot computes the dot product of the sparse vector x with the dense vector y.
// The vectors must have the same dimension.
func Dot(x *Vector, y *mat64.Vector) (dot float64) {
	if x.N != y.Len() {
		panic("sparse: vector dimension mismatch")
	}

	raw := y.RawVector()
	for i, index := range x.Indices {
		dot += x.Data[i] * raw.Data[index*raw.Inc]
	}
	return
}
Example #9
// Gather gathers entries given by indices of the dense vector y into the sparse
// vector x. Indices must not be nil.
func Gather(x *Vector, y *mat64.Vector, indices []int) {
	if indices == nil {
		panic("sparse: slice is nil")
	}

	x.reuseAs(y.Len(), len(indices))
	copy(x.Indices, indices)
	raw := y.RawVector()
	for i, index := range x.Indices {
		x.Data[i] = raw.Data[index*raw.Inc]
	}
}
Example #10
// Axpy scales the sparse vector x by alpha and adds the result to the dense
// vector y. If alpha is zero, y is not modified.
func Axpy(y *mat64.Vector, alpha float64, x *Vector) {
	if x.N != y.Len() {
		panic("sparse: vector dimension mismatch")
	}

	if alpha == 0 {
		return
	}
	raw := y.RawVector()
	for i, index := range x.Indices {
		raw.Data[index*raw.Inc] += alpha * x.Data[i]
	}
}
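A combined sketch of Dot, Axpy, and Scatter (assuming the sparse Vector type exports the N, Indices, and Data fields used by the functions above):

func exampleSparseOps() {
	x := &Vector{N: 5, Indices: []int{0, 3}, Data: []float64{2, -1}} // (2, 0, 0, -1, 0)
	y := mat64.NewVector(5, []float64{1, 1, 1, 1, 1})

	fmt.Println(Dot(x, y)) // 2*1 + (-1)*1 = 1

	Axpy(y, 2, x) // y <- y + 2x = (5, 1, 1, -1, 1)

	Scatter(y, x) // y[0] = 2, y[3] = -1; other entries unchanged
}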
Example #11
// rowIndexIn returns a matrix containing the rows of M selected by the indexes vector.
func rowIndexIn(indexes *mat.Vector, M mat.Matrix) mat.Matrix {
	m := indexes.Len()
	_, n := M.Dims()
	Res := mat.NewDense(m, n, nil)

	for i := 0; i < m; i++ {
		Res.SetRow(i, mat.Row(
			nil,
			int(indexes.At(i, 0)),
			M))
	}

	return Res
}
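findIn pairs naturally with rowIndexIn for label-based row selection. A usage sketch (assuming gonum's mat package as above):

func exampleRowSelection() {
	labels := mat.NewVector(4, []float64{0, 1, 0, 1})
	M := mat.NewDense(4, 2, []float64{
		1, 2,
		3, 4,
		5, 6,
		7, 8,
	})

	idx := findIn(1, labels) // indices of the rows labeled 1: (1, 3)
	sub := rowIndexIn(idx, M)
	fmt.Println(mat.Formatted(sub))
	// ⎡3  4⎤
	// ⎣7  8⎦
}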
Example #12
func Cost(x *mat64.Dense, y, theta *mat64.Vector) float64 {
	// initialize receivers
	m, _ := x.Dims()
	h := mat64.NewDense(m, 1, make([]float64, m))
	squaredErrors := mat64.NewDense(m, 1, make([]float64, m))

	// the actual computation
	h.Mul(x, theta)
	squaredErrors.Apply(func(r, c int, v float64) float64 {
		return math.Pow(h.At(r, c)-y.At(r, c), 2)
	}, h)
	j := mat64.Sum(squaredErrors) * 1.0 / (2.0 * float64(m))

	return j
}
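A small worked check for Cost (illustrative data; since theta reproduces the targets exactly, the squared errors vanish):

func ExampleCost() {
	x := mat64.NewDense(3, 2, []float64{
		1, 0,
		1, 1,
		1, 2,
	})
	y := mat64.NewVector(3, []float64{1, 3, 5})
	theta := mat64.NewVector(2, []float64{1, 2}) // h = 1 + 2*x1, a perfect fit

	fmt.Println(Cost(x, y, theta))
	// Output:
	// 0
}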
Example #13
func Solve(a sparse.Matrix, b, xInit *mat64.Vector, settings *Settings, method Method) (result Result, err error) {
	stats := Stats{
		StartTime: time.Now(),
	}

	dim, c := a.Dims()
	if dim != c {
		panic("iterative: matrix is not square")
	}
	if xInit != nil && dim != xInit.Len() {
		panic("iterative: mismatched size of the initial guess")
	}
	if b.Len() != dim {
		panic("iterative: mismatched size of the right-hand side vector")
	}

	if xInit == nil {
		xInit = mat64.NewVector(dim, nil)
	}
	if settings == nil {
		settings = DefaultSettings(dim)
	}

	ctx := Context{
		X:        mat64.NewVector(dim, nil),
		Residual: mat64.NewVector(dim, nil),
	}
	// X = xInit
	ctx.X.CopyVec(xInit)
	if mat64.Norm(ctx.X, math.Inf(1)) > 0 {
		// Residual = Ax
		sparse.MulMatVec(ctx.Residual, 1, false, a, ctx.X)
		stats.MatVecMultiplies++
	}
	// Residual = Ax - b
	ctx.Residual.SubVec(ctx.Residual, b)

	if mat64.Norm(ctx.Residual, 2) >= settings.Tolerance {
		err = iterate(method, a, b, settings, &ctx, &stats)
	}

	result = Result{
		X:       ctx.X,
		Stats:   stats,
		Runtime: time.Since(stats.StartTime),
	}
	return result, err
}
Example #14
// Map wraps v, in place, so that it lies within the bounds of the
// rectangular manifold of toroidal space, given a vector that is on
// the torus but may be outside these bounds.
func (t Torus) Map(v *mat64.Vector) {
	x := v.At(0, 0)
	y := v.At(1, 0)

	remx := x
	right := t.W / 2
	if math.Abs(x) > right {
		remx = math.Mod(t.W, -x)
	}
	remy := y
	top := t.H / 2
	if math.Abs(y) > top {
		remy = math.Mod(t.H, -y)
	}

	v.SetVec(0, remx)
	v.SetVec(1, remy)
}
Example #15
// StdDev predicts the standard deviation of the function at x.
func (g *GP) StdDev(x []float64) float64 {
	if len(x) != g.inputDim {
		panic(badInputLength)
	}
	// nu_* = k(x_*, x_*) - k_*^T * K^-1 * k_*
	n := len(g.outputs)
	kstar := mat64.NewVector(n, nil)
	for i := 0; i < n; i++ {
		v := g.kernel.Distance(g.inputs.RawRowView(i), x)
		kstar.SetVec(i, v)
	}
	self := g.kernel.Distance(x, x)
	var tmp mat64.Vector
	tmp.SolveCholeskyVec(g.cholK, kstar)
	var tmp2 mat64.Vector
	tmp2.MulVec(kstar.T(), &tmp)
	rt, ct := tmp2.Dims()
	if rt != 1 || ct != 1 {
		panic("bad size")
	}
	return math.Sqrt(self-tmp2.At(0, 0)) * g.std
}
Example #16
// StdDevBatch predicts the standard deviation of the function at each location (row) of x.
func (g *GP) StdDevBatch(std []float64, x mat64.Matrix) []float64 {
	r, c := x.Dims()
	if c != g.inputDim {
		panic(badInputLength)
	}
	if std == nil {
		std = make([]float64, r)
	}
	if len(std) != r {
		panic(badStorage)
	}
	// For a single point, the variance is
	// 		sigma^2 = k(x,x) - k_*^T * K^-1 * k_*
	// where k_* is the vector of kernels between the training inputs and the
	// query point. For many points, the formula is:
	// 		nu_* = k(x_*, x_*) - k_*^T * K^-1 * k_*
	// This creates the full covariance matrix, which is an r×r matrix. However,
	// the standard deviations are just the diagonal of this matrix. Instead, be
	// smart about it and compute the diagonal terms one at a time.
	kStar := g.formKStar(x)
	var tmp mat64.Dense
	tmp.SolveCholesky(g.cholK, kStar)

	// set k(x_*, x_*) into std then subtract k_*^T K^-1 k_* , computed one row at a time
	var tmp2 mat64.Vector
	row := make([]float64, c)
	for i := range std {
		for k := 0; k < c; k++ {
			row[k] = x.At(i, k)
		}
		std[i] = g.kernel.Distance(row, row)
		tmp2.MulVec(kStar.ColView(i).T(), tmp.ColView(i))
		rt, ct := tmp2.Dims()
		if rt != 1 && ct != 1 {
			panic("bad size")
		}
		std[i] -= tmp2.At(0, 0)
		std[i] = math.Sqrt(std[i])
	}
	// Need to scale the standard deviation to be in the same units as y.
	floats.Scale(g.std, std)
	return std
}
Example #17
// BlasVec2UserVec converts the first two components of v into a UserVec.
func BlasVec2UserVec(v *mat64.Vector) UserVec {
	u := UserVec{}
	u.X = v.At(0, 0)
	u.Y = v.At(1, 0)
	return u
}
Example #18
// mulVecUnitary multiplies the receiver by the src vector, storing
// the result in dst. It assumes src and dst are the same length as m
// and that both have unitary vector increments.
func (m rowCompressedMatrix) mulVecUnitary(dst, src *mat64.Vector) {
	dMat := dst.RawVector().Data
	for i, r := range m {
		dMat[i] = r.dotUnitary(src)
	}
}
Example #19
// VectorToMatrix returns vector as a 1×n row matrix that shares the
// vector's backing data. It assumes a unitary vector increment, since
// RawVector().Data is used directly.
func VectorToMatrix(vector *mat64.Vector) *mat64.Dense {
	vec := vector.RawVector()
	return mat64.NewDense(1, len(vec.Data), vec.Data)
}
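A quick usage check; because the returned Dense shares the vector's backing array, writes to one are visible through the other:

func ExampleVectorToMatrix() {
	v := mat64.NewVector(3, []float64{1, 2, 3})
	m := VectorToMatrix(v)
	r, c := m.Dims()
	fmt.Println(r, c)
	// Output:
	// 1 3
}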
Example #20
// ConditionNormal returns the Normal distribution that is the receiver conditioned
// on the input evidence. The returned multivariate normal has dimension
// n - len(observed), where n is the dimension of the original receiver. The updated
// mean and covariance are
//  mu = mu_un + sigma_{ob,un}^T * sigma_{ob,ob}^-1 (v - mu_ob)
//  sigma = sigma_{un,un} - sigma_{ob,un}^T * sigma_{ob,ob}^-1 * sigma_{ob,un}
// where mu_un and mu_ob are the original means of the unobserved and observed
// variables respectively, sigma_{un,un} is the unobserved subset of the covariance
// matrix, sigma_{ob,ob} is the observed subset of the covariance matrix, and
// sigma_{un,ob} are the cross terms. The elements of x_2 have been observed with
// values v. The dimension order is preserved during conditioning, so if the value
// of dimension 1 is observed, the returned normal represents dimensions {0, 2, ...}
// of the original Normal distribution.
//
// ConditionNormal returns {nil, false} if there is a failure during the update.
// Mathematically this is impossible, but can occur with finite precision arithmetic.
func (n *Normal) ConditionNormal(observed []int, values []float64, src *rand.Rand) (*Normal, bool) {
	if len(observed) == 0 {
		panic("normal: no observed value")
	}
	if len(observed) != len(values) {
		panic("normal: input slice length mismatch")
	}
	for _, v := range observed {
		if v < 0 || v >= n.Dim() {
			panic("normal: observed value out of bounds")
		}
	}

	ob := len(observed)
	unob := n.Dim() - ob
	obMap := make(map[int]struct{})
	for _, v := range observed {
		if _, ok := obMap[v]; ok {
			panic("normal: observed dimension occurs twice")
		}
		obMap[v] = struct{}{}
	}
	if len(observed) == n.Dim() {
		panic("normal: all dimensions observed")
	}
	unobserved := make([]int, 0, unob)
	for i := 0; i < n.Dim(); i++ {
		if _, ok := obMap[i]; !ok {
			unobserved = append(unobserved, i)
		}
	}
	mu1 := make([]float64, unob)
	for i, v := range unobserved {
		mu1[i] = n.mu[v]
	}
	mu2 := make([]float64, ob) // really v - mu2
	for i, v := range observed {
		mu2[i] = values[i] - n.mu[v]
	}

	n.setSigma()

	var sigma11, sigma22 mat64.SymDense
	sigma11.SubsetSym(n.sigma, unobserved)
	sigma22.SubsetSym(n.sigma, observed)

	sigma21 := mat64.NewDense(ob, unob, nil)
	for i, r := range observed {
		for j, c := range unobserved {
			v := n.sigma.At(r, c)
			sigma21.Set(i, j, v)
		}
	}

	var chol mat64.Cholesky
	ok := chol.Factorize(&sigma22)
	if !ok {
		return nil, ok
	}

	// Compute sigma_{2,1}^T * sigma_{2,2}^-1 (v - mu_2).
	v := mat64.NewVector(ob, mu2)
	var tmp, tmp2 mat64.Vector
	err := tmp.SolveCholeskyVec(&chol, v)
	if err != nil {
		return nil, false
	}
	tmp2.MulVec(sigma21.T(), &tmp)

	// Compute sigma_{2,1}^T * sigma_{2,2}^-1 * sigma_{2,1}.
	// TODO(btracey): Should this be a method of SymDense?
	var tmp3, tmp4 mat64.Dense
	err = tmp3.SolveCholesky(&chol, sigma21)
	if err != nil {
		return nil, false
	}
	tmp4.Mul(sigma21.T(), &tmp3)

	for i := range mu1 {
		mu1[i] += tmp2.At(i, 0)
	}

	// TODO(btracey): If tmp2 can be constructed with a method, then this can be
	// replaced with SubSym.
	for i := 0; i < len(unobserved); i++ {
		for j := i; j < len(unobserved); j++ {
			v := sigma11.At(i, j)
			sigma11.SetSym(i, j, v-tmp4.At(i, j))
		}
	}
	return NewNormal(mu1, &sigma11, src)
}
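A usage sketch with illustrative numbers (it assumes the NewNormal constructor referenced in the return statement above and a Mean accessor on Normal): conditioning a correlated 2D normal on its second dimension shifts the remaining mean by sigma_{ob,un} * sigma_{ob,ob}^-1 * (v - mu_ob) = 0.9 * 1 / 1 = 0.9.

func exampleConditionNormal() {
	sigma := mat64.NewSymDense(2, []float64{
		1, 0.9,
		0.9, 1,
	})
	norm, ok := NewNormal([]float64{0, 0}, sigma, nil)
	if !ok {
		panic("covariance not positive definite")
	}

	cond, ok := norm.ConditionNormal([]int{1}, []float64{1}, nil)
	if !ok {
		panic("conditioning failed")
	}
	fmt.Println(cond.Mean(nil)) // [0.9]
}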
Example #21
func dokMulMatVec(y *mat64.Vector, alpha float64, transA bool, a *DOK, x *mat64.Vector) {
	r, c := a.Dims()
	if transA {
		if r != x.Len() || c != y.Len() {
			panic("sparse: dimension mismatch")
		}
	} else {
		if r != y.Len() || c != x.Len() {
			panic("sparse: dimension mismatch")
		}
	}

	if alpha == 0 {
		return
	}

	xRaw := x.RawVector()
	yRaw := y.RawVector()
	if transA {
		for ij, aij := range a.data {
			yRaw.Data[ij[1]*yRaw.Inc] += alpha * aij * xRaw.Data[ij[0]*xRaw.Inc]
		}
	} else {
		for ij, aij := range a.data {
			yRaw.Data[ij[0]*yRaw.Inc] += alpha * aij * xRaw.Data[ij[1]*xRaw.Inc]
		}
	}
}
Example #22
func vec3(vec *mat64.Vector) Vec3 {
	return Vec3{vec.At(0, 0), vec.At(1, 0), vec.At(2, 0)}
}
Example #23
func simplex(initialBasic []int, c []float64, A mat64.Matrix, b []float64, tol float64) (float64, []float64, []int, error) {
	err := verifyInputs(initialBasic, c, A, b)
	if err != nil {
		if err == ErrUnbounded {
			return math.Inf(-1), nil, nil, ErrUnbounded
		}
		return math.NaN(), nil, nil, err
	}
	m, n := A.Dims()

	// There is at least one optimal solution to the LP which is at the intersection
	// of a set of constraint boundaries. For a standard form LP with n variables
	// and m equality constraints, at least n-m elements of x must equal zero
	// at optimality. The Simplex algorithm solves the standard-form LP by starting
	// at an initial constraint vertex and successively moving to adjacent constraint
	// vertices. At every vertex, the set of non-zero x values is the "basic
	// feasible solution". The list of non-zero x's is maintained in basicIdxs,
	// the respective columns of A are in ab, and the actual non-zero values of
	// x are in xb.
	//
	// The LP is equality constrained such that A * x = b. This can be expanded
	// to
	//  ab * xb + an * xn = b
	// where ab are the columns of a in the basic set, and an are all of the
	// other columns. Since each element of xn is zero by definition, this means
	// that for all feasible solutions xb = ab^-1 * b.
	//
	// Before the simplex algorithm can start, an initial feasible solution must
	// be found. If initialBasic is non-nil a feasible solution has been supplied.
	// Otherwise the "Phase I" problem must be solved to find an initial feasible
	// solution.

	var basicIdxs []int // The indices of the non-zero x values.
	var ab *mat64.Dense // The subset of columns of A listed in basicIdxs.
	var xb []float64    // The non-zero elements of x. xb = ab^-1 b

	if initialBasic != nil {
		// InitialBasic supplied. Panic if incorrect length or infeasible.
		if len(initialBasic) != m {
			panic("lp: incorrect number of initial vectors")
		}
		ab = extractColumns(A, initialBasic)
		xb, err = initializeFromBasic(ab, b)
		if err != nil {
			panic(err)
		}
		basicIdxs = make([]int, len(initialBasic))
		copy(basicIdxs, initialBasic)
	} else {
		// No initial basis supplied. Solve the Phase I problem.
		basicIdxs, ab, xb, err = findInitialBasic(A, b)
		if err != nil {
			return math.NaN(), nil, nil, err
		}
	}

	// basicIdxs contains the indexes for an initial feasible solution,
	// ab contains the extracted columns of A, and xb contains the feasible
	// solution. All x not in the basic set are 0 by construction.

	// nonBasicIdx is the set of nonbasic variables.
	nonBasicIdx := make([]int, 0, n-m)
	inBasic := make(map[int]struct{})
	for _, v := range basicIdxs {
		inBasic[v] = struct{}{}
	}
	for i := 0; i < n; i++ {
		_, ok := inBasic[i]
		if !ok {
			nonBasicIdx = append(nonBasicIdx, i)
		}
	}

	// cb is the subset of c for the basic variables. an and cn
	// are the equivalents to ab and cb but for the nonbasic variables.
	cb := make([]float64, len(basicIdxs))
	for i, idx := range basicIdxs {
		cb[i] = c[idx]
	}
	cn := make([]float64, len(nonBasicIdx))
	for i, idx := range nonBasicIdx {
		cn[i] = c[idx]
	}
	an := extractColumns(A, nonBasicIdx)

	bVec := mat64.NewVector(len(b), b)
	cbVec := mat64.NewVector(len(cb), cb)

	// Temporary data needed each iteration. (Described later)
	r := make([]float64, n-m)
	move := make([]float64, m)

	// Solve the linear program starting from the initial feasible set. This is
	// the "Phase 2" problem.
	//
	// Algorithm:
	// 1) Compute the "reduced costs" for the non-basic variables. The reduced
	// costs are the Lagrange multipliers of the constraints.
	// 	 r = cn - an^T * ab^-T * cb
	// 2) If all of the reduced costs are positive, no improvement is possible,
	// and the solution is optimal (xn can only increase because of
	// non-negativity constraints). Otherwise, the solution can be improved and
	// one element will be exchanged in the basic set.
	// 3) Choose the x_n with the most negative value of r. Call this value xe.
	// This variable will be swapped into the basic set.
	// 4) Increase xe until the next constraint boundary is met. This will happen
	// when the first element in xb becomes 0. The distance xe can increase before
	// a given element in xb becomes negative can be found from
	//	xb = Ab^-1 b - Ab^-1 An xn
	//     = Ab^-1 b - Ab^-1 Ae xe
	//     = bhat + d x_e
	//  xe = bhat_i / - d_i
	// where Ae is the column of A corresponding to xe.
	// The constraining basic index is the first index for which this is true,
	// so remove the element which is min_i (bhat_i / -d_i), assuming d_i is negative.
	// If no d_i is less than 0, then the problem is unbounded.
	// 5) If the new xe is 0 (that is, bhat_i == 0), then this location is at
	// the intersection of several constraints. Use the Bland rule instead
	// of the rule in step 4 to avoid cycling.
	for {
		// Compute reduced costs -- r = cn - an^T ab^-T cb
		var tmp mat64.Vector
		err = tmp.SolveVec(ab.T(), cbVec)
		if err != nil {
			break
		}
		data := make([]float64, n-m)
		tmp2 := mat64.NewVector(n-m, data)
		tmp2.MulVec(an.T(), &tmp)
		floats.SubTo(r, cn, data)

		// Replace the most negative element in the simplex. If there are no
		// negative entries then the optimal solution has been found.
		minIdx := floats.MinIdx(r)
		if r[minIdx] >= -tol {
			break
		}

		for i, v := range r {
			if math.Abs(v) < rRoundTol {
				r[i] = 0
			}
		}

		// Compute the moving distance.
		err = computeMove(move, minIdx, A, ab, xb, nonBasicIdx)
		if err != nil {
			if err == ErrUnbounded {
				return math.Inf(-1), nil, nil, ErrUnbounded
			}
			break
		}

		// Replace the basic index along the tightest constraint.
		replace := floats.MinIdx(move)
		if move[replace] <= 0 {
			replace, minIdx, err = replaceBland(A, ab, xb, basicIdxs, nonBasicIdx, r, move)
			if err != nil {
				if err == ErrUnbounded {
					return math.Inf(-1), nil, nil, ErrUnbounded
				}
				break
			}
		}

		// Replace the constrained basicIdx with the newIdx.
		basicIdxs[replace], nonBasicIdx[minIdx] = nonBasicIdx[minIdx], basicIdxs[replace]
		cb[replace], cn[minIdx] = cn[minIdx], cb[replace]
		tmpCol1 := mat64.Col(nil, replace, ab)
		tmpCol2 := mat64.Col(nil, minIdx, an)
		ab.SetCol(replace, tmpCol2)
		an.SetCol(minIdx, tmpCol1)

		// Compute the new xb.
		xbVec := mat64.NewVector(len(xb), xb)
		err = xbVec.SolveVec(ab, bVec)
		if err != nil {
			break
		}
	}
	// Found the optimum successfully or died trying. The basic variables get
	// their values, and the non-basic variables are all zero.
	opt := floats.Dot(cb, xb)
	xopt := make([]float64, n)
	for i, v := range basicIdxs {
		xopt[v] = xb[i]
	}
	return opt, xopt, basicIdxs, err
}
Example #24
func testSimplex(t *testing.T, initialBasic []int, c []float64, a mat64.Matrix, b []float64, convergenceTol float64) error {
	primalOpt, primalX, _, errPrimal := simplex(initialBasic, c, a, b, convergenceTol)
	if errPrimal == nil {
		// No error solving the simplex, check that the solution is feasible.
		var bCheck mat64.Vector
		bCheck.MulVec(a, mat64.NewVector(len(primalX), primalX))
		if !mat64.EqualApprox(&bCheck, mat64.NewVector(len(b), b), 1e-10) {
			t.Errorf("No error in primal but solution infeasible")
		}
	}

	primalInfeasible := errPrimal == ErrInfeasible
	primalUnbounded := errPrimal == ErrUnbounded
	primalBounded := errPrimal == nil
	primalASingular := errPrimal == ErrSingular
	primalZeroRow := errPrimal == ErrZeroRow
	primalZeroCol := errPrimal == ErrZeroColumn

	primalBad := !primalInfeasible && !primalUnbounded && !primalBounded && !primalASingular && !primalZeroRow && !primalZeroCol

	// It's an error if it's not one of the known returned errors. If it's
	// singular the problem is undefined and so the result cannot be compared
	// to the dual.
	if errPrimal == ErrSingular || primalBad {
		if primalBad {
			t.Errorf("non-known error returned: %s", errPrimal)
		}
		return errPrimal
	}

	// Compare the result to the answer found from solving the dual LP.

	// Construct and solve the dual LP.
	// Standard Form:
	//  minimize c^T * x
	//    subject to  A * x = b, x >= 0
	// The dual of this problem is
	//  maximize -b^T * nu
	//   subject to A^T * nu + c >= 0
	// Which is
	//   minimize b^T * nu
	//   subject to -A^T * nu <= c

	negAT := &mat64.Dense{}
	negAT.Clone(a.T())
	negAT.Scale(-1, negAT)
	cNew, aNew, bNew := Convert(b, negAT, c, nil, nil)

	dualOpt, dualX, _, errDual := simplex(nil, cNew, aNew, bNew, convergenceTol)
	if errDual == nil {
		// Check that the dual is feasible
		var bCheck mat64.Vector
		bCheck.MulVec(aNew, mat64.NewVector(len(dualX), dualX))
		if !mat64.EqualApprox(&bCheck, mat64.NewVector(len(bNew), bNew), 1e-10) {
			t.Errorf("No error in dual but solution infeasible")
		}
	}

	// Check the zero-row and zero-column status.
	if errPrimal == ErrZeroRow || errPrimal == ErrZeroColumn {
		return errPrimal
	}

	// If the primal problem is feasible, then the primal and the dual should
	// be the same answer. We have flipped the sign in the dual (minimizing
	// b^T * nu instead of maximizing -b^T * nu), so flip it back.
	if errPrimal == nil {
		if errDual != nil {
			t.Errorf("Primal feasible but dual errored: %s", errDual)
		}
		dualOpt *= -1
		if !floats.EqualWithinAbsOrRel(dualOpt, primalOpt, convergenceTol, convergenceTol) {
			t.Errorf("Primal and dual value mismatch. Primal %v, dual %v.", primalOpt, dualOpt)
		}
	}
	// If the primal problem is unbounded, then the dual should be infeasible.
	if errPrimal == ErrUnbounded && errDual != ErrInfeasible {
		t.Errorf("Primal unbounded but dual not infeasible. ErrDual = %s", errDual)
	}

	// If the dual is unbounded, then the primal should be infeasible.
	if errDual == ErrUnbounded && errPrimal != ErrInfeasible {
		t.Errorf("Dual unbounded but primal not infeasible. ErrDual = %s", errPrimal)
	}

	// If the primal is infeasible, then the dual should be either infeasible
	// or unbounded.
	if errPrimal == ErrInfeasible {
		if errDual != ErrUnbounded && errDual != ErrInfeasible && errDual != ErrZeroColumn {
			t.Errorf("Primal infeasible but dual not infeasible or unbounded: %s", errDual)
		}
	}

	return errPrimal
}
Example #25
// MultiHypothesis returns the hypothesis vector h = theta^T * x for a
// batch of training examples stored as the columns of x.
func MultiHypothesis(x *mat64.Dense, theta *mat64.Vector) *mat64.Vector {
	var res mat64.Dense
	res.Mul(theta.T(), x)
	return res.RowView(0)
}
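Note that MultiHypothesis expects the training examples as the columns of x, since theta.T() (a 1×n row) is multiplied on the left. A short check:

func ExampleMultiHypothesis() {
	// Three examples as columns; the first row is the intercept feature.
	x := mat64.NewDense(2, 3, []float64{
		1, 1, 1,
		0, 1, 2,
	})
	theta := mat64.NewVector(2, []float64{1, 2})

	h := MultiHypothesis(x, theta)
	fmt.Println(h.At(0, 0), h.At(1, 0), h.At(2, 0))
	// Output:
	// 1 3 5
}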
Example #26
// Jolt behaves as Move but scales the speed
func (r Rudder) Jolt(pos *mat64.Vector, scale float64) {
	pos.AddScaledVec(pos, scale*r.S, r.D)
}
Example #27
func csrMulMatVec(y *mat64.Vector, alpha float64, transA bool, a *CSR, x *mat64.Vector) {
	r, c := a.Dims()
	if transA {
		if r != x.Len() || c != y.Len() {
			panic("sparse: dimension mismatch")
		}
	} else {
		if r != y.Len() || c != x.Len() {
			panic("sparse: dimension mismatch")
		}
	}

	if alpha == 0 {
		return
	}

	yRaw := y.RawVector()
	if transA {
		row := Vector{N: y.Len()}
		for i := 0; i < r; i++ {
			start := a.rowIndex[i]
			end := a.rowIndex[i+1]
			row.Data = a.values[start:end]
			row.Indices = a.columns[start:end]
			Axpy(y, alpha*x.At(i, 0), &row)
		}
	} else {
		row := Vector{N: x.Len()}
		for i := 0; i < r; i++ {
			start := a.rowIndex[i]
			end := a.rowIndex[i+1]
			row.Data = a.values[start:end]
			row.Indices = a.columns[start:end]
			yRaw.Data[i*yRaw.Inc] += alpha * Dot(&row, x)
		}
	}
}