Example #1
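// Solve finds an approximate solution of the linear system a*x = b using the
// given iterative method. If xInit is nil, the zero vector is used as the
// initial guess; if settings is nil, DefaultSettings(dim) is used.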
func Solve(a sparse.Matrix, b, xInit *mat64.Vector, settings *Settings, method Method) (result Result, err error) {
	stats := Stats{
		StartTime: time.Now(),
	}

	dim, c := a.Dims()
	if dim != c {
		panic("iterative: matrix is not square")
	}
	if xInit != nil && dim != xInit.Len() {
		panic("iterative: mismatched size of the initial guess")
	}
	if b.Len() != dim {
		panic("iterative: mismatched size of the right-hand side vector")
	}

	if xInit == nil {
		xInit = mat64.NewVector(dim, nil)
	}
	if settings == nil {
		settings = DefaultSettings(dim)
	}

	ctx := Context{
		X:        mat64.NewVector(dim, nil),
		Residual: mat64.NewVector(dim, nil),
	}
	// X = xInit
	ctx.X.CopyVec(xInit)
	if mat64.Norm(ctx.X, math.Inf(1)) > 0 {
		// Residual = Ax
		sparse.MulMatVec(ctx.Residual, 1, false, a, ctx.X)
		stats.MatVecMultiplies++
	}
	// Residual = Ax - b
	ctx.Residual.SubVec(ctx.Residual, b)

	if mat64.Norm(ctx.Residual, 2) >= settings.Tolerance {
		err = iterate(method, a, b, settings, &ctx, &stats)
	}

	result = Result{
		X:       ctx.X,
		Stats:   stats,
		Runtime: time.Since(stats.StartTime),
	}
	return result, err
}
Example #2
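// InitDirection initializes the BFGS state from loc, stores the initial search
// direction (the negative gradient) in dir, and returns the initial step size.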
func (b *BFGS) InitDirection(loc *Location, dir []float64) (stepSize float64) {
	dim := len(loc.X)
	b.dim = dim
	b.first = true

	x := mat64.NewVector(dim, loc.X)
	grad := mat64.NewVector(dim, loc.Gradient)
	b.x.CloneVec(x)
	b.grad.CloneVec(grad)

	b.y.Reset()
	b.s.Reset()
	b.tmp.Reset()

	if b.invHess == nil || cap(b.invHess.RawSymmetric().Data) < dim*dim {
		b.invHess = mat64.NewSymDense(dim, nil)
	} else {
		b.invHess = mat64.NewSymDense(dim, b.invHess.RawSymmetric().Data[:dim*dim])
	}
	// The values of the inverse Hessian are initialized in the first call to
	// NextDirection.

	// The initial direction is just the negative of the gradient because the
	// initial inverse Hessian approximation is the identity matrix.
	d := mat64.NewVector(dim, dir)
	d.ScaleVec(-1, grad)
	return 1 / mat64.Norm(d, 2)
}
Example #3
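// iterate drives the reverse-communication loop of method on the system
// a*x = b: it performs the matrix-vector products and preconditioner solves
// the method requests until the relative residual drops below
// settings.Tolerance or the iteration limit is reached.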
func iterate(method Method, a sparse.Matrix, b *mat64.Vector, settings *Settings, ctx *Context, stats *Stats) error {
	bNorm := mat64.Norm(b, 2)
	if bNorm == 0 {
		bNorm = 1
	}

	op := method.Init(ctx)
	for {
		switch op {
		case NoOperation:

		case ComputeAp:
			ctx.Ap.ScaleVec(0, ctx.Ap)
			sparse.MulMatVec(ctx.Ap, 1, false, a, ctx.P)
			stats.MatVecMultiplies++

		case ComputeAq:
			ctx.Aq.ScaleVec(0, ctx.Aq)
			sparse.MulMatVec(ctx.Aq, 1, false, a, ctx.Q)
			stats.MatVecMultiplies++

		case SolvePreconditioner:
			// TODO(vladimir-ch): Add preconditioners.
			// Z = Residual
			ctx.Z.CopyVec(ctx.Residual)
			stats.PrecondionerSolves++

		case CheckConvergence:
			stats.Iterations++
			stats.Residual = mat64.Norm(ctx.Residual, 2) / bNorm
			fmt.Println(stats.Residual)
			if stats.Residual < settings.Tolerance {
				return nil
			}
			if stats.Iterations == settings.Iterations {
				return errors.New("iterative: reached iteration limit")
			}
		}

		op = method.Iterate(ctx)
	}
}
Example #4
// Explodes reports whether pos is within radius of an explosion at center,
// taking the toroidal topology of t into account.
func (t Torus) Explodes(radius float64, center, pos *mat64.Vector) bool {
	diff := Vec2(0.0, 0.0)
	diff.CloneVec(center)
	for _, p := range t.Projections(pos) {
		diff.SubVec(center, p)
		if mat64.Norm(diff, 2) < radius {
			return true
		}
	}
	return false
}
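The Vec2 and Torus.Projections helpers are not shown in this example. Below is a self-contained sketch of the same test under the assumption of a rectangular w-by-h torus whose projections of pos are its nine axis-aligned translates; the function name and that assumption are illustrative, not taken from the source.

// explodesOnTorus is a hypothetical stand-in for Torus.Explodes: it checks the
// nine translated copies of pos on a w-by-h torus against the blast radius.
func explodesOnTorus(w, h, radius float64, center, pos *mat64.Vector) bool {
	diff := mat64.NewVector(2, nil)
	for _, dx := range []float64{-w, 0, w} {
		for _, dy := range []float64{-h, 0, h} {
			p := mat64.NewVector(2, []float64{pos.At(0, 0) + dx, pos.At(1, 0) + dy})
			// diff = center - p, mirroring diff.SubVec(center, p) above.
			diff.SubVec(center, p)
			if mat64.Norm(diff, 2) < radius {
				return true
			}
		}
	}
	return false
}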
Example #5
// Factors returns matrices W and H that are non-negative factors of V within
// the specified tolerance and computation limits, given the initial
// non-negative solutions Wo and Ho.
func Factors(V, Wo, Ho *mat64.Dense, c Config) (W, H *mat64.Dense, ok bool) {
	to := time.Now()

	W = Wo
	H = Ho

	var (
		wr, wc = W.Dims()
		hr, hc = H.Dims()

		tmp mat64.Dense
	)

	var vhT mat64.Dense
	gW := mat64.NewDense(wr, wc, nil)
	tmp.Mul(H, H.T())
	gW.Mul(W, &tmp)
	vhT.Mul(V, H.T())
	gW.Sub(gW, &vhT)

	var wTv mat64.Dense
	gH := mat64.NewDense(hr, hc, nil)
	tmp.Reset()
	tmp.Mul(W.T(), W)
	gH.Mul(&tmp, H)
	wTv.Mul(W.T(), V)
	gH.Sub(gH, &wTv)

	var gHT, gWHT mat64.Dense
	gHT.Clone(gH.T())
	gWHT.Stack(gW, &gHT)

	grad := mat64.Norm(&gWHT, 2)
	tolW := math.Max(0.001, c.Tolerance) * grad
	tolH := tolW

	var (
		_ok  bool
		iter int
	)

	decFiltW := func(r, c int, v float64) float64 {
		// decFiltW is applied to gW, so v = gW.At(r, c).
		if v < 0 || W.At(r, c) > 0 {
			return v
		}
		return 0
	}

	decFiltH := func(r, c int, v float64) float64 {
		// decFiltH is applied to gH, so v = gH.At(r, c).
		if v < 0 || H.At(r, c) > 0 {
			return v
		}
		return 0
	}

	var vT, hT, wT mat64.Dense
	for i := 0; i < c.MaxIter; i++ {
		gW.Apply(decFiltW, gW)
		gH.Apply(decFiltH, gH)

		var proj float64
		for _, v := range gW.RawMatrix().Data {
			proj += v * v
		}
		for _, v := range gH.RawMatrix().Data {
			proj += v * v
		}
		proj = math.Sqrt(proj)
		if proj < c.Tolerance*grad || time.Now().Sub(to) > c.Limit {
			break
		}

		vT.Clone(V.T())
		hT.Clone(H.T())
		wT.Clone(W.T())
		W, gW, iter, ok = nnlsSubproblem(&vT, &hT, &wT, tolW, c.MaxOuterSub, c.MaxInnerSub)
		if iter == 0 {
			tolW *= 0.1
		}

		wT.Reset()
		wT.Clone(W.T())
		W = &wT

		var gWT mat64.Dense
		gWT.Clone(gW.T())
		*gW = gWT

		H, gH, iter, _ok = nnlsSubproblem(V, W, H, tolH, c.MaxOuterSub, c.MaxInnerSub)
		ok = ok && _ok
		if iter == 0 {
			tolH *= 0.1
		}
	}

	return W, H, ok
}
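A minimal usage sketch for Factors, assumed to live in the same package; it relies only on the Config fields referenced above (Tolerance, MaxIter, MaxOuterSub, MaxInnerSub, Limit) and uses small, strictly positive starting factors. Names and values are illustrative.

// exampleFactors computes a rank-2 non-negative factorization of a small 4x3
// matrix V, so that V is approximately W*H with W 4x2 and H 2x3.
func exampleFactors() (W, H *mat64.Dense, ok bool) {
	V := mat64.NewDense(4, 3, []float64{
		1, 2, 3,
		2, 4, 6,
		1, 1, 1,
		3, 5, 7,
	})
	Wo := mat64.NewDense(4, 2, []float64{
		0.5, 0.2,
		0.1, 0.8,
		0.3, 0.3,
		0.7, 0.4,
	})
	Ho := mat64.NewDense(2, 3, []float64{
		0.2, 0.4, 0.6,
		0.9, 0.7, 0.5,
	})
	return Factors(V, Wo, Ho, Config{
		Tolerance:   1e-4,
		MaxIter:     200,
		MaxOuterSub: 1000,
		MaxInnerSub: 20,
		Limit:       time.Minute,
	})
}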
Example #6
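// nnlsSubproblem approximately solves the non-negative least squares
// subproblem min_{H >= 0} ||V - W*H|| by projected gradient descent with a
// backtracking step-size search, starting from the initial solution Ho.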
func nnlsSubproblem(V, W, Ho *mat64.Dense, tol float64, outer, inner int) (H, G *mat64.Dense, i int, ok bool) {
	H = new(mat64.Dense)
	H.Clone(Ho)

	var WtV, WtW mat64.Dense
	WtV.Mul(W.T(), V)
	WtW.Mul(W.T(), W)

	alpha, beta := 1., 0.1

	decFilt := func(r, c int, v float64) float64 {
		// decFilt is applied to G, so v = G.At(r, c).
		if v < 0 || H.At(r, c) > 0 {
			return v
		}
		return 0
	}

	G = new(mat64.Dense)
	for i = 0; i < outer; i++ {
		G.Mul(&WtW, H)
		G.Sub(G, &WtV)
		G.Apply(decFilt, G)

		if mat64.Norm(G, 2) < tol {
			break
		}

		var (
			reduce bool
			Hp     *mat64.Dense
			d, dQ  mat64.Dense
		)
		for j := 0; j < inner; j++ {
			var Hn mat64.Dense
			Hn.Scale(alpha, G)
			Hn.Sub(H, &Hn)
			Hn.Apply(posFilt, &Hn)

			d.Sub(&Hn, H)
			dQ.Mul(&WtW, &d)
			dQ.MulElem(&dQ, &d)
			d.MulElem(G, &d)

			sufficient := 0.99*mat64.Sum(&d)+0.5*mat64.Sum(&dQ) < 0

			if j == 0 {
				reduce = !sufficient
				Hp = H
			}
			if reduce {
				if sufficient {
					H = &Hn
					ok = true
					break
				} else {
					alpha *= beta
				}
			} else {
				if !sufficient || mat64.Equal(Hp, &Hn) {
					H = Hp
					break
				} else {
					alpha /= beta
					Hp = &Hn
				}
			}
		}
	}

	return H, G, i, ok
}
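The posFilt helper applied to Hn above is not included in this example. Given that it is used to keep the trial iterate non-negative, a plausible definition (an assumption, not taken from the source) is:

// posFilt clamps negative entries to zero; applying it element-wise projects a
// matrix onto the non-negative orthant.
func posFilt(r, c int, v float64) float64 {
	return math.Max(v, 0)
}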