Example #1
func (s *S) TestSetRowColumn(c *check.C) {
	for _, as := range [][][]float64{
		{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}},
		{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}, {10, 11, 12}},
		{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}},
	} {
		for ri, row := range as {
			a := NewDense(flatten(as))
			t := &Dense{}
			t.Clone(a)
			a.SetRow(ri, make([]float64, a.mat.Cols))
			t.Sub(t, a)
			c.Check(t.Norm(0), check.Equals, floats.Norm(row, 2))
		}

		for ci := range as[0] {
			a := NewDense(flatten(as))
			t := &Dense{}
			t.Clone(a)
			a.SetCol(ci, make([]float64, a.mat.Rows))
			col := make([]float64, a.mat.Rows)
			for j := range col {
				col[j] = float64(ci + 1 + j*a.mat.Cols)
			}
			t.Sub(t, a)
			c.Check(t.Norm(0), check.Equals, floats.Norm(col, 2))
		}
	}
}
Example #2
func (b *BFGS) InitDirection(loc *Location, dir []float64) (stepSize float64) {
	dim := len(loc.X)
	b.dim = dim

	b.x = resize(b.x, dim)
	copy(b.x, loc.X)
	b.grad = resize(b.grad, dim)
	copy(b.grad, loc.Gradient)

	b.y = resize(b.y, dim)
	b.s = resize(b.s, dim)
	b.tmp = resize(b.tmp, dim)
	b.yVec = mat64.NewVector(dim, b.y)
	b.sVec = mat64.NewVector(dim, b.s)
	b.tmpVec = mat64.NewVector(dim, b.tmp)

	if b.invHess == nil || cap(b.invHess.RawSymmetric().Data) < dim*dim {
		b.invHess = mat64.NewSymDense(dim, nil)
	} else {
		b.invHess = mat64.NewSymDense(dim, b.invHess.RawSymmetric().Data[:dim*dim])
	}

	// The values of the Hessian are initialized in the first call to
	// NextDirection.

	// The initial direction is just the negative gradient because the
	// Hessian is initialized to the identity.
	copy(dir, loc.Gradient)
	floats.Scale(-1, dir)

	b.first = true

	return 1 / floats.Norm(dir, 2)
}
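A minimal sketch of the two closing steps above: the first search direction is the negated gradient (the inverse Hessian starts as the identity), and the returned step size 1/||dir||₂ makes the first trial step have unit length. The gradient values and the gonum.org/v1/gonum/floats import path are assumptions for illustration.

package main

import (
	"fmt"

	"gonum.org/v1/gonum/floats"
)

func main() {
	grad := []float64{3, 4} // hypothetical gradient at the starting point

	// Initial direction: negate the gradient in place, as InitDirection does.
	dir := make([]float64, len(grad))
	copy(dir, grad)
	floats.Scale(-1, dir)

	// Step size 1/||dir||_2 normalizes the first step to unit length.
	stepSize := 1 / floats.Norm(dir, 2)
	fmt.Println(dir, stepSize) // [-3 -4] 0.2
}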
Example #3
// SetCurrent sets the current value of the float, updating the previous
// value and the difference between them. Assumes that the length does not
// change per iteration.
func (f *Floats) SetCurrent(val []float64) {
	copy(f.previous, f.current)
	copy(f.current, val)
	floats.SubTo(f.diff, f.current, f.previous)
	f.norm = floats.Norm(f.current, 2)
}
Example #4
func TestMinimalSurface(t *testing.T) {
	for _, size := range [][2]int{
		{20, 30},
		{30, 30},
		{50, 40},
	} {
		f := NewMinimalSurface(size[0], size[1])
		x0 := f.InitX()
		grad := make([]float64, len(x0))
		f.Grad(grad, x0)
		fdGrad := fd.Gradient(nil, f.Func, x0, &fd.Settings{Formula: fd.Central})

		// Test that the numerical and analytical gradients agree.
		dist := floats.Distance(grad, fdGrad, math.Inf(1))
		if dist > 1e-9 {
			t.Errorf("grid %v x %v: numerical and analytical gradient do not match. |fdGrad - grad|_∞ = %v",
				size[0], size[1], dist)
		}

		// Test that the gradient at the minimum is small enough.
		// In some sense this test is not completely correct because ExactX
		// returns the exact solution to the continuous problem projected on the
		// grid, not the exact solution to the discrete problem which we are
		// solving. This is the reason why a relatively loose tolerance 1e-4
		// must be used.
		xSol := f.ExactX()
		f.Grad(grad, xSol)
		norm := floats.Norm(grad, math.Inf(1))
		if norm > 1e-4 {
			t.Errorf("grid %v x %v: gradient at the minimum not small enough. |grad|_∞ = %v",
				size[0], size[1], norm)
		}
	}
}
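The gradient check above is a common pattern: compare the analytical gradient against a finite-difference estimate in the infinity norm. A self-contained sketch of the same idea, using a hand-rolled central difference instead of the fd package and a made-up quadratic objective:

package main

import (
	"fmt"
	"math"

	"gonum.org/v1/gonum/floats"
)

func main() {
	// Hypothetical objective and its analytical gradient.
	f := func(x []float64) float64 { return x[0]*x[0] + 3*x[1]*x[1] }
	grad := func(dst, x []float64) { dst[0], dst[1] = 2*x[0], 6*x[1] }

	x := []float64{1.5, -0.7}

	g := make([]float64, len(x))
	grad(g, x)

	// Central-difference estimate of the gradient.
	const h = 1e-6
	fdGrad := make([]float64, len(x))
	for i := range x {
		orig := x[i]
		x[i] = orig + h
		fp := f(x)
		x[i] = orig - h
		fm := f(x)
		x[i] = orig
		fdGrad[i] = (fp - fm) / (2 * h)
	}

	// Agreement measured as the infinity norm of the difference,
	// exactly as in the test above.
	dist := floats.Distance(g, fdGrad, math.Inf(1))
	fmt.Println(dist < 1e-6) // true
}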
Example #5
func Solve(a sparse.Matrix, b, xInit []float64, settings *Settings, method Method) (result Result, err error) {
	stats := Stats{
		StartTime: time.Now(),
	}

	dim := len(xInit)
	if dim == 0 {
		panic("iterative: invalid dimension")
	}

	r, c := a.Dims()
	if r != c {
		panic("iterative: matrix is not square")
	}
	if c != dim {
		panic("iterative: mismatched size of the matrix")
	}
	if len(b) != dim {
		panic("iterative: mismatched size of the right-hand side vector")
	}

	if settings == nil {
		settings = DefaultSettings(dim)
	}

	ctx := Context{
		X:        make([]float64, dim),
		Residual: make([]float64, dim),
	}
	copy(ctx.X, xInit)
	copy(ctx.Residual, b)
	if floats.Norm(ctx.X, math.Inf(1)) > 0 {
		sparse.MulMatVec(-1, false, a, ctx.X, 1, 1, ctx.Residual, 1)
		stats.MatVecMultiplies++
	}

	if floats.Norm(ctx.Residual, 2) >= settings.Tolerance {
		err = iterate(method, a, b, settings, &ctx, &stats)
	}

	result = Result{
		X:       ctx.X,
		Stats:   stats,
		Runtime: time.Since(stats.StartTime),
	}
	return result, err
}
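A minimal dense sketch of the residual setup in Solve: the residual starts as b and, when the initial guess is nonzero, A*x is subtracted from it before its 2-norm is compared to the tolerance. The 2×2 system and the matVec helper are made up for illustration; only the floats calls mirror the code above.

package main

import (
	"fmt"
	"math"

	"gonum.org/v1/gonum/floats"
)

// matVec computes dst = a*x for a dense n×n matrix a stored row-major.
func matVec(dst, a, x []float64) {
	n := len(x)
	for i := 0; i < n; i++ {
		var s float64
		for j := 0; j < n; j++ {
			s += a[i*n+j] * x[j]
		}
		dst[i] = s
	}
}

func main() {
	a := []float64{4, 1, 1, 3} // 2×2 matrix
	b := []float64{1, 2}
	x := []float64{0.25, 0.5} // nonzero initial guess

	// r = b - A*x, computed only for a nonzero initial guess.
	r := make([]float64, len(b))
	copy(r, b)
	if floats.Norm(x, math.Inf(1)) > 0 {
		ax := make([]float64, len(b))
		matVec(ax, a, x)
		floats.Sub(r, ax)
	}

	tol := 1e-8
	fmt.Println(floats.Norm(r, 2) >= tol) // true: iteration is still needed
}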
Example #6
func checkConvergence(loc *Location, iterType IterationType, stats *Stats, settings *Settings) Status {
	if iterType == MajorIteration || iterType == InitIteration {
		if loc.Gradient != nil {
			norm := floats.Norm(loc.Gradient, math.Inf(1))
			if norm < settings.GradientThreshold {
				return GradientThreshold
			}
		}
		if loc.F < settings.FunctionThreshold {
			return FunctionThreshold
		}
	}

	if iterType == MajorIteration && settings.FunctionConverge != nil {
		status := settings.FunctionConverge.FunctionConverged(loc.F)
		if status != NotTerminated {
			return status
		}
	}

	// Check every step for negative infinity because it could break the
	// linesearches and -inf is the best you can do anyway.
	if math.IsInf(loc.F, -1) {
		return FunctionNegativeInfinity
	}

	if settings.FuncEvaluations > 0 {
		if stats.FuncEvaluations >= settings.FuncEvaluations {
			return FunctionEvaluationLimit
		}
	}

	if settings.GradEvaluations > 0 {
		if stats.GradEvaluations >= settings.GradEvaluations {
			return GradientEvaluationLimit
		}
	}

	if settings.HessEvaluations > 0 {
		if stats.HessEvaluations >= settings.HessEvaluations {
			return HessianEvaluationLimit
		}
	}

	if settings.Runtime > 0 {
		// TODO(vladimir-ch): It would be nice to update Runtime here.
		if stats.Runtime >= settings.Runtime {
			return RuntimeLimit
		}
	}

	if iterType == MajorIteration && settings.MajorIterations > 0 {
		if stats.MajorIterations >= settings.MajorIterations {
			return IterationLimit
		}
	}
	return NotTerminated
}
Example #7
// SetInit sets the initial value of the variable to be used by the
// optimizer (for example, the initial location or the initial
// function value).
func (f *Floats) SetInit(val []float64) {
	if len(f.init) > len(val) {
		f.init = f.init[:len(val)]
	} else {
		f.init = make([]float64, len(val))
	}
	copy(f.init, val)
	f.norm = floats.Norm(val, 2)
}
Example #8
// SetCurr sets a new value for the current location and updates the
// delta from the previous value.
func (o *Objective) SetCurr(f []float64) {
	// Find the current delta in the values
	o.delta = math.Abs(o.Floats.norm - floats.Norm(f, 2))
	// Set the initial delta if necessary
	if math.IsNaN(o.initDelta) {
		o.initDelta = o.delta
	}
	// Set the new current value
	o.Floats.SetCurr(f)
}
Example #9
func (o OneNorm) LossDeriv(parameters, derivative []float64) float64 {
	loss := o.Gamma * floats.Norm(parameters, 1)
	for i, p := range parameters {
		derivative[i] = o.Gamma
		if p < 0 {
			derivative[i] = -o.Gamma
		}
	}
	return loss
}
Example #10
func (o OneNorm) LossAddDeriv(parameters, derivative []float64) float64 {
	loss := o.Gamma * floats.Norm(parameters, 1)
	for i, p := range parameters {
		add := o.Gamma
		if p < 0 {
			add = -o.Gamma
		}
		derivative[i] += add
	}
	return loss
}
Example #11
// Status checks whether either of the tolerances has converged
func (f *Floats) Status() common.Status {
	s := f.AbsTol.Status(f.norm)
	if s != common.Continue {
		return s
	}
	return f.ChangeTol.Status(floats.Norm(f.diff, 2))
}
Example #12
// Initialize initializes the Float to be ready to optimize by setting the
// history slice to have length zero and setting the current value equal to
// the initial value. This should be called by the optimizer at the beginning
// of the optimization.
func (f *Floats) Initialize() error {
	f.hist = f.hist[:0]

	if f.init == nil {
		return errors.New("multivariate: initial slice is nil")
	}

	f.curr = make([]float64, len(f.init))
	copy(f.curr, f.init)
	f.opt = nil
	f.normInit = floats.Norm(f.curr, 2)
	return nil
}
Example #13
func iterate(method Method, a sparse.Matrix, b []float64, settings *Settings, ctx *Context, stats *Stats) error {
	dim := len(ctx.X)
	bNorm := floats.Norm(b, 2)
	if bNorm == 0 {
		bNorm = 1
	}

	request := method.Init(ctx)
	for {
		switch request {
		case NoOperation:

		case ComputeAp:
			ctx.Ap = resize(ctx.Ap, dim)
			sparse.MulMatVec(1, false, a, ctx.P, 1, 0, ctx.Ap, 1)
			stats.MatVecMultiplies++

		case SolvePreconditioner:
			ctx.Z = resize(ctx.Z, dim)
			copy(ctx.Z, ctx.Residual)
			stats.PrecondionerSolves++

		case CheckConvergence:
			stats.Iterations++
			stats.Residual = floats.Norm(ctx.Residual, 2) / bNorm
			fmt.Println(stats.Residual)
			if stats.Residual < settings.Tolerance {
				return nil
			}
			if stats.Iterations == settings.Iterations {
				return errors.New("iterative: reached iteration limit")
			}
		}

		request = method.Iterate(ctx)
	}
}
Example #14
func TestSetRowColumn(t *testing.T) {
	for _, as := range [][][]float64{
		{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}},
		{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}, {10, 11, 12}},
		{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}},
	} {
		for ri, row := range as {
			a := NewDense(flatten(as))
			m := &Dense{}
			m.Clone(a)
			a.SetRow(ri, make([]float64, a.mat.Cols))
			m.Sub(m, a)
			nt := Norm(m, 2)
			nr := floats.Norm(row, 2)
			if math.Abs(nt-nr) > 1e-14 {
				t.Errorf("Row %d norm mismatch, want: %g, got: %g", ri, nr, nt)
			}
		}

		for ci := range as[0] {
			a := NewDense(flatten(as))
			m := &Dense{}
			m.Clone(a)
			a.SetCol(ci, make([]float64, a.mat.Rows))
			col := make([]float64, a.mat.Rows)
			for j := range col {
				col[j] = float64(ci + 1 + j*a.mat.Cols)
			}
			m.Sub(m, a)
			nt := Norm(m, 2)
			nc := floats.Norm(col, 2)
			if math.Abs(nt-nc) > 1e-14 {
				t.Errorf("Column %d norm mismatch, want: %g, got: %g", ci, nc, nt)
			}
		}
	}
}
Example #15
func (p *Printer) Record(loc *Location, _ EvaluationType, iter IterationType, stats *Stats) error {
	if iter != MajorIteration && iter != InitIteration && iter != PostIteration {
		return nil
	}

	// Print values always on PostIteration or when ValueInterval has elapsed.
	printValues := time.Since(p.lastValue) > p.ValueInterval || iter == PostIteration
	if !printValues {
		// Return early if not printing anything.
		return nil
	}

	// Print heading when HeadingInterval lines have been printed, but never on PostIteration.
	printHeading := p.lastHeading >= p.HeadingInterval && iter != PostIteration
	if printHeading {
		p.lastHeading = 1
	} else {
		p.lastHeading++
	}

	if printHeading {
		headings := "\n" + fmt.Sprintf(printerBaseTmpl, printerHeadings[0], printerHeadings[1], printerHeadings[2], printerHeadings[3])
		if loc.Gradient != nil {
			headings += fmt.Sprintf(printerGradTmpl, printerHeadings[4], printerHeadings[5])
		}
		if loc.Hessian != nil {
			headings += fmt.Sprintf(printerHessTmpl, printerHeadings[6])
		}
		_, err := fmt.Fprintln(p.Writer, headings)
		if err != nil {
			return err
		}
	}

	values := fmt.Sprintf(printerBaseTmpl, stats.MajorIterations, stats.Runtime, stats.FuncEvaluations, loc.F)
	if loc.Gradient != nil {
		values += fmt.Sprintf(printerGradTmpl, stats.GradEvaluations, floats.Norm(loc.Gradient, math.Inf(1)))
	}
	if loc.Hessian != nil {
		values += fmt.Sprintf(printerHessTmpl, stats.HessEvaluations)
	}
	_, err := fmt.Fprintln(p.Writer, values)
	if err != nil {
		return err
	}

	p.lastValue = time.Now()
	return nil
}
Example #16
// Explicitly forms vectors and computes normalized dot product.
func cosCorrMultiNaive(f, g *rimg64.Multi) *rimg64.Image {
	h := rimg64.New(f.Width-g.Width+1, f.Height-g.Height+1)
	n := g.Width * g.Height * g.Channels
	a := make([]float64, n)
	b := make([]float64, n)
	for i := 0; i < h.Width; i++ {
		for j := 0; j < h.Height; j++ {
			a = a[:0]
			b = b[:0]
			for u := 0; u < g.Width; u++ {
				for v := 0; v < g.Height; v++ {
					for p := 0; p < g.Channels; p++ {
						a = append(a, f.At(i+u, j+v, p))
						b = append(b, g.At(u, v, p))
					}
				}
			}
			floats.Scale(1/floats.Norm(a, 2), a)
			floats.Scale(1/floats.Norm(b, 2), b)
			h.Set(i, j, floats.Dot(a, b))
		}
	}
	return h
}
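The inner loop above scales both window vectors to unit 2-norm and takes their dot product, which is just cosine similarity. A minimal sketch of that quantity on made-up vectors, assuming the gonum.org/v1/gonum/floats import path:

package main

import (
	"fmt"

	"gonum.org/v1/gonum/floats"
)

// cosine returns the normalized dot product of a and b, the value
// cosCorrMultiNaive stores at each window position.
func cosine(a, b []float64) float64 {
	return floats.Dot(a, b) / (floats.Norm(a, 2) * floats.Norm(b, 2))
}

func main() {
	a := []float64{1, 2, 2}
	b := []float64{2, 4, 4} // parallel to a

	fmt.Println(cosine(a, b)) // 1 (up to rounding)
}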
Example #17
func (l *LBFGS) InitDirection(loc *Location, dir []float64) (stepSize float64) {
	dim := len(loc.X)
	l.dim = dim

	if l.Store == 0 {
		l.Store = 15
	}

	l.oldest = l.Store - 1 // the first vector will be put in at 0

	l.x = resize(l.x, dim)
	l.grad = resize(l.grad, dim)
	copy(l.x, loc.X)
	copy(l.grad, loc.Gradient)

	l.y = resize(l.y, dim)
	l.s = resize(l.s, dim)
	l.a = resize(l.a, l.Store)
	l.rhoHist = resize(l.rhoHist, l.Store)

	if cap(l.yHist) < l.Store {
		n := make([][]float64, l.Store-cap(l.yHist))
		l.yHist = append(l.yHist, n...)
	}
	if cap(l.sHist) < l.Store {
		n := make([][]float64, l.Store-cap(l.sHist))
		l.sHist = append(l.sHist, n...)
	}
	l.yHist = l.yHist[:l.Store]
	l.sHist = l.sHist[:l.Store]
	for i := range l.sHist {
		l.sHist[i] = resize(l.sHist[i], dim)
		for j := range l.sHist[i] {
			l.sHist[i][j] = 0
		}
	}
	for i := range l.yHist {
		l.yHist[i] = resize(l.yHist[i], dim)
		for j := range l.yHist[i] {
			l.yHist[i][j] = 0
		}
	}

	copy(dir, loc.Gradient)
	floats.Scale(-1, dir)

	return 1 / floats.Norm(dir, 2)
}
Example #18
// checkConvergence returns NotTerminated if the Location does not satisfy the
// convergence criteria given by settings. Otherwise a corresponding status is
// returned.
// Unlike checkLimits, checkConvergence is called by Local only at MajorIterations.
func checkConvergence(loc *Location, settings *Settings) Status {
	if loc.Gradient != nil {
		norm := floats.Norm(loc.Gradient, math.Inf(1))
		if norm < settings.GradientThreshold {
			return GradientThreshold
		}
	}

	if loc.F < settings.FunctionThreshold {
		return FunctionThreshold
	}

	if settings.FunctionConverge != nil {
		return settings.FunctionConverge.FunctionConverged(loc.F)
	}

	return NotTerminated
}
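A minimal sketch of the gradient test used above: a point is treated as stationary when the largest gradient component, measured by the infinity norm, falls below the threshold. The gradient values and the threshold are made up.

package main

import (
	"fmt"
	"math"

	"gonum.org/v1/gonum/floats"
)

func main() {
	grad := []float64{1e-9, -3e-8, 2e-10}
	threshold := 1e-6

	// Converged when the largest-magnitude component of the gradient
	// is below the threshold.
	if floats.Norm(grad, math.Inf(1)) < threshold {
		fmt.Println("GradientThreshold reached")
	}
}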
Example #19
func (l *LBFGS) InitDirection(loc *Location, dir []float64) (stepSize float64) {
	dim := len(loc.X)
	l.dim = dim
	l.oldest = 0

	l.a = resize(l.a, l.Store)
	l.rho = resize(l.rho, l.Store)
	l.y = l.initHistory(l.y)
	l.s = l.initHistory(l.s)

	l.x = resize(l.x, dim)
	copy(l.x, loc.X)

	l.grad = resize(l.grad, dim)
	copy(l.grad, loc.Gradient)

	copy(dir, loc.Gradient)
	floats.Scale(-1, dir)
	return 1 / floats.Norm(dir, 2)
}
Example #20
// Init initializes the Float to be ready to optimize by setting the
// history slice to have length zero and setting the current value equal
// to the initial value. This should be called by the optimizer at the
// beginning of the optimization.
func (f *Floats) Init() error {
	f.Hist = f.Hist[:0]

	if f.Initial == nil {
		return errors.New("multivariate: initial slice is nil")
	}

	f.length = len(f.Initial)

	f.diff = make([]float64, len(f.Initial))
	f.current = make([]float64, len(f.Initial))
	f.previous = make([]float64, len(f.Initial))
	for i := range f.previous {
		f.previous[i] = math.Inf(1)
	}
	copy(f.current, f.Initial)
	f.norm = floats.Norm(f.current, 2)
	floats.SubTo(f.diff, f.current, f.previous)

	f.AddToHist(f.Initial)
	//f.prevNorm = math.Inf(1)
	return nil
}
Example #21
func testLocal(t *testing.T, tests []unconstrainedTest, method Method) {
	for _, test := range tests {
		if test.long && testing.Short() {
			continue
		}

		settings := DefaultSettings()
		settings.Recorder = nil
		if method != nil && method.Needs().Gradient {
			// Turn off function convergence checks for gradient-based methods.
			settings.FunctionConverge = nil
		} else {
			if test.fIter == 0 {
				test.fIter = 20
			}
			settings.FunctionConverge.Iterations = test.fIter
			if test.fAbsTol == 0 {
				test.fAbsTol = 1e-12
			}
			settings.FunctionConverge.Absolute = test.fAbsTol
		}
		if test.gradTol == 0 {
			test.gradTol = 1e-12
		}
		settings.GradientThreshold = test.gradTol

		result, err := Local(test.p, test.x, settings, method)
		if err != nil {
			t.Errorf("error finding minimum (%v) for:\n%v", err, test)
			continue
		}
		if result == nil {
			t.Errorf("nil result without error for:\n%v", test)
			continue
		}

		// Check that the function value at the found optimum location is
		// equal to result.F.
		optF := test.p.Func(result.X)
		if optF != result.F {
			t.Errorf("Function value at the optimum location %v not equal to the returned value %v for:\n%v",
				optF, result.F, test)
		}
		if result.Gradient != nil {
			// Evaluate the norm of the gradient at the found optimum location.
			g := make([]float64, len(test.x))
			test.p.Grad(result.X, g)

			if !floats.Equal(result.Gradient, g) {
				t.Errorf("Gradient at the optimum location not equal to the returned value for:\n%v", test)
			}

			optNorm := floats.Norm(g, math.Inf(1))
			// Check that the norm of the gradient at the found optimum location is
			// smaller than the tolerance.
			if optNorm >= settings.GradientThreshold {
				t.Errorf("Norm of the gradient at the optimum location %v not smaller than tolerance %v for:\n%v",
					optNorm, settings.GradientThreshold, test)
			}
		}

		if method == nil {
			// The tests below make sense only if the method used is known.
			continue
		}

		if !method.Needs().Gradient && !method.Needs().Hessian {
			// Gradient-free tests can correctly terminate only with
			// FunctionConvergence status.
			if result.Status != FunctionConvergence {
				t.Errorf("Status not %v, %v instead", FunctionConvergence, result.Status)
			}
		}

		// We are going to restart the solution using known initial data, so
		// evaluate them.
		settings.UseInitialData = true
		settings.InitialValue = test.p.Func(test.x)
		if method.Needs().Gradient {
			settings.InitialGradient = resize(settings.InitialGradient, len(test.x))
			test.p.Grad(test.x, settings.InitialGradient)
		}
		if method.Needs().Hessian {
			settings.InitialHessian = mat64.NewSymDense(len(test.x), nil)
			test.p.Hess(test.x, settings.InitialHessian)
		}

		// Rerun the test to make sure that it gets the same answer with the
		// same starting condition, this time using the initial data.
		result2, err2 := Local(test.p, test.x, settings, method)
		if err2 != nil {
			t.Errorf("error finding minimum second time (%v) for:\n%v", err2, test)
			continue
		}
		if result2 == nil {
			t.Errorf("second time nil result without error for:\n%v", test)
			continue
		}

		// At the moment all the optimizers are deterministic, so check that we
		// get _exactly_ the same answer second time as well.
		if result.F != result2.F || !floats.Equal(result.X, result2.X) {
			t.Errorf("Different minimum second time for:\n%v", test)
		}

		// Check that providing initial data reduces the number of evaluations exactly by one.
		if result.FuncEvaluations != result2.FuncEvaluations+1 {
			t.Errorf("Providing initial data does not reduce the number of Func() calls for:\n%v", test)
			continue
		}
		if method.Needs().Gradient {
			if result.GradEvaluations != result2.GradEvaluations+1 {
				t.Errorf("Providing initial data does not reduce the number of Grad() calls for:\n%v", test)
				continue
			}
		}
		if method.Needs().Hessian {
			if result.HessEvaluations != result2.HessEvaluations+1 {
				t.Errorf("Providing initial data does not reduce the number of Hess() calls for:\n%v", test)
				continue
			}
		}
	}
}
Example #22
// Linesearch performs a line search. The optimizer should turn off all
// non-Wolfe termination criteria for the gradient and step.
func Linesearch(multifun common.MultiObjGrad, method LinesearchMethod, settings univariate.GradSettings, wolfe WolfeConditioner, searchVector []float64, initLoc []float64, initObj float64, initGrad []float64) (*LinesearchResult, error) {

	// Linesearch modifies the values of the slices, but should revert the changes by the end

	// Find the norm of the search direction
	normSearchVector := floats.Norm(searchVector, 2)

	// Find the search direction (replace this with an input to avoid make?)
	direction := make([]float64, len(searchVector))
	copy(direction, searchVector)
	floats.Scale(1/normSearchVector, direction)

	// Find the initial projection of the gradient into the search direction
	initDirectionalGrad := floats.Dot(direction, initGrad)

	if initDirectionalGrad > 0 {
		return &LinesearchResult{}, errors.New("initial directional gradient must be negative")
	}

	// Set wolfe constants
	wolfe.SetInitState(initObj, initDirectionalGrad)
	wolfe.SetCurrState(initObj, initDirectionalGrad, 1.0)
	fun := &linesearchFun{
		fun:         multifun,
		wolfe:       wolfe,
		direction:   direction,
		initLoc:     initLoc,
		currLoc:     make([]float64, len(initLoc)),
		currLocCopy: make([]float64, len(initLoc)),
		currGrad:    make([]float64, len(initLoc)),
	}

	settings.Gradient.Initial = initDirectionalGrad
	settings.Objective.Initial = initObj

	stepSettings := method.GetStepSettings()
	stepSettings.InitialStepSize = normSearchVector

	// Run optimization, initial location is zero
	optVal, optLoc, result, err := univariate.OptimizeGrad(fun, 0, settings, method)
	//status, err := common.OptimizeOpter(method, fun)

	// Regenerate the results structure (do this before returning the error in case the optimizer can recover from it)
	// need to scale alpha_k because linesearch is x_k + alpha_k p_k
	r := &LinesearchResult{
		Loc:  fun.currLoc,
		Obj:  optVal,
		Grad: fun.currGrad,
		Step: optLoc / normSearchVector,
	}

	if err != nil {
		fmt.Println("Error in linsearch")
		return r, errors.New("linesearch: error during linesearch optimization: " + err.Error())
	}
	stat := result.Status
	// Check to make sure that the termination status is due to the Wolfe conditions
	if stat != common.WolfeConditionsMet {
		// If the status wasn't because of wolfe conditions, see if they are met anyway
		c := wolfe.Status()
		if c == common.WolfeConditionsMet {
			// Conditions met, no problem
			return r, nil
		}
		// Conditions not met
		return r, errors.New("linesearch: status not because of wolfe conditions.")
	}
	return r, nil
}
Example #23
func (t TwoNorm) Loss(parameters []float64) float64 {
	return t.Gamma * math.Pow(floats.Norm(parameters, 2), 2)
}
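The loss above is Gamma times the squared 2-norm. Squaring floats.Norm(p, 2) takes a square root only to undo it; the same value can be obtained from floats.Dot(p, p). A small sketch comparing the two, with made-up Gamma and parameters:

package main

import (
	"fmt"
	"math"

	"gonum.org/v1/gonum/floats"
)

func main() {
	gamma := 0.1
	p := []float64{1, -2, 3}

	viaNorm := gamma * math.Pow(floats.Norm(p, 2), 2)
	viaDot := gamma * floats.Dot(p, p) // no square root involved

	fmt.Println(viaNorm, viaDot) // both 1.4, up to rounding
}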
Example #24
func (lbfgs *Lbfgs) Iterate(loc *multi.Location, obj *uni.Objective, grad *multi.Gradient, fun optimize.MultiObjGrad) (status.Status, error) {
	counter := lbfgs.counter
	q := lbfgs.q
	a := lbfgs.a
	b := lbfgs.b
	rhoHist := lbfgs.rhoHist
	sHist := lbfgs.sHist
	yHist := lbfgs.yHist
	gamma_k := lbfgs.gamma_k
	tmp := lbfgs.tmp
	p_k := lbfgs.p_k
	s_k := lbfgs.s_k
	y_k := lbfgs.y_k
	z := lbfgs.z

	// Calculate search direction
	for i, val := range grad.Curr() {
		q[i] = val
	}
	for i := counter - 1; i >= 0; i-- {
		a[i] = rhoHist[i] * floats.Dot(sHist[i], q)
		copy(tmp, yHist[i])
		floats.Scale(a[i], tmp)
		floats.Sub(q, tmp)
	}
	for i := lbfgs.NumStore - 1; i >= counter; i-- {
		a[i] = rhoHist[i] * floats.Dot(sHist[i], q)
		copy(tmp, yHist[i])
		floats.Scale(a[i], tmp)
		//fmt.Println(q)
		//fmt.Println(tmp)
		floats.Sub(q, tmp)
	}

	// Assume H_0 is the identity times gamma_k
	copy(z, q)
	floats.Scale(gamma_k, z)
	// Second loop for update, going oldest to newest
	for i := counter; i < lbfgs.NumStore; i++ {
		b[i] = rhoHist[i] * floats.Dot(yHist[i], z)
		copy(tmp, sHist[i])
		floats.Scale(a[i]-b[i], tmp)
		floats.Add(z, tmp)
	}
	for i := 0; i < counter; i++ {
		b[i] = rhoHist[i] * floats.Dot(yHist[i], z)
		copy(tmp, sHist[i])
		floats.Scale(a[i]-b[i], tmp)
		floats.Add(z, tmp)
	}

	lbfgs.a = a
	lbfgs.b = b

	copy(p_k, z)
	floats.Scale(-1, p_k)
	normP_k := floats.Norm(p_k, 2)

	// Perform line search -- need to find some way to implement this, especially bookkeeping function values
	linesearchResult, err := linesearch.Linesearch(fun, lbfgs.LinesearchMethod, lbfgs.LinesearchSettings, lbfgs.Wolfe, p_k, loc.Curr(), obj.Curr(), grad.Curr())

	// In the future add a check to switch to a different linesearcher?
	if err != nil {
		return status.LinesearchFailure, err
	}
	x_kp1 := linesearchResult.Loc
	f_kp1 := linesearchResult.Obj
	g_kp1 := linesearchResult.Grad
	alpha_k := linesearchResult.Step

	// Update hessian estimate
	copy(s_k, p_k)
	floats.Scale(alpha_k, s_k)

	copy(y_k, g_kp1)
	floats.Sub(y_k, grad.Curr())
	skDotYk := floats.Dot(s_k, y_k)

	// Bookkeep the results
	stepSize := alpha_k * normP_k
	lbfgs.step.AddToHist(stepSize)
	lbfgs.step.SetCurr(stepSize)
	loc.SetCurr(x_kp1)
	//lbfgs.loc.AddToHist(x_kp1)

	//fmt.Println(lbfgs.loc.GetHist())
	obj.SetCurr(f_kp1)
	grad.SetCurr(g_kp1)

	copy(sHist[counter], s_k)
	copy(yHist[counter], y_k)
	rhoHist[counter] = 1 / skDotYk

	lbfgs.gamma_k = skDotYk / floats.Dot(y_k, y_k)

	lbfgs.counter += 1
	if lbfgs.counter == lbfgs.NumStore {
		lbfgs.counter = 0
	}
	return status.Continue, nil
}
Example #25
func (o OneNorm) Loss(parameters []float64) float64 {
	return o.Gamma * floats.Norm(parameters, 1)
}
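floats.Norm takes the norm order as its second argument, so the one-norm penalty above is just the L=1 case. A tiny sketch of the common orders on a made-up vector:

package main

import (
	"fmt"
	"math"

	"gonum.org/v1/gonum/floats"
)

func main() {
	p := []float64{3, -4}

	fmt.Println(floats.Norm(p, 1))           // 7: sum of absolute values
	fmt.Println(floats.Norm(p, 2))           // 5: Euclidean length
	fmt.Println(floats.Norm(p, math.Inf(1))) // 4: largest absolute value
}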
Example #26
func testDtrevc3(t *testing.T, impl Dtrevc3er, side lapack.EigVecSide, howmny lapack.HowMany, tmat blas64.General, optwork bool, rnd *rand.Rand) {
	const tol = 1e-14

	n := tmat.Rows
	extra := tmat.Stride - tmat.Cols
	right := side != lapack.LeftEigVec
	left := side != lapack.RightEigVec

	var selected, selectedWant []bool
	var mWant int // How many columns will the eigenvectors occupy.
	if howmny == lapack.SelectedEigVec {
		selected = make([]bool, n)
		selectedWant = make([]bool, n)
		// Dtrevc3 will compute only selected eigenvectors. Pick them
		// randomly disregarding whether they are real or complex.
		for i := range selected {
			if rnd.Float64() < 0.5 {
				selected[i] = true
			}
		}
		// Dtrevc3 will modify (standardize) the slice selected based on
		// whether the corresponding eigenvalues are real or complex. Do
		// the same process here to fill selectedWant.
		for i := 0; i < n; {
			if i == n-1 || tmat.Data[(i+1)*tmat.Stride+i] == 0 {
				// Real eigenvalue.
				if selected[i] {
					selectedWant[i] = true
					mWant++ // Real eigenvectors occupy one column.
				}
				i++
			} else {
				// Complex eigenvalue.
				if selected[i] || selected[i+1] {
					// Dtrevc3 will modify selected so that
					// only the first element of the pair is
					// true.
					selectedWant[i] = true
					mWant += 2 // Complex eigenvectors occupy two columns.
				}
				i += 2
			}
		}
	} else {
		// All eigenvectors occupy n columns.
		mWant = n
	}

	var vr blas64.General
	if right {
		if howmny == lapack.AllEigVecMulQ {
			vr = eye(n, n+extra)
		} else {
			// VR will be overwritten.
			vr = nanGeneral(n, mWant, n+extra)
		}
	}

	var vl blas64.General
	if left {
		if howmny == lapack.AllEigVecMulQ {
			vl = eye(n, n+extra)
		} else {
			// VL will be overwritten.
			vl = nanGeneral(n, mWant, n+extra)
		}
	}

	work := make([]float64, max(1, 3*n))
	if optwork {
		impl.Dtrevc3(side, howmny, nil, n, nil, 1, nil, 1, nil, 1, mWant, work, -1)
		work = make([]float64, int(work[0]))
	}

	m := impl.Dtrevc3(side, howmny, selected, n, tmat.Data, tmat.Stride,
		vl.Data, vl.Stride, vr.Data, vr.Stride, mWant, work, len(work))

	prefix := fmt.Sprintf("Case side=%v, howmny=%v, n=%v, extra=%v, optwk=%v",
		side, howmny, n, extra, optwork)

	if !generalOutsideAllNaN(tmat) {
		t.Errorf("%v: out-of-range write to T", prefix)
	}
	if !generalOutsideAllNaN(vl) {
		t.Errorf("%v: out-of-range write to VL", prefix)
	}
	if !generalOutsideAllNaN(vr) {
		t.Errorf("%v: out-of-range write to VR", prefix)
	}

	if m != mWant {
		t.Errorf("%v: unexpected value of m. Want %v, got %v", prefix, mWant, m)
	}

	if howmny == lapack.SelectedEigVec {
		for i := range selected {
			if selected[i] != selectedWant[i] {
				t.Errorf("%v: unexpected selected[%v]", prefix, i)
			}
		}
	}

	// Check that the columns of VR and VL are actually eigenvectors and
	// that the magnitude of their largest element is 1.
	var k int
	for j := 0; j < n; {
		re := tmat.Data[j*tmat.Stride+j]
		if j == n-1 || tmat.Data[(j+1)*tmat.Stride+j] == 0 {
			if howmny == lapack.SelectedEigVec && !selected[j] {
				j++
				continue
			}
			if right {
				ev := columnOf(vr, k)
				norm := floats.Norm(ev, math.Inf(1))
				if math.Abs(norm-1) > tol {
					t.Errorf("%v: magnitude of largest element of VR[:,%v] not 1", prefix, k)
				}
				if !isRightEigenvectorOf(tmat, ev, nil, complex(re, 0), tol) {
					t.Errorf("%v: VR[:,%v] is not real right eigenvector", prefix, k)
				}
			}
			if left {
				ev := columnOf(vl, k)
				norm := floats.Norm(ev, math.Inf(1))
				if math.Abs(norm-1) > tol {
					t.Errorf("%v: magnitude of largest element of VL[:,%v] not 1", prefix, k)
				}
				if !isLeftEigenvectorOf(tmat, ev, nil, complex(re, 0), tol) {
					t.Errorf("%v: VL[:,%v] is not real left eigenvector", prefix, k)
				}
			}
			k++
			j++
			continue
		}
		if howmny == lapack.SelectedEigVec && !selected[j] {
			j += 2
			continue
		}
		im := math.Sqrt(math.Abs(tmat.Data[(j+1)*tmat.Stride+j])) *
			math.Sqrt(math.Abs(tmat.Data[j*tmat.Stride+j+1]))
		if right {
			evre := columnOf(vr, k)
			evim := columnOf(vr, k+1)
			var evmax float64
			for i, v := range evre {
				evmax = math.Max(evmax, math.Abs(v)+math.Abs(evim[i]))
			}
			if math.Abs(evmax-1) > tol {
				t.Errorf("%v: magnitude of largest element of VR[:,%v] not 1", prefix, k)
			}
			if !isRightEigenvectorOf(tmat, evre, evim, complex(re, im), tol) {
				t.Errorf("%v: VR[:,%v:%v] is not complex right eigenvector", prefix, k, k+1)
			}
			floats.Scale(-1, evim)
			if !isRightEigenvectorOf(tmat, evre, evim, complex(re, -im), tol) {
				t.Errorf("%v: VR[:,%v:%v] is not complex right eigenvector", prefix, k, k+1)
			}
		}
		if left {
			evre := columnOf(vl, k)
			evim := columnOf(vl, k+1)
			var evmax float64
			for i, v := range evre {
				evmax = math.Max(evmax, math.Abs(v)+math.Abs(evim[i]))
			}
			if math.Abs(evmax-1) > tol {
				t.Errorf("%v: magnitude of largest element of VL[:,%v] not 1", prefix, k)
			}
			if !isLeftEigenvectorOf(tmat, evre, evim, complex(re, im), tol) {
				t.Errorf("%v: VL[:,%v:%v] is not complex left eigenvector", prefix, k, k+1)
			}
			floats.Scale(-1, evim)
			if !isLeftEigenvectorOf(tmat, evre, evim, complex(re, -im), tol) {
				t.Errorf("%v: VL[:,%v:%v] is not complex left eigenvector", prefix, k, k+1)
			}
		}
		k += 2
		j += 2
	}
}
Example #27
// HITS returns the Hyperlink-Induced Topic Search hub-authority scores for
// nodes of the directed graph g. HITS terminates when the 2-norm of the
// vector difference between iterations is below tol. The returned map is
// keyed on the graph node IDs.
func HITS(g graph.Directed, tol float64) map[int]HubAuthority {
	nodes := g.Nodes()

	// Make a topological copy of g with dense node IDs.
	indexOf := make(map[int]int, len(nodes))
	for i, n := range nodes {
		indexOf[n.ID()] = i
	}
	nodesLinkingTo := make([][]int, len(nodes))
	nodesLinkedFrom := make([][]int, len(nodes))
	for i, n := range nodes {
		for _, u := range g.To(n) {
			nodesLinkingTo[i] = append(nodesLinkingTo[i], indexOf[u.ID()])
		}
		for _, v := range g.From(n) {
			nodesLinkedFrom[i] = append(nodesLinkedFrom[i], indexOf[v.ID()])
		}
	}
	indexOf = nil

	w := make([]float64, 4*len(nodes))
	auth := w[:len(nodes)]
	hub := w[len(nodes) : 2*len(nodes)]
	for i := range nodes {
		auth[i] = 1
		hub[i] = 1
	}
	deltaAuth := w[2*len(nodes) : 3*len(nodes)]
	deltaHub := w[3*len(nodes):]

	var norm float64
	for {
		norm = 0
		for v := range nodes {
			var a float64
			for _, u := range nodesLinkingTo[v] {
				a += hub[u]
			}
			deltaAuth[v] = auth[v]
			auth[v] = a
			norm += a * a
		}
		norm = math.Sqrt(norm)

		for i := range auth {
			auth[i] /= norm
			deltaAuth[i] -= auth[i]
		}

		norm = 0
		for u := range nodes {
			var h float64
			for _, v := range nodesLinkedFrom[u] {
				h += auth[v]
			}
			deltaHub[u] = hub[u]
			hub[u] = h
			norm += h * h
		}
		norm = math.Sqrt(norm)

		for i := range hub {
			hub[i] /= norm
			deltaHub[i] -= hub[i]
		}

		if floats.Norm(deltaAuth, 2) < tol && floats.Norm(deltaHub, 2) < tol {
			break
		}
	}

	hubAuth := make(map[int]HubAuthority, len(nodes))
	for i, n := range nodes {
		hubAuth[n.ID()] = HubAuthority{Hub: hub[i], Authority: auth[i]}
	}

	return hubAuth
}
Example #28
// SetCurr sets the current value of the float and appends it to the history
// if float.SaveHist() is true. Assumes that the length does not change per
// iteration. A copy is NOT made. Should only be called by the optimizer.
func (f *Floats) SetCurr(val []float64) {
	f.AddToHist(val)
	copy(f.curr, val)
	f.norm = floats.Norm(f.curr, 2)
}