Example #1
// LinearSolve trains a Linear algorithm.
// Assumes inputs and outputs are already scaled.
// If features is nil, will call featurize.
// Will return nil if the regularizer is not a linear regularizer.
// Is destructive if any of the weights are zero.
// Losser is always the two-norm.
// Does not set the value of the parameters (in case this is called in parallel with a different routine).
func LinearSolve(linearTrainable LinearTrainable, features *mat64.Dense, inputs, trueOutputs common.RowMatrix,
	weights []float64, regularizer regularize.Regularizer) (parameters []float64) {
	// TODO: Allow Tikhonov regularization
	// TODO: Add test for weights
	// TODO: Need to do something about returning a []float64

	if !IsLinearSolveRegularizer(regularizer) {
		return nil
	}

	if features == nil {
		features = FeaturizeTrainable(linearTrainable, inputs, features)
	}

	_, nFeatures := features.Dims()

	var weightedFeatures, weightedOutput *mat64.Dense

	if weights != nil {
		// Scale rows by sqrt(weight) so the plain least-squares solve
		// below minimizes the weighted two-norm residual.
		scaledWeight := make([]float64, len(weights))
		for i, weight := range weights {
			scaledWeight[i] = math.Sqrt(weight)
		}

		diagWeight := diagonal.NewDiagonal(len(scaledWeight), scaledWeight)

		nSamples, outputDim := trueOutputs.Dims()
		weightedOutput = mat64.NewDense(nSamples, outputDim, nil)
		weightedFeatures = mat64.NewDense(nSamples, nFeatures, nil)

		weightedOutput.Mul(diagWeight, trueOutputs)
		weightedFeatures.Mul(diagWeight, features)
	}

	switch regularizer.(type) {
	case nil:
	case regularize.None:
	default:
		panic("Shouldn't be here. Must be error in IsLinearRegularizer")
	}
	if weights == nil {
		parameterMat, err := mat64.Solve(features, trueOutputs)
		if err != nil {
			panic(err)
		}
		return parameterMat.RawMatrix().Data
	}
	parameterMat, err := mat64.Solve(weightedFeatures, weightedOutput)
	if err != nil {
		panic(err)
	}

	return parameterMat.RawMatrix().Data
}
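
For orientation, a minimal calling sketch follows. Everything in it beyond LinearSolve itself is hypothetical: myTrainable stands in for a concrete LinearTrainable implementation, and passing *mat64.Dense for the common.RowMatrix arguments assumes that type satisfies the interface.

// Hypothetical usage sketch; myTrainable is a stand-in for any
// concrete LinearTrainable and is not defined above.
inputs := mat64.NewDense(4, 1, []float64{0, 1, 2, 3})
trueOutputs := mat64.NewDense(4, 1, []float64{1, 3, 5, 7})

// nil features: LinearSolve featurizes the inputs itself.
// nil weights and regularizer: plain unweighted least squares.
params := LinearSolve(myTrainable, nil, inputs, trueOutputs, nil, nil)
if params == nil {
	// the regularizer was rejected by IsLinearSolveRegularizer
}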
Example #2
// Iteratively Reweighted Least Squares (IRLS) estimation for Generalized Linear Models
func (l *glm) Train(A *mat64.Dense, b []float64) ([]float64, error) {
	if l.config == nil {
		return nil, fmt.Errorf("config not set")
	}

	nrow, ncol := A.Dims()
	x := mat64.NewDense(ncol, 1, rep(0.0, ncol))

	var i int64
	var err error
	for ; i < l.config.MaxIt; i++ {
		eta := matrixMult(A, x)
		etaCol := eta.Col(nil, 0)

		var (
			g      = make([]float64, nrow) // g = invLink(eta)
			gprime = make([]float64, nrow) // gprime = derivativeFn(eta)
			w      = make([]float64, nrow) // w = gprime^2 / variance(g)
		)

		for i, val := range etaCol {
			g[i] = l.config.F.LinkFn(val)
			gprime[i] = l.config.F.DerivativeFn(val)
			w[i] = math.Pow(gprime[i], 2.0) / l.config.F.VarianceFn(g[i])
		}

		// z = eta + (b - g) / gprime
		z := mat64.NewDense(nrow, 1, nil)
		z.Clone(eta)
		z.Apply(func(i, j int, v float64) float64 {
			return v + (b[i]-g[i])/gprime[i]
		}, z)

		// convert w = w * I
		wMat := mat64.NewDense(nrow, nrow, rep(0.0, nrow*nrow))
		for i := 0; i < nrow; i++ {
			wMat.Set(i, i, w[i])
		}

		var (
			wa     = matrixMult(wMat, A)
			cprod1 = matrixMult(wa.T(), A)
			wz     = matrixMult(wMat, z)
			cprod2 = matrixMult(wz.T(), A)
		)

		// save xold for evaluating convergence
		xold := mat64.NewDense(ncol, 1, nil)
		xold.Clone(x)

		// xnew = solve(crossprod(A,W*A), crossprod(A,W*z))
		x, err = mat64.Solve(cprod1, cprod2.T())
		if err != nil {
			return nil, err
		}

		// convergence = sqrt(crossprod(x - xold)) <= tolerance
		diff := &mat64.Dense{}
		diff.Sub(x, xold)
		conv := matrixMult(diff.T(), diff)
		if math.Sqrt(conv.At(0, 0)) <= l.config.Tolerance {
			break
		}
	}

	coef := x.Col(nil, 0)
	return coef, nil
}
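
The core of the loop is the weighted normal-equations update xnew = solve(crossprod(A, W*A), crossprod(A, W*z)). As a standalone sketch against the same mat64 package, that single step can be done without materializing the n-by-n weight matrix; the helper name wlsStep is hypothetical and not part of the source above.

// One IRLS step: solve (A^T W A) x = A^T W z, with W = diag(w).
// A is n-by-p, w holds the n working weights, z the n working responses.
func wlsStep(A *mat64.Dense, w, z []float64) (*mat64.Dense, error) {
	n, p := A.Dims()
	wa := mat64.NewDense(n, p, nil) // W*A, built row by row
	wz := mat64.NewDense(n, 1, nil) // W*z
	for i := 0; i < n; i++ {
		for j := 0; j < p; j++ {
			wa.Set(i, j, w[i]*A.At(i, j))
		}
		wz.Set(i, 0, w[i]*z[i])
	}
	atwa := mat64.NewDense(p, p, nil)
	atwa.Mul(A.T(), wa) // crossprod(A, W*A)
	atwz := mat64.NewDense(p, 1, nil)
	atwz.Mul(A.T(), wz) // crossprod(A, W*z)
	return mat64.Solve(atwa, atwz)
}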
Example #3
// LinearSolve trains a Linear algorithm.
// Assumes inputs and outputs are already scaled.
// If features is nil, will call featurize.
// Will return nil if the regularizer is not a linear regularizer.
// Is destructive if any of the weights are zero.
// Losser is always the two-norm.
// Does not set the value of the parameters (in case this is called in parallel with a different routine).
func LinearSolve(linearTrainable LinearTrainable, features *mat64.Dense, inputs, trueOutputs common.RowMatrix,
	weights []float64, regularizer regularize.Regularizer) (parameters []float64) {
	// TODO: Allow Tikhonov regularization
	// TODO: Add test for weights
	// TODO: Need to do something about returning a []float64

	if !IsLinearSolveRegularizer(regularizer) {
		return nil
	}

	if features == nil {
		features = FeaturizeTrainable(linearTrainable, inputs, features)
	}

	_, nFeatures := features.Dims()

	var weightedFeatures, weightedOutput *mat64.Dense

	fmt.Println("In linear solve")

	if weights != nil {
		panic("Need functionality to be better. Either banded special case in matrix or do the mulitplication by hand")
		scaledWeight := make([]float64, len(weights))
		for i, weight := range weights {
			scaledWeight[i] = math.Sqrt(weight)
		}
		diagWeight := diagonal.NewDiagonal(len(scaledWeight), scaledWeight)

		nSamples, outputDim := trueOutputs.Dims()
		weightedOutput = mat64.NewDense(nSamples, outputDim, nil)
		weightedFeatures = mat64.NewDense(nSamples, nFeatures, nil)

		weightedOutput.Copy(trueOutputs)
		weightedFeatures.Copy(features)

		// TODO: Replace this with better than mat multiply
		weightedOutput.Mul(diagWeight, weightedOutput)
		weightedFeatures.Mul(diagWeight, weightedFeatures)
	}

	switch regularizer.(type) {
	case nil:
	case regularize.None:
	default:
		panic("Shouldn't be here. Must be error in IsLinearRegularizer")
	}

	if weights == nil {
		parameterMat, err := mat64.Solve(features, trueOutputs)
		if err != nil {
			panic(err)
		}
		return parameterMat.RawMatrix().Data
	}
	parameterMat, err := mat64.Solve(weightedFeatures, weightedOutput)
	if err != nil {
		panic(err)
	}

	return parameterMat.RawMatrix().Data
}
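
The TODO about replacing the diagonal multiply can be addressed by scaling rows directly: left-multiplying by diag(sqrt(w)) only rescales each row, so no n-by-n matrix is needed. A sketch of that replacement for the weighted branch, assuming the mat64 API used above (this code is not part of the original source):

// Row-scale the copies by sqrt(weight) in place; equivalent to the
// diagonal matrix multiplies above without forming the n-by-n matrix.
nSamples, outputDim := weightedOutput.Dims()
for i := 0; i < nSamples; i++ {
	s := math.Sqrt(weights[i])
	for j := 0; j < nFeatures; j++ {
		weightedFeatures.Set(i, j, s*weightedFeatures.At(i, j))
	}
	for j := 0; j < outputDim; j++ {
		weightedOutput.Set(i, j, s*weightedOutput.At(i, j))
	}
}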