Example #1
File: main.go Project: reggo/reggo
func main() {

	runtime.GOMAXPROCS(runtime.NumCPU() - 2)

	gopath := os.Getenv("GOPATH")
	path := filepath.Join(gopath, "prof", "github.com", "reggo", "reggo", "nnet")

	nInputs := 10
	nOutputs := 3
	nLayers := 2
	nNeurons := 50
	nSamples := 1000000
	nRuns := 50

	config := &profile.Config{
		CPUProfile:  true,
		ProfilePath: path,
	}

	defer profile.Start(config).Stop()

	net, err := nnet.NewSimpleTrainer(nInputs, nOutputs, nLayers, nNeurons, nnet.Linear{})
	if err != nil {
		log.Fatal(err)
	}

	// Generate some random data
	inputs := mat64.NewDense(nSamples, nInputs, nil)
	outputs := mat64.NewDense(nSamples, nOutputs, nil)
	for i := 0; i < nSamples; i++ {
		for j := 0; j < nInputs; j++ {
			inputs.Set(i, j, rand.Float64())
		}
		for j := 0; j < nOutputs; j++ {
			outputs.Set(i, j, rand.Float64())
		}
	}

	// Create the batch gradient-based training problem
	prob := train.NewBatchGradBased(net, true, inputs, outputs, nil, nil, nil)
	nParameters := net.NumParameters()

	parameters := make([]float64, nParameters)
	derivative := make([]float64, nParameters)

	for i := 0; i < nRuns; i++ {
		net.RandomizeParameters()
		net.Parameters(parameters)
		prob.ObjGrad(parameters, derivative)
		fmt.Println(floats.Sum(derivative))
	}
}
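Example #1 profiles the gradient evaluations by wrapping them in a Config/Start/Stop CPU profile. A minimal sketch of the same pattern around an arbitrary hot loop; the github.com/davecheney/profile import path is an assumption, since the example above omits its imports:

package main

import (
	"math"

	"github.com/davecheney/profile" // assumed import path; the example omits imports
)

func main() {
	// Only the code executed between Start and the deferred Stop is profiled.
	defer profile.Start(&profile.Config{
		CPUProfile:  true,
		ProfilePath: "prof", // directory the CPU profile is written to
	}).Stop()

	// Stand-in hot loop playing the role of the ObjGrad evaluations above.
	s := 0.0
	for i := 0; i < 100000000; i++ {
		s += math.Sqrt(float64(i))
	}
	_ = s
}

The resulting profile can then be loaded with go tool pprof to see where the gradient computation spends its time.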
Example #2
// TestDeriv uses finite differences to check that the derivative computed
// through Deriv is correct, and that computing the loss in parallel works
// properly. (A standalone sketch of the central-difference formula follows
// this example.)
func TestDeriv(t *testing.T, trainable DerivTester, inputs, trueOutputs common.RowMatrix, name string) {

	// Set the parameters to something random
	trainable.RandomizeParameters()

	// Compute the loss and derivative
	losser := loss.SquaredDistance{}
	regularizer := regularize.TwoNorm{}

	batchGrad := train.NewBatchGradBased(trainable, true, inputs, trueOutputs, nil, losser, regularizer)

	derivative := make([]float64, trainable.NumParameters())
	parameters := trainable.Parameters(nil)
	// No need to check the loss value itself: if Predict and the losser are correct, the loss must be too
	_ = batchGrad.ObjGrad(parameters, derivative)

	fdDerivative := make([]float64, trainable.NumParameters())

	wg := &sync.WaitGroup{}
	wg.Add(trainable.NumParameters())
	for i := 0; i < trainable.NumParameters(); i++ {
		go func(i int) {
			newParameters := make([]float64, trainable.NumParameters())
			tmpDerivative := make([]float64, trainable.NumParameters())
			copy(newParameters, parameters)
			newParameters[i] += fdStep
			loss1 := batchGrad.ObjGrad(newParameters, tmpDerivative)
			newParameters[i] -= 2 * fdStep
			loss2 := batchGrad.ObjGrad(newParameters, tmpDerivative)
			newParameters[i] += fdStep
			fdDerivative[i] = (loss1 - loss2) / (2 * fdStep)
			wg.Done()
		}(i)
	}
	wg.Wait()
	if !floats.EqualApprox(derivative, fdDerivative, 1e-6) {
		t.Errorf("%v: deriv doesn't match: Finite Difference: %v, Analytic: %v", name, fdDerivative, derivative)
	}
}
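The goroutine body above is a second-order central difference in each parameter. A minimal standalone sketch of the same check for a scalar function, with a made-up step size h standing in for the package-level fdStep used by TestDeriv:

package main

import (
	"fmt"
	"math"
)

func main() {
	f := math.Sin        // function under test
	analytic := math.Cos // its known derivative

	const h = 1e-6 // step size, playing the role of fdStep above
	x := 0.7

	// Central difference: (f(x+h) - f(x-h)) / (2h), the same formula applied
	// per parameter in TestDeriv.
	fd := (f(x+h) - f(x-h)) / (2 * h)

	fmt.Println("analytic:", analytic(x), "finite difference:", fd)
}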
Example #3
// TestLinearsolveAndDeriv compares the optimal weights found by gradient-based
// optimization with those found by a direct linear solve. (A toy 1-D version of
// this comparison follows the example.)
func TestLinearsolveAndDeriv(t *testing.T, linear train.LinearTrainable, inputs, trueOutputs common.RowMatrix, name string) {
	// Compare the two approaches without per-sample weights

	rows, cols := trueOutputs.Dims()
	predOutLinear := mat64.NewDense(rows, cols, nil)
	parametersLinearSolve := train.LinearSolve(linear, nil, inputs, trueOutputs, nil, nil)

	linear.SetParameters(parametersLinearSolve)
	linear.Predictor().PredictBatch(inputs, predOutLinear)

	//fmt.Println("Pred out linear", predOutLinear)

	linear.RandomizeParameters()
	parameters := linear.Parameters(nil)

	batch := train.NewBatchGradBased(linear, true, inputs, trueOutputs, nil, loss.SquaredDistance{}, regularize.None{})
	settings := multivariate.DefaultSettings()
	settings.GradAbsTol = 1e-11

	result, err := multivariate.OptimizeGrad(batch, parameters, settings, nil)
	if err != nil {
		t.Errorf("Error training: %v", err)
	}

	parametersDeriv := result.Loc

	deriv := make([]float64, linear.NumParameters())

	loss1 := batch.ObjGrad(parametersDeriv, deriv)

	linear.SetParameters(parametersDeriv)
	predOutDeriv := mat64.NewDense(rows, cols, nil)
	linear.Predictor().PredictBatch(inputs, predOutDeriv)

	linear.RandomizeParameters()
	init2 := linear.Parameters(nil)
	batch2 := train.NewBatchGradBased(linear, true, inputs, trueOutputs, nil, loss.SquaredDistance{}, regularize.None{})
	result2, err := multivariate.OptimizeGrad(batch2, init2, settings, nil)
	if err != nil {
		t.Errorf("Error training second run: %v", err)
	}
	parametersDeriv2 := result2.Loc

	//fmt.Println("starting deriv2 loss")
	deriv2 := make([]float64, linear.NumParameters())
	loss2 := batch2.ObjGrad(parametersDeriv2, deriv2)

	//fmt.Println("starting derivlin loss")
	derivlinear := make([]float64, linear.NumParameters())
	lossLin := batch2.ObjGrad(parametersLinearSolve, derivlinear)

	_ = loss1
	_ = loss2
	_ = lossLin

	if !floats.EqualApprox(parametersLinearSolve, parametersDeriv, 1e-8) {
		t.Errorf("%v: parameters don't match between gradient-based optimization and linear solve", name)
	}

}
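TestLinearsolveAndDeriv relies on the fact that, for a squared-distance loss with no regularization, the gradient-based optimum and the linear-solve solution should coincide. A toy 1-D illustration of that property, with made-up data and a hypothetical learning rate (no reggo types involved): fit y ≈ w*x by the closed-form least-squares solution and by plain gradient descent, then compare.

package main

import (
	"fmt"
	"math"
)

func main() {
	xs := []float64{1, 2, 3, 4, 5}
	ys := []float64{2.1, 3.9, 6.2, 7.8, 10.1} // roughly y = 2x, made-up data

	// Closed-form least squares for a single weight: w = sum(x*y) / sum(x*x).
	var sxy, sxx float64
	for i := range xs {
		sxy += xs[i] * ys[i]
		sxx += xs[i] * xs[i]
	}
	wSolve := sxy / sxx

	// Gradient descent on the mean squared error, standing in for the
	// gradient-based optimization in the test above.
	w, lr := 0.0, 0.01
	for iter := 0; iter < 10000; iter++ {
		var grad float64
		for i := range xs {
			grad += 2 * (w*xs[i] - ys[i]) * xs[i] / float64(len(xs))
		}
		w -= lr * grad
	}

	// With this loss and no regularizer the two solutions should agree closely,
	// which is the property the test checks with floats.EqualApprox.
	fmt.Println("linear solve:", wSolve, "gradient descent:", w,
		"difference:", math.Abs(wSolve-w))
}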