Example #1 (Epsilon)
func ExamplePGM7_LogisticRegression_Epsilon() {
	// Seed the RNG so the randomized training run, and hence the example
	// output below, is reproducible.
	rand.Seed(98765)
	trainFeatures, trainLabels := readTrainData()
	benchmarkFeatures, benchmarkLabels := readBenchmarkData()

	// Try successively tighter values of epsilon.
	for _, epsilon := range []float64{1e-1, 1e-2, 1e-3} {
		fmt.Println("---\nepsilon: ", epsilon)
		classifier := ai.TrainLogisticRegressionClassifier(
			trainFeatures,
			trainLabels,
			0,
			&sgrad.RelativeMeanImprovementCrit{},
			epsilon)
		fmt.Println("train set: ", ai.EvaluateBinaryClassifier(classifier, trainFeatures, trainLabels))
		fmt.Println("benchmark set: ", ai.EvaluateBinaryClassifier(classifier, benchmarkFeatures, benchmarkLabels))
	}

	// Output:
	// ---
	// epsilon:  0.1
	// train set:  0.99
	// benchmark set:  0.94
	// ---
	// epsilon:  0.01
	// train set:  0.99
	// benchmark set:  0.925
	// ---
	// epsilon:  0.001
	// train set:  1
	// benchmark set:  0.93
}
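The RelativeMeanImprovementCrit stopping rule itself is not shown above; the sketch below illustrates one plausible reading of it, stopping once the objective's running mean improves by less than epsilon in relative terms. The converged helper, its signature, and the averaging window are assumptions for illustration, not the sgrad package's actual code.

// A minimal, self-contained sketch of a relative-improvement convergence
// test, assuming this is roughly what RelativeMeanImprovementCrit checks
// with the epsilon values swept in the example above.
package main

import (
	"fmt"
	"math"
)

// converged reports whether the objective's mean improved by less than
// epsilon, relative to its previous value.
func converged(prevMean, curMean, epsilon float64) bool {
	return math.Abs(prevMean-curMean) <= epsilon*math.Abs(prevMean)
}

func main() {
	// With epsilon = 1e-2, a drop in the mean objective from 0.5000 to
	// 0.4990 (0.2% relative) already counts as converged; with the tighter
	// epsilon = 1e-3 training would continue.
	fmt.Println(converged(0.5000, 0.4990, 1e-2)) // true
	fmt.Println(converged(0.5000, 0.4990, 1e-3)) // false
}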
Example #2 (OptimizeLambda)
func ExamplePGM7_LogisticRegression_OptimizeLambda() {
	rand.Seed(98765)
	trainFeatures, trainLabels := readTrainData()
	benchmarkFeatures, benchmarkLabels := readBenchmarkData()

	// goalFunc scores a candidate lambda by its holdout accuracy on the
	// training data; the score is negated because gssearh.Minimize minimizes.
	goalFunc := func(lambda float64) float64 {
		score := ai.HoldoutTestBinaryClassifier(
			trainFeatures,
			trainLabels,
			.1,
			ai.NewLogisticRegressionTrainer(
				lambda,
				&sgrad.NumIterationsCrit{NumIterations: 10},
				1e-8))
		return -score
	}

	lambda := gssearh.Minimize(0, 10, goalFunc, &gssearh.AbsoluteErrorTermCrit{}, .1)
	fmt.Println("Optimal lambda:", lambda)
	classifier := ai.TrainLogisticRegressionClassifier(
		trainFeatures,
		trainLabels,
		lambda,
		&sgrad.RelativeMeanImprovementCrit{},
		1e-2)
	fmt.Println("train set: ", ai.EvaluateBinaryClassifier(classifier, trainFeatures, trainLabels))
	fmt.Println("benchmark set: ", ai.EvaluateBinaryClassifier(classifier, benchmarkFeatures, benchmarkLabels))

	// Output:
	// Optimal lambda: 1.4841053312063623
	// train set:  0.895
	// benchmark set:  0.9
}
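gssearh.Minimize presumably performs a golden-section search over [0, 10], narrowing the bracket until it is shorter than the 0.1 tolerance. The self-contained goldenMinimize below is a minimal sketch of that technique under those assumptions; it is not the gssearh implementation, and its name and signature are invented for illustration.

// goldenMinimize narrows [a, b] around the minimum of a unimodal f until the
// interval is shorter than tol, reusing one probe point per iteration.
package main

import (
	"fmt"
	"math"
)

func goldenMinimize(a, b float64, f func(float64) float64, tol float64) float64 {
	invPhi := (math.Sqrt(5) - 1) / 2 // 1/phi, about 0.618
	x1 := b - invPhi*(b-a)
	x2 := a + invPhi*(b-a)
	f1, f2 := f(x1), f(x2)
	for b-a > tol {
		if f1 < f2 {
			// Minimum lies in [a, x2]; the old x1 becomes the new upper probe.
			b, x2, f2 = x2, x1, f1
			x1 = b - invPhi*(b-a)
			f1 = f(x1)
		} else {
			// Minimum lies in [x1, b]; the old x2 becomes the new lower probe.
			a, x1, f1 = x1, x2, f2
			x2 = a + invPhi*(b-a)
			f2 = f(x2)
		}
	}
	return (a + b) / 2
}

func main() {
	// Minimize (x-1.5)^2 on [0, 10]; prints a value within the 0.1 tolerance
	// of the true minimizer 1.5.
	f := func(x float64) float64 { return (x - 1.5) * (x - 1.5) }
	fmt.Printf("%.3f\n", goldenMinimize(0, 10, f, 0.1))
}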
Example #3 (Iterations)
func ExamplePGM7_LogisticRegression_Iterations() {
	rand.Seed(98765)
	trainFeatures, trainLabels := readTrainData()
	benchmarkFeatures, benchmarkLabels := readBenchmarkData()

	// Try an increasing iteration budget for the fixed-iteration stopping criterion.
	for _, iterations := range []int{1, 10, 100, 1000} {
		fmt.Println("---\niterations: ", iterations)
		classifier := ai.TrainLogisticRegressionClassifier(
			trainFeatures,
			trainLabels,
			0,
			&sgrad.NumIterationsCrit{NumIterations: iterations},
			1e-8)
		fmt.Println("train set: ", ai.EvaluateBinaryClassifier(classifier, trainFeatures, trainLabels))
		fmt.Println("benchmark set: ", ai.EvaluateBinaryClassifier(classifier, benchmarkFeatures, benchmarkLabels))
	}

	// Output:
	// ---
	// iterations:  1
	// train set:  0.955
	// benchmark set:  0.94
	// ---
	// iterations:  10
	// train set:  0.985
	// benchmark set:  0.925
	// ---
	// iterations:  100
	// train set:  1
	// benchmark set:  0.925
	// ---
	// iterations:  1000
	// train set:  1
	// benchmark set:  0.93
}
Example #4 (Lambda)
func ExamplePGM7_LogisticRegression_Lambda() {
	rand.Seed(98765)
	trainFeatures, trainLabels := readTrainData()
	benchmarkFeatures, benchmarkLabels := readBenchmarkData()

	// Sweep the regularization parameter lambda from none to fairly strong.
	for _, lambda := range []float64{0, 0.1, 0.2, 0.4, 0.8, 1} {
		fmt.Println("---\nlambda: ", lambda)
		classifier := ai.TrainLogisticRegressionClassifier(
			trainFeatures,
			trainLabels,
			lambda,
			&sgrad.RelativeMeanImprovementCrit{},
			1e-2)
		fmt.Println("train set: ", ai.EvaluateBinaryClassifier(classifier, trainFeatures, trainLabels))
		fmt.Println("benchmark set: ", ai.EvaluateBinaryClassifier(classifier, benchmarkFeatures, benchmarkLabels))
	}

	// Output:
	// ---
	// lambda:  0
	// train set:  1
	// benchmark set:  0.93
	// ---
	// lambda:  0.1
	// train set:  0.94
	// benchmark set:  0.93
	// ---
	// lambda:  0.2
	// train set:  0.94
	// benchmark set:  0.925
	// ---
	// lambda:  0.4
	// train set:  0.925
	// benchmark set:  0.915
	// ---
	// lambda:  0.8
	// train set:  0.875
	// benchmark set:  0.86
	// ---
	// lambda:  1
	// train set:  0.905
	// benchmark set:  0.905
}
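To make explicit how lambda trades training fit for smaller weights in the sweep above, here is a sketch of the standard L2-regularized logistic loss. Whether the ai package scales the penalty by 1/2 or averages it over the sample count is an assumption; the qualitative effect of increasing lambda is the same.

// regularizedLogLoss returns the average negative log-likelihood of 0/1
// labels under a logistic model with weights w, plus lambda/2 * ||w||^2.
package main

import (
	"fmt"
	"math"
)

func regularizedLogLoss(features [][]float64, labels []float64, w []float64, lambda float64) float64 {
	loss := 0.0
	for i, x := range features {
		z := 0.0
		for j, v := range x {
			z += w[j] * v
		}
		p := 1 / (1 + math.Exp(-z)) // predicted probability of label 1
		loss += -(labels[i]*math.Log(p) + (1-labels[i])*math.Log(1-p))
	}
	loss /= float64(len(features))
	penalty := 0.0
	for _, wj := range w {
		penalty += wj * wj
	}
	return loss + 0.5*lambda*penalty
}

func main() {
	x := [][]float64{{1, 2}, {1, -1}}
	y := []float64{1, 0}
	w := []float64{0.5, 1.0}
	// Larger lambda adds a bigger penalty for the same weights, which is why
	// training accuracy falls as lambda grows in the example above.
	fmt.Printf("lambda=0:   %.4f\n", regularizedLogLoss(x, y, w, 0))
	fmt.Printf("lambda=0.8: %.4f\n", regularizedLogLoss(x, y, w, 0.8))
}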