// trainNextBaseEstimator fits one base classifier against the current sample weights,
// computes its SAMME coefficient and weighted error, and re-weights the samples for the
// next boosting round. It returns the estimator coefficient, the trained base classifier
// and the weighted prediction error.
func (trainer *AdaboostClassifierTrainer) trainNextBaseEstimator(data_set *mlearn.DataSet, step int) (
	float64, mlearn.BaseClassifier, float64) {
	base_estimator := trainer.baseModelTrainer.TrainClassifierWithWeights(data_set, trainer.weights)
	// accumulate the base model's per-feature ranks into the embedded features ranking
	if trainer.options.EnableEmbeddedFeaturesRanking {
		features_rank := trainer.baseModelTrainer.GetFeaturesRank()
		for j := range trainer.EmbeddedFeaturesRank {
			trainer.EmbeddedFeaturesRank[j] += features_rank[j]
		}
	}

	// run the new base estimator over the data set and cache its predictions
	// for the error estimate and weight updates below
	sample := make([]float64, data_set.FeaturesNum)
	for i := range trainer.prediction {
		data_set.GetSampleInplace(i, sample)
		trainer.prediction[i] = base_estimator.PredictProbe(sample)
	}

	// calculate the weighted prediction error
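	// (the weights are kept normalized to a 1.0 sum, so err below is a weighted error rate, not a raw count)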
	var err float64
	for i := 0; i < data_set.SamplesNum; i++ {
		if trainer.prediction[i] != data_set.Classes[i] {
			err += trainer.weights[i]
		}
	}

	// guard against a math.Log domain violation: err == 0 means the base estimator is already perfect and there's nothing left to boost
	if err == 0 {
		return 1.0, base_estimator, err
	}
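	// SAMME coefficient for multi-class boosting: the extra log(K-1) term keeps b positive
	// as long as the weighted error stays below (K-1)/K, where K is the number of classes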
	b := math.Log((1.0-err)/err) + math.Log(float64(data_set.ClassesNum)-1.0)

	// update weights according to error
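	// (SAMME rule: w_i <- w_i * exp(b * I(prediction_i != class_i)), followed by renormalization)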
	const minimalWeight float64 = 10e-9
	var weights_sum float64
	for i := range trainer.weights {
		if trainer.prediction[i] != data_set.Classes[i] {
			trainer.weights[i] *= math.Exp(b)

			if trainer.weights[i] < minimalWeight { /* this is to deal with rounding errors */
				trainer.weights[i] = 0
			}
		} else {
			// correctly classified samples are left untouched here: the SAMME rule relies on the
			// renormalization below to scale them down, so the symmetric variant stays disabled
			//trainer.weights[i] *= math.Exp(-b)
		}
		weights_sum += trainer.weights[i]
	}

	// normalize weights to 1.0 sum
	var normalized_sum float64
	for i := range trainer.weights {
		trainer.weights[i] /= weights_sum
		normalized_sum += trainer.weights[i]
	}
	// dirty hack to guarantee that the weights sum to exactly 1.0 despite rounding errors, which the base model relies on
	trainer.weights[0] += 1.0 - normalized_sum

	return b, base_estimator, err
}
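
// A minimal sketch of how a boosting loop might consume trainNextBaseEstimator; the field and
// option names used here (estimators, estimatorWeights, options.StepsNum) are illustrative
// assumptions rather than the trainer's actual API:
//
//	for step := 0; step < trainer.options.StepsNum; step++ {
//		b, estimator, err := trainer.trainNextBaseEstimator(data_set, step)
//		trainer.estimators = append(trainer.estimators, estimator)
//		trainer.estimatorWeights = append(trainer.estimatorWeights, b)
//		if err == 0 {
//			// a perfect base estimator leaves nothing to boost; stop early
//			break
//		}
//	}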