// Example #1
// ComputeRegularization computes the regularization term that must be added
// to the partial-derivative vector, according to the scheme selected in
// options. Scheme 1 is L1, scheme 2 is L2; any other value yields a zero
// matrix (only the structure copied from weights).
func ComputeRegularization(weights *util.Matrix, options OptimizerOptions) *util.Matrix {
	reg := weights.Populate()

	switch options.RegularizationScheme {
	case 1:
		// L1 regularization: derivative is ±RegularizationFactor,
		// following the sign of each weight.
		for iLabel := 0; iLabel < weights.NumLabels(); iLabel++ {
			for _, k := range weights.GetValues(iLabel).Keys() {
				factor := options.RegularizationFactor
				if weights.Get(iLabel, k) <= 0 {
					factor = -factor
				}
				reg.Set(iLabel, k, factor)
			}
		}
	case 2:
		// L2 regularization: derivative is 2 * factor * weight.
		for iLabel := 0; iLabel < weights.NumLabels(); iLabel++ {
			for _, k := range weights.GetValues(iLabel).Keys() {
				reg.Set(iLabel, k, options.RegularizationFactor*2.0*weights.Get(iLabel, k))
			}
		}
	}

	return reg
}
// ComputeZ computes z = 1 + sum over labels of exp(w_label · features).
//
// As a side effect, each label's exp(w_label · features) is stored in the
// corresponding row of temp: written only at the feature keys when the row
// is sparse, to every element when it is dense. The label argument is
// accepted for signature compatibility but not used here.
func ComputeZ(weights *util.Matrix, features *util.Vector, label int, temp *util.Matrix) float64 {
	z := 1.0

	// Labels are numbered from 1; row iLabel holds label iLabel+1's weights.
	for iLabel := 0; iLabel < weights.NumLabels(); iLabel++ {
		e := math.Exp(util.VecDotProduct(features, weights.GetValues(iLabel)))
		z += e

		row := temp.GetValues(iLabel)
		if row.IsSparse() {
			for _, k := range features.Keys() {
				row.Set(k, e)
			}
		} else {
			row.SetAll(e)
		}
	}

	return z
}
// MaxEntComputeInstanceDerivative computes the derivative of the
// maximum-entropy objective for a single training instance and writes it
// into instanceDerivative.
//
// For each label the derivative row is features * exp(..)/z, with an extra
// -features term on the row matching the instance's observed label.
func MaxEntComputeInstanceDerivative(
	weights *util.Matrix, instance *data.Instance, instanceDerivative *util.Matrix) {
	features := instance.Features
	label := instance.Output.Label

	// z = 1 + sum(exp(w_i · x)); the exp terms are cached in
	// instanceDerivative by ComputeZ and reused by Multiply below.
	z := ComputeZ(weights, features, label, instanceDerivative)
	zInv := 1.0 / z

	// Labels are numbered from 1; row iLabel-1 holds label iLabel's values.
	// (label == 0 denotes the background label, which never equals iLabel.)
	for iLabel := 1; iLabel <= weights.NumLabels(); iLabel++ {
		row := instanceDerivative.GetValues(iLabel - 1)
		if iLabel == label {
			row.Multiply(zInv, -1, features)
		} else {
			row.Multiply(zInv, 0, features)
		}
	}
}
// Example #4
// GetDeltaX takes the current point x_k and gradient g_k and returns the
// increment to apply to x: d_k = -H_k * g_k, where H_k is the L-BFGS
// approximation of the inverse Hessian, computed via the standard
// two-loop recursion over the stored (s, y) history.
func (opt *lbfgsOptimizer) GetDeltaX(x, g *util.Matrix) *util.Matrix {
	if x.NumLabels() != g.NumLabels() {
		log.Fatal("x和g的维度不一致")
	}

	// Allocate the internal history buffers on the first call.
	if opt.k == 0 {
		if x.IsSparse() {
			opt.initStruct(x.NumLabels(), 0, x.IsSparse())
		} else {
			opt.initStruct(x.NumLabels(), x.NumValues(), x.IsSparse())
		}
	}

	// History is a ring buffer of size *lbfgs_history_size.
	currIndex := util.Mod(opt.k, *lbfgs_history_size)

	// Record x_k.
	opt.x[currIndex].DeepCopy(x)

	// Record g_k.
	opt.g[currIndex].DeepCopy(g)

	// On step 0 there is no curvature history yet, so fall back to plain
	// gradient descent: d_0 = -g_0.
	if opt.k == 0 {
		opt.k++
		return g.Opposite()
	}

	prevIndex := util.Mod(opt.k-1, *lbfgs_history_size)

	// s_(k-1) = x_k - x_(k-1)
	opt.s[prevIndex].WeightedSum(opt.x[currIndex], opt.x[prevIndex], 1, -1)

	// y_(k-1) = g_k - g_(k-1)
	opt.y[prevIndex].WeightedSum(opt.g[currIndex], opt.g[prevIndex], 1, -1)

	// ro_(k-1) = 1 / (y_(k-1) · s_(k-1))
	opt.ro.Set(prevIndex, 1.0/util.MatrixDotProduct(opt.y[prevIndex], opt.s[prevIndex]))

	// Lower bound for both loops: only the most recent
	// *lbfgs_history_size steps are retained.
	lowerBound := opt.k - *lbfgs_history_size
	if lowerBound < 0 {
		lowerBound = 0
	}

	// First loop (newest to oldest): q <- q - alpha_i * y_i,
	// with alpha_i = ro_i * (s_i · q), starting from q = g_k.
	opt.q.DeepCopy(g)
	for i := opt.k - 1; i >= lowerBound; i-- {
		currIndex := util.Mod(i, *lbfgs_history_size)
		opt.alpha.Set(currIndex,
			opt.ro.Get(currIndex)*util.MatrixDotProduct(opt.s[currIndex], opt.q))
		opt.q.Increment(opt.y[currIndex], -opt.alpha.Get(currIndex))
	}

	// Second loop (oldest to newest): z <- z + (alpha_i - beta_i) * s_i,
	// with beta_i = ro_i * (y_i · z), starting from z = q.
	opt.z.DeepCopy(opt.q)
	for i := lowerBound; i <= opt.k-1; i++ {
		currIndex := util.Mod(i, *lbfgs_history_size)
		opt.beta.Set(currIndex,
			opt.ro.Get(currIndex)*util.MatrixDotProduct(opt.y[currIndex], opt.z))
		opt.z.Increment(opt.s[currIndex],
			opt.alpha.Get(currIndex)-opt.beta.Get(currIndex))
	}

	// Advance the step counter.
	opt.k++

	// z now holds H_k * g_k; the descent direction is its negation.
	return opt.z.Opposite()
}