Example #1
// PropagateStart performs back-propagation through the
// start state.
func (l *LSTM) PropagateStart(_ []State, s []StateGrad, g autofunc.Gradient) {
	if vec, ok := g[l.initState]; ok {
		for _, x := range s {
			vec[:len(vec)/2].Add(linalg.Vector(x.(lstmState).Internal))
			vec[len(vec)/2:].Add(linalg.Vector(x.(lstmState).Output))
		}
	}
}
Example #2
// maxEigenvalue approximates the magnitude of the dominant
// eigenvalue of m using power iteration.
func maxEigenvalue(m *linalg.Matrix) float64 {
	inVec := make(linalg.Vector, m.Rows)
	for i := range inVec {
		inVec[i] = rand.NormFloat64()
	}
	inMat := linalg.NewMatrixColumn(inVec)
	for i := 0; i < npPowerIterations; i++ {
		inMat = m.MulFast(inMat)
		vec := linalg.Vector(inMat.Data)
		vec.Scale(1 / vec.Mag())
	}
	outVec := linalg.Vector(m.MulFast(inMat).Data)
	return outVec.Mag()
}
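maxEigenvalue is a power-iteration estimate: it repeatedly multiplies a random vector by the matrix and renormalizes it, then measures how much the matrix stretches the converged direction. Below is a standalone sketch of the same technique on a 2x2 matrix with known eigenvalues; it assumes the linalg package used throughout these examples is github.com/unixpickle/num-analysis/linalg and sticks to calls that appear on this page.

package main

import (
	"fmt"
	"math/rand"

	"github.com/unixpickle/num-analysis/linalg" // assumed import path
)

func main() {
	// Symmetric matrix with eigenvalues 3 and 1.
	m := &linalg.Matrix{Rows: 2, Cols: 2, Data: []float64{2, 1, 1, 2}}

	// Random starting column, as in maxEigenvalue.
	v := make(linalg.Vector, m.Cols)
	for i := range v {
		v[i] = rand.NormFloat64()
	}
	col := linalg.NewMatrixColumn(v)

	// Multiply and renormalize repeatedly (power iteration).
	for i := 0; i < 30; i++ {
		col = m.MulFast(col)
		vec := linalg.Vector(col.Data)
		vec.Scale(1 / vec.Mag())
	}

	// The stretch of the converged direction approximates
	// the magnitude of the dominant eigenvalue.
	fmt.Println(linalg.Vector(m.MulFast(col).Data).Mag()) // ~3
}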
Example #3
// denseTestInfo sets up a small dense network, an input
// variable, and an output gradient for use in tests.
func denseTestInfo() (network Network, input *autofunc.Variable, grad linalg.Vector) {
	denseLayer := &DenseLayer{
		InputCount:  3,
		OutputCount: 2,
	}
	denseLayer.Randomize()
	network = Network{denseLayer, &Sigmoid{}}

	grad = linalg.Vector([]float64{0.5, -0.3})
	input = &autofunc.Variable{Vector: linalg.Vector([]float64{1, -1, 2})}

	copy(denseLayer.Weights.Data.Vector, []float64{1, 2, 3, -3, 2, -1})
	copy(denseLayer.Biases.Var.Vector, []float64{-6, 9})

	return
}
Example #4
// ROutputs returns the r-outputs, which are the wrapped
// block's r-states viewed as vectors.
func (s *stateOutBlockRResult) ROutputs() []linalg.Vector {
	res := make([]linalg.Vector, len(s.WrappedOut.RStates()))
	for i, state := range s.WrappedOut.RStates() {
		res[i] = linalg.Vector(state.(VecRState).RState)
	}
	return res
}
Example #5
// PropagateVarState is a helper to propagate a gradient
// through a VecState that was derived from a variable.
func PropagateVarState(v *autofunc.Variable, s []StateGrad, g autofunc.Gradient) {
	if vec, ok := g[v]; ok {
		for _, x := range s {
			vec.Add(linalg.Vector(x.(VecStateGrad)))
		}
	}
}
Example #6
// convLayerTestInfo sets up a small convolutional network,
// a fixed input, and an output gradient for use in tests.
func convLayerTestInfo() (network Network, input *autofunc.Variable, outGrad linalg.Vector) {
	layer := &ConvLayer{
		FilterCount:  2,
		FilterWidth:  2,
		FilterHeight: 3,
		Stride:       2,
		InputWidth:   5,
		InputHeight:  7,
		InputDepth:   2,
	}
	network = Network{layer, &Sigmoid{}}
	network.Randomize()

	input = &autofunc.Variable{
		Vector: linalg.Vector([]float64{
			0.820, 0.548, 0.005, 0.850, 0.589, 0.882, 0.185, 0.243, 0.432, 0.734,
			0.478, 0.442, 0.835, 0.400, 0.270, 0.816, 0.467, 0.012, 0.060, 0.241,
			0.821, 0.069, 0.448, 0.691, 0.735, 0.090, 0.824, 0.042, 0.657, 0.707,
			0.218, 0.804, 0.025, 0.650, 0.833, 0.763, 0.788, 0.953, 0.796, 0.500,
			0.620, 0.038, 0.702, 0.524, 0.512, 0.699, 0.831, 0.122, 0.945, 0.840,
			0.584, 0.566, 0.586, 0.560, 0.109, 0.577, 0.785, 0.908, 0.080, 0.763,
			0.430, 0.561, 0.474, 0.516, 0.508, 0.062, 0.126, 0.371, 0.422, 0.424,
		}),
	}

	outGrad = linalg.Vector([]float64{
		0.388, 0.634, 0.752, 0.902,
		0.905, 0.047, 0.395, 0.808,
		0.648, 0.892, 0.154, 0.786,
	})

	copy(layer.Filters[0].Data, []float64{
		0.348, 0.299, 0.946, 0.806,
		0.101, 0.705, 0.821, 0.819,
		0.106, 0.348, 0.285, 0.133,
	})
	copy(layer.Filters[1].Data, []float64{
		0.293, 0.494, 0.148, 0.758,
		0.901, 0.050, 0.415, 0.892,
		0.736, 0.458, 0.465, 0.167,
	})
	copy(layer.Biases.Vector, []float64{0.333, -0.255})

	return
}
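For reference, the sizes in this helper line up with the usual convolution output formula, assuming ConvLayer maps each dimension through (in - filter)/stride + 1 (an inference from the shapes, not something stated here): outWidth = (5 - 2)/2 + 1 = 2 and outHeight = (7 - 3)/2 + 1 = 3, giving 2*3 = 6 filter placements, and with 2 filters that is 12 outputs, matching the 12 entries of outGrad. Likewise each filter holds FilterWidth*FilterHeight*InputDepth = 2*3*2 = 12 weights, matching the 12 values copied into Filters[0] and Filters[1].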
Example #7
// PoolVecStates creates a pool variable for each VecState
// in a list of VecStates.
// It also puts the same variables in a slice of
// autofunc.Results for convenience.
func PoolVecStates(s []State) ([]*autofunc.Variable, []autofunc.Result) {
	vars := make([]*autofunc.Variable, len(s))
	reses := make([]autofunc.Result, len(s))
	for i, x := range s {
		vars[i] = &autofunc.Variable{Vector: linalg.Vector(x.(VecState))}
		reses[i] = vars[i]
	}
	return vars, reses
}
Example #8
// PropagateGradient merges the upstream output gradients
// with the upstream state gradients and pushes the result
// through the wrapped block as state gradients.
func (s *stateOutBlockResult) PropagateGradient(u []linalg.Vector, su []StateGrad,
	g autofunc.Gradient) []StateGrad {
	downstream := make([]StateGrad, len(s.WrappedOut.Outputs()))
	for i := range s.WrappedOut.Outputs() {
		var vec linalg.Vector
		if u != nil {
			vec = u[i].Copy()
		}
		if su != nil && su[i] != nil {
			sVec := su[i].(VecStateGrad)
			if vec == nil {
				vec = linalg.Vector(sVec).Copy()
			} else {
				vec.Add(linalg.Vector(sVec))
			}
		}
		if vec != nil {
			downstream[i] = VecStateGrad(vec)
		}
	}
	return s.WrappedOut.PropagateGradient(nil, downstream, g)
}
Example #9
// PropagateGradient accumulates the upstream and state
// gradients into a single vector and back-propagates it
// through the pooled input states.
func (g *gruResult) PropagateGradient(u []linalg.Vector, s []StateGrad,
	grad autofunc.Gradient) []StateGrad {
	if len(g.InStates) == 0 {
		return nil
	}
	downstream := make(linalg.Vector, len(g.Output.Output()))
	cells := len(downstream) / len(g.InStates)
	for i, stateObj := range s {
		if stateObj != nil {
			state := linalg.Vector(stateObj.(VecStateGrad))
			downstream[i*cells : (i+1)*cells].Add(state)
		}
	}
	for i, uVec := range u {
		downstream[i*cells : (i+1)*cells].Add(uVec)
	}
	return PropagateVecStatePool(grad, g.InStates, func() {
		g.Output.PropagateGradient(downstream, grad)
	})
}
Example #10
// rbmEnergy computes the energy of a joint visible/hidden
// configuration of a restricted Boltzmann machine.
func rbmEnergy(r *RBM, input, output []bool) float64 {
	inputVec := make(linalg.Vector, len(input))
	for i, x := range input {
		if x {
			inputVec[i] = 1
		}
	}
	outputVec := make(linalg.Vector, len(output))
	for i, x := range output {
		if x {
			outputVec[i] = 1
		}
	}

	energy := inputVec.Dot(r.VisibleBiases)
	energy += outputVec.Dot(r.HiddenBiases)

	inputCol := linalg.NewMatrixColumn(inputVec)
	energy += outputVec.Dot(linalg.Vector(r.Weights.Mul(inputCol).Data))

	return -energy
}
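rbmEnergy negates the three standard RBM energy terms: visible bias, hidden bias, and the interaction term, i.e. energy = -(v·b_v + h·b_h + h·W·v), with Weights stored hidden-by-visible so that Weights.Mul of a visible column yields a hidden-sized vector. A hand-checkable sketch, assuming it runs in the same package as rbmEnergy and that an RBM can be built directly from the three fields used above (the field types here are an assumption):

// 2 visible units, 2 hidden units, hand-picked parameters.
r := &RBM{
	Weights:       &linalg.Matrix{Rows: 2, Cols: 2, Data: []float64{1, 0, 0, 2}},
	VisibleBiases: linalg.Vector{0.5, -0.5},
	HiddenBiases:  linalg.Vector{1, 1},
}

// v = (1, 0), h = (1, 1):
// v·b_v = 0.5, h·b_h = 2, h·W·v = 1, so the energy is -(0.5 + 2 + 1).
fmt.Println(rbmEnergy(r, []bool{true, false}, []bool{true, true})) // -3.5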
Example #11
// optimalStep computes the optimal step size along the
// search direction d.
func (g *gradientIterator) optimalStep(d linalg.Vector) float64 {
	// The optimal step size is (d'*b - x'*A*d)/(d'*A*d)
	// where d is the direction, A is the matrix, x is
	// the current approximate solution, and b is all 1's.

	dMat := &linalg.Matrix{
		Rows: len(d),
		Cols: 1,
		Data: d,
	}
	ad := linalg.Vector(g.matrix.Mul(dMat).Data)

	summer := kahan.NewSummer64()
	for _, x := range d {
		summer.Add(x)
	}

	numerator := summer.Sum() - g.solution.Dot(ad)
	denominator := d.Dot(ad)

	return numerator / denominator
}
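With b all ones, d'*b is just the sum of d's entries (hence the Kahan summation), and g.solution.Dot(ad) is x'*A*d, so the code matches the comment: minimizing f(y) = 1/2*y'*A*y - b'*y along x + t*d gives t = (d'*b - x'*A*d)/(d'*A*d). A quick standalone check of that formula on a 2x2 system, again assuming the github.com/unixpickle/num-analysis/linalg import:

package main

import (
	"fmt"

	"github.com/unixpickle/num-analysis/linalg" // assumed import path
)

func main() {
	a := &linalg.Matrix{Rows: 2, Cols: 2, Data: []float64{2, 0, 0, 4}}
	x := linalg.Vector{0, 0}    // current approximate solution
	d := linalg.Vector{1, 1}    // search direction
	ones := linalg.Vector{1, 1} // b is all 1's

	ad := linalg.Vector(a.Mul(&linalg.Matrix{Rows: 2, Cols: 1, Data: d}).Data)
	step := (d.Dot(ones) - x.Dot(ad)) / d.Dot(ad) // (2 - 0) / 6 = 1/3

	// f(x + t*d) for f(y) = 1/2*y'*A*y - b'*y.
	f := func(t float64) float64 {
		y := d.Copy()
		y.Scale(t)
		y.Add(x)
		ay := linalg.Vector(a.Mul(&linalg.Matrix{Rows: 2, Cols: 1, Data: y}).Data)
		return 0.5*y.Dot(ay) - y.Dot(ones)
	}

	fmt.Println(step)                              // 0.333...
	fmt.Println(f(step-0.1), f(step), f(step+0.1)) // the middle value is smallest
}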