Example #1
// Apply applies the layer to an input vector.
func (u *UnstackLayer) Apply(in autofunc.Result) autofunc.Result {
	return &unstackLayerResult{
		OutputVector: u.unstack(in.Output()),
		Input:        in,
		Layer:        u,
	}
}
Example #2
// Apply applies the layer, adding a border around the input.
func (b *BorderLayer) Apply(in autofunc.Result) autofunc.Result {
	return &borderResult{
		OutputVec: b.addBorder(in.Output()),
		Input:     in,
		Info:      b,
	}
}
Example #3
// Apply applies dropout to the input. During training a random
// mask is multiplied in; otherwise the input is scaled by the
// keep probability.
func (d *DropoutLayer) Apply(in autofunc.Result) autofunc.Result {
	if d.Training {
		return autofunc.Mul(in, d.dropoutMask(len(in.Output())))
	} else {
		return autofunc.Scale(in, d.KeepProbability)
	}
}
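
The dropout example above can be exercised directly. A minimal usage sketch (the helper name dropoutExample is made up; it assumes autofunc.Variable implements autofunc.Result by wrapping a plain linalg.Vector, that fmt is imported, and that the sketch lives alongside DropoutLayer):

func dropoutExample() {
	layer := &DropoutLayer{KeepProbability: 0.5, Training: true}
	in := &autofunc.Variable{Vector: linalg.Vector{1, 2, 3, 4}}

	// Training mode: a random mask is multiplied into the input, so
	// some components are zeroed.
	masked := layer.Apply(in)

	// Evaluation mode: deterministic scaling by KeepProbability keeps
	// the expected activation consistent with training.
	layer.Training = false
	scaled := layer.Apply(in) // [0.5 1 1.5 2]

	fmt.Println(masked.Output(), scaled.Output())
}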
Example #4
// Apply adds noise to the input during training and returns the
// input unchanged otherwise.
func (g *GaussNoiseLayer) Apply(in autofunc.Result) autofunc.Result {
	if g.Training {
		return autofunc.Add(in, g.noise(len(in.Output())))
	} else {
		return in
	}
}
Example #5
// networkOutput returns the index of the largest component in the
// result's output vector.
func networkOutput(r autofunc.Result) int {
	out := r.Output()
	var maxIdx int
	var max float64
	for i, x := range out {
		if i == 0 || x > max {
			max = x
			maxIdx = i
		}
	}
	return maxIdx
}
Example #6
// Apply applies the rectified linear unit, zeroing every negative
// component of the input.
func (_ ReLU) Apply(r autofunc.Result) autofunc.Result {
	inVec := r.Output()
	vec := make(linalg.Vector, len(inVec))
	for i, x := range inVec {
		if x > 0 {
			vec[i] = x
		}
	}
	return &reLUResult{
		OutputVec: vec,
		Input:     r,
	}
}
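
For reference, the ReLU above behaves like any other autofunc function. A minimal sketch, assuming ReLU is a zero-field struct, autofunc.Variable wraps a plain vector, and fmt is imported; reluExample is a made-up helper name:

func reluExample() {
	in := &autofunc.Variable{Vector: linalg.Vector{-1.5, 0, 2.5}}
	out := ReLU{}.Apply(in)
	fmt.Println(out.Output()) // negative components are zeroed: [0 0 2.5]
}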
Example #7
// Apply applies the log-softmax function to the input in a
// numerically stable way.
func (s *LogSoftmaxLayer) Apply(in autofunc.Result) autofunc.Result {
	return autofunc.Pool(in, func(in autofunc.Result) autofunc.Result {
		// Compute the log of the sum of the exponentials by
		// factoring out the largest exponent so that all
		// the exponentials fit nicely inside floats.
		maxIdx := maxVecIdx(in.Output())
		maxValue := autofunc.Slice(in, maxIdx, maxIdx+1)
		exponents := autofunc.AddFirst(in, autofunc.Scale(maxValue, -1))
		expSum := autofunc.SumAll(autofunc.Exp{}.Apply(exponents))
		expLog := autofunc.Log{}.Apply(expSum)
		denomLog := autofunc.Add(expLog, maxValue)
		return autofunc.AddFirst(in, autofunc.Scale(denomLog, -1))
	})
}
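
The comment inside the pool refers to the standard log-sum-exp trick: log(sum_i exp(x_i)) = m + log(sum_i exp(x_i - m)) with m the largest x_i, so every exponential stays in [0, 1]. A small self-contained sketch of why the factoring matters numerically (plain math and fmt only, nothing from autofunc; logSumExpSketch is a made-up name):

func logSumExpSketch() {
	xs := []float64{1000, 999, 998}

	// The naive version overflows: math.Exp(1000) is +Inf.
	naive := 0.0
	for _, x := range xs {
		naive += math.Exp(x)
	}
	fmt.Println(math.Log(naive)) // +Inf

	// Factoring out the maximum keeps every exponential in [0, 1].
	m := xs[0]
	for _, x := range xs {
		if x > m {
			m = x
		}
	}
	sum := 0.0
	for _, x := range xs {
		sum += math.Exp(x - m)
	}
	fmt.Println(m + math.Log(sum)) // finite, roughly 1000.41
}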
Example #8
// Batch applies the layer to inputs in batch.
func (m *MaxPoolingLayer) Batch(in autofunc.Result, n int) autofunc.Result {
	outSize := m.OutputWidth() * m.OutputHeight() * m.InputDepth
	inSize := m.InputWidth * m.InputHeight * m.InputDepth
	if len(in.Output()) != n*inSize {
		panic("invalid input size")
	}
	res := &maxPoolingResult{
		OutputVec: make(linalg.Vector, outSize*n),
		Input:     in,
		Layer:     m,
	}
	for i := 0; i < n; i++ {
		outTensor := m.outputTensor(res.OutputVec[i*outSize : (i+1)*outSize])
		inTensor := m.inputTensor(in.Output()[i*inSize : (i+1)*inSize])
		choices := m.evaluate(inTensor, outTensor)
		res.Choices = append(res.Choices, choices)
	}
	return res
}
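
As the size check above implies, Batch expects all n inputs packed back to back in a single flat vector. A minimal calling sketch (maxPoolBatchExample is a made-up helper; it assumes autofunc.Variable wraps a flat linalg.Vector and that the layer's dimensions are already configured):

func maxPoolBatchExample(layer *MaxPoolingLayer) autofunc.Result {
	// Two samples, each InputWidth*InputHeight*InputDepth values long,
	// concatenated into one joint input vector.
	sampleSize := layer.InputWidth * layer.InputHeight * layer.InputDepth
	joint := &autofunc.Variable{Vector: make(linalg.Vector, 2*sampleSize)}
	return layer.Batch(joint, 2)
}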
Example #9
// Batch applies the gate to n inputs packed into one vector.
func (l *lstmGate) Batch(in autofunc.Result, n int) autofunc.Result {
	if l.Peephole == nil {
		return l.Activation.Apply(l.Dense.Batch(in, n))
	}
	return autofunc.Pool(in, func(in autofunc.Result) autofunc.Result {
		vecSize := len(in.Output()) / n
		var weightedInputs []autofunc.Result
		var peepholed []autofunc.Result
		for i := 0; i < n; i++ {
			// Each chunk of vecSize values ends with the portion that
			// receives the peephole connection; everything before it
			// goes through the dense transformation.
			start := vecSize * i
			weightedEnd := start + vecSize - len(l.Peephole.Vector)
			weightedInputs = append(weightedInputs, autofunc.Slice(in, start, weightedEnd))
			peepholeMe := autofunc.Slice(in, weightedEnd, (i+1)*vecSize)
			peepholed = append(peepholed, autofunc.Mul(l.Peephole, peepholeMe))
		}
		weighted := l.Dense.Batch(autofunc.Concat(weightedInputs...), n)
		return l.Activation.Apply(autofunc.Add(autofunc.Concat(peepholed...), weighted))
	})
}
Example #10
// Batch applies the layer to inputs in batch.
func (c *ConvLayer) Batch(in autofunc.Result, n int) autofunc.Result {
	if c.Filters == nil || c.Biases == nil || c.FilterVar == nil {
		panic(uninitPanicMessage)
	}
	outSize := c.OutputWidth() * c.OutputHeight() * c.OutputDepth()
	inSize := c.InputWidth * c.InputHeight * c.InputDepth
	if len(in.Output()) != n*inSize {
		panic("invalid input size")
	}
	res := &convLayerResult{
		OutputVec: make(linalg.Vector, outSize*n),
		Input:     in,
		N:         n,
		Layer:     c,
	}
	for i := 0; i < n; i++ {
		subIn := in.Output()[i*inSize : (i+1)*inSize]
		subOut := res.OutputVec[i*outSize : (i+1)*outSize]
		c.convolve(subIn, c.outputToTensor(subOut))
	}
	return res
}