// BatchR is like Batch, but for RResults.
func (c *ConvLayer) BatchR(rv autofunc.RVector, in autofunc.RResult, n int) autofunc.RResult {
	if c.Filters == nil || c.Biases == nil || c.FilterVar == nil {
		panic(uninitPanicMessage)
	}
	outSize := c.OutputWidth() * c.OutputHeight() * c.OutputDepth()
	inSize := c.InputWidth * c.InputHeight * c.InputDepth
	if len(in.Output()) != n*inSize {
		panic("invalid input size")
	}
	res := &convLayerRResult{
		OutputVec:  make(linalg.Vector, outSize*n),
		ROutputVec: make(linalg.Vector, outSize*n),
		Input:      in,
		FiltersR:   rv[c.FilterVar],
		N:          n,
		Layer:      c,
	}
	for i := 0; i < n; i++ {
		subIn := in.Output()[i*inSize : (i+1)*inSize]
		subOut := res.OutputVec[i*outSize : (i+1)*outSize]
		c.convolve(subIn, c.outputToTensor(subOut))
		subInR := in.ROutput()[i*inSize : (i+1)*inSize]
		subOutR := res.ROutputVec[i*outSize : (i+1)*outSize]
		c.convolveR(rv, subIn, subInR, c.outputToTensor(subOutR))
	}
	return res
}
// BatchR is like Batch, but for RResults.
func (m *MaxPoolingLayer) BatchR(rv autofunc.RVector, in autofunc.RResult, n int) autofunc.RResult {
	outSize := m.OutputWidth() * m.OutputHeight() * m.InputDepth
	inSize := m.InputWidth * m.InputHeight * m.InputDepth
	if len(in.Output()) != n*inSize {
		panic("invalid input size")
	}
	res := &maxPoolingRResult{
		OutputVec:  make(linalg.Vector, outSize*n),
		ROutputVec: make(linalg.Vector, outSize*n),
		Input:      in,
		Layer:      m,
	}
	for i := 0; i < n; i++ {
		outTensor := m.outputTensor(res.OutputVec[i*outSize : (i+1)*outSize])
		inTensor := m.inputTensor(in.Output()[i*inSize : (i+1)*inSize])
		choices := m.evaluate(inTensor, outTensor)
		res.Choices = append(res.Choices, choices)
		outTensorR := m.outputTensor(res.ROutputVec[i*outSize : (i+1)*outSize])
		inTensorR := m.inputTensor(in.ROutput()[i*inSize : (i+1)*inSize])
		choices.ForwardPropagate(inTensorR, outTensorR)
	}
	return res
}
// manualNetworkSeq evaluates f step by step over each input sequence,
// threading a state vector of size stateSize between steps, and returns
// the per-step outputs and their R-outputs.
func manualNetworkSeq(rv autofunc.RVector, f autofunc.RFunc, start *autofunc.Variable,
	ins [][]*autofunc.Variable, stateSize int) (out, outR [][]linalg.Vector) {
	out = make([][]linalg.Vector, len(ins))
	outR = make([][]linalg.Vector, len(ins))
	for seqIdx, inSeq := range ins {
		var state autofunc.RResult = autofunc.NewRVariable(start, rv)
		for _, in := range inSeq {
			inR := rv[in]
			packedIn := append(linalg.Vector{}, in.Output()...)
			packedIn = append(packedIn, state.Output()...)
			packedInR := append(linalg.Vector{}, inR...)
			packedInR = append(packedInR, state.ROutput()...)
			stepOut := f.ApplyR(rv, &autofunc.RVariable{
				Variable:   &autofunc.Variable{Vector: packedIn},
				ROutputVec: packedInR,
			})
			outSize := len(stepOut.Output()) - stateSize
			out[seqIdx] = append(out[seqIdx], stepOut.Output()[:outSize])
			outR[seqIdx] = append(outR[seqIdx], stepOut.ROutput()[:outSize])
			state = &autofunc.RVariable{
				Variable:   &autofunc.Variable{Vector: stepOut.Output()[outSize:]},
				ROutputVec: stepOut.ROutput()[outSize:],
			}
		}
	}
	return
}
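// The sketch below is not from the original source: the function name, the
// use of DenseLayer as the step function, and all sizes are illustrative
// assumptions. It shows the calling convention manualNetworkSeq expects:
// each step input is the per-step variable concatenated with the previous
// state, and each step output is the visible output followed by the next
// state of length stateSize.
func manualNetworkSeqSketch() {
	step := &DenseLayer{InputCount: 3, OutputCount: 3} // 1 input + 2 state in; 1 output + 2 state out
	step.Randomize()
	start := &autofunc.Variable{Vector: linalg.Vector{0, 0}}
	in := &autofunc.Variable{Vector: linalg.Vector{1}}
	rv := autofunc.RVector{in: linalg.Vector{1}}
	out, outR := manualNetworkSeq(rv, step, start, [][]*autofunc.Variable{{in}}, 2)
	_, _ = out, outR // out[0][0] and outR[0][0] each hold the single visible output value.
}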
// ApplyR is like Apply, but for RResults.
func (g *GaussNoiseLayer) ApplyR(v autofunc.RVector, in autofunc.RResult) autofunc.RResult {
	if g.Training {
		return autofunc.AddR(in, g.noiseR(len(in.Output())))
	}
	return in
}
// ApplyR is like Apply, but for RResults.
func (u *UnstackLayer) ApplyR(v autofunc.RVector, in autofunc.RResult) autofunc.RResult {
	return &unstackLayerRResult{
		OutputVector:  u.unstack(in.Output()),
		ROutputVector: u.unstack(in.ROutput()),
		Input:         in,
		Layer:         u,
	}
}
// ApplyR is like Apply, but for RResults.
func (b *BorderLayer) ApplyR(rv autofunc.RVector, in autofunc.RResult) autofunc.RResult {
	return &borderRResult{
		OutputVec:  b.addBorder(in.Output()),
		ROutputVec: b.addBorder(in.ROutput()),
		Input:      in,
		Info:       b,
	}
}
// ApplyR is like Apply, but for RResults.
func (d *DropoutLayer) ApplyR(v autofunc.RVector, in autofunc.RResult) autofunc.RResult {
	if d.Training {
		mask := d.dropoutMask(len(in.Output()))
		maskVar := autofunc.NewRVariable(mask, v)
		return autofunc.MulR(in, maskVar)
	}
	return autofunc.ScaleR(in, d.KeepProbability)
}
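// A minimal usage sketch (not from the original source; the function name
// and field values are illustrative). At evaluation time, ApplyR just
// scales the input by KeepProbability, which matches the expected value of
// the random training-time mask.
func dropoutApplyRSketch() {
	d := &DropoutLayer{KeepProbability: 0.5, Training: false}
	v := &autofunc.Variable{Vector: linalg.Vector{2, 4}}
	rv := autofunc.RVector{}
	out := d.ApplyR(rv, autofunc.NewRVariable(v, rv))
	_ = out // out.Output() is {1, 2}; out.ROutput() is all zeros.
}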
// ApplyR is like Apply, but for RResults.
func (v *VecRescaleLayer) ApplyR(rv autofunc.RVector, in autofunc.RResult) autofunc.RResult {
	// The biases and scales are constants, so their R-derivatives are zero.
	zeroVec := make(linalg.Vector, len(in.Output()))
	biases := &autofunc.RVariable{
		Variable:   &autofunc.Variable{Vector: v.Biases},
		ROutputVec: zeroVec,
	}
	scales := &autofunc.RVariable{
		Variable:   &autofunc.Variable{Vector: v.Scales},
		ROutputVec: zeroVec,
	}
	return autofunc.MulR(autofunc.AddR(in, biases), scales)
}
// ApplyR is like Apply, but for RResults.
func (s *LogSoftmaxLayer) ApplyR(v autofunc.RVector, in autofunc.RResult) autofunc.RResult {
	return autofunc.PoolR(in, func(in autofunc.RResult) autofunc.RResult {
		// See the comment in Apply() for details on how this works:
		// log(softmax(x)) = x - (log(sum(exp(x - max))) + max), which
		// avoids overflow in the exponentials.
		maxIdx := maxVecIdx(in.Output())
		maxValue := autofunc.SliceR(in, maxIdx, maxIdx+1)
		exponents := autofunc.AddFirstR(in, autofunc.ScaleR(maxValue, -1))
		expSum := autofunc.SumAllR(autofunc.Exp{}.ApplyR(v, exponents))
		expLog := autofunc.Log{}.ApplyR(v, expSum)
		denomLog := autofunc.AddR(expLog, maxValue)
		return autofunc.AddFirstR(in, autofunc.ScaleR(denomLog, -1))
	})
}
// ApplyR is like Apply, but for RResults.
func (_ ReLU) ApplyR(v autofunc.RVector, r autofunc.RResult) autofunc.RResult {
	outVec := r.Output()
	outVecR := r.ROutput()
	vec := make(linalg.Vector, len(outVec))
	vecR := make(linalg.Vector, len(outVec))
	for i, x := range outVec {
		// Where the input is positive, pass both the value and its
		// R-derivative through; elsewhere both stay zero.
		if x > 0 {
			vec[i] = x
			vecR[i] = outVecR[i]
		}
	}
	return &reLURResult{
		OutputVec:  vec,
		ROutputVec: vecR,
		Input:      r,
	}
}
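// A minimal usage sketch (not from the original source; the function name
// and values are illustrative). Both the output and the directional
// derivative pass through where the input is positive and are zeroed
// elsewhere.
func reLUApplyRSketch() {
	v := &autofunc.Variable{Vector: linalg.Vector{-1, 2, 3}}
	rv := autofunc.RVector{v: linalg.Vector{0.5, 0.5, 0.5}}
	out := ReLU{}.ApplyR(rv, autofunc.NewRVariable(v, rv))
	_ = out // out.Output() is {0, 2, 3}; out.ROutput() is {0, 0.5, 0.5}.
}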
// BatchR is like Batch, but for RResults.
func (l *lstmGate) BatchR(rv autofunc.RVector, in autofunc.RResult, n int) autofunc.RResult {
	if l.Peephole == nil {
		return l.Activation.ApplyR(rv, l.Dense.BatchR(rv, in, n))
	}
	return autofunc.PoolR(in, func(in autofunc.RResult) autofunc.RResult {
		// Each batch element is laid out as the dense input followed by
		// the cell state that the peephole weights multiply element-wise.
		vecSize := len(in.Output()) / n
		var weightedInputs []autofunc.RResult
		var peepholed []autofunc.RResult
		peephole := autofunc.NewRVariable(l.Peephole, rv)
		for i := 0; i < n; i++ {
			start := vecSize * i
			weightedEnd := start + vecSize - len(l.Peephole.Vector)
			weightedInputs = append(weightedInputs,
				autofunc.SliceR(in, start, weightedEnd))
			peepholeMe := autofunc.SliceR(in, weightedEnd, (i+1)*vecSize)
			peepholed = append(peepholed, autofunc.MulR(peephole, peepholeMe))
		}
		weighted := l.Dense.BatchR(rv, autofunc.ConcatR(weightedInputs...), n)
		joinedPeep := autofunc.ConcatR(peepholed...)
		return l.Activation.ApplyR(rv, autofunc.AddR(joinedPeep, weighted))
	})
}