func (this *TrainingSet) Variance_sum(i0, i1 int, mean *Matrix.Matrix, res **Matrix.Matrix, sustract *Matrix.Matrix, done chan<- bool) { di := i1 - i0 if di >= THRESHOLD { mi := i0 + di/2 done2 := make(chan bool, THRESHOLD) res1 := Matrix.NullMatrixP(1, this.Xs.GetNColumns()) res2 := Matrix.NullMatrixP(1, this.Xs.GetNColumns()) go this.Variance_sum(i0, mi, mean, &res1, sustract, done2) go this.Variance_sum(mi, i1, mean, &res1, sustract, done2) <-done2 <-done2 SP, _ := Matrix.Sum(res1, res2) *res = SP } else { for i := i0; i <= i1; i++ { xsi := this.Xs.GetRow(i) Sustract, _ := Matrix.Sustract(mean, xsi) Square := Matrix.DotMultiplication(Sustract, Sustract) sustract.SetRow(i, Sustract) SP, _ := Matrix.Sum(Square, *res) *res = SP } } done <- true }
func FFT_ct(this *Matrix.Matrix, N, skip int, tf *[]complex128) *Matrix.Matrix { Xr := Matrix.NullMatrixP(N, this.GetNColumns()) RowTemp := Matrix.NullMatrixP(1, this.GetNColumns()) FFT_aux(this, Xr, RowTemp, N, skip, tf) return Xr }
func MakeTrainingSet(xs *Matrix.Matrix, y *Matrix.Matrix) *TrainingSet { var out TrainingSet if xs.GetMRows() == y.GetMRows() { out.Xs = xs out.Y = y return &out } return nil }
func FFT(this *Matrix.Matrix, N int) (*Matrix.Matrix, error) { if N > this.GetMRows() { return nil, errors.New(" The number of Rows of the matrix (this) must be greater or equal than N ") } if N&(N-1) == 0 { tf := TwiddleFactors(N, false) Xr := FFT_ct3(this, N, 1, &tf) return Xr, nil } return nil, errors.New(" The N parameter has to be power of 2") }
func IFFT(this *Matrix.Matrix, N int) (*Matrix.Matrix, error) { if N > this.GetMRows() { return nil, errors.New(" The number of Rows of the matrix (this) must be greater or equal than N ") } if N&(N-1) == 0 { tf := TwiddleFactors(N, true) Xr := FFT_ct(this, N, 1, &tf) Xr = Xr.Scalar(complex(float64(1)/float64(N), 0)) return Xr, nil } return nil, errors.New(" The N parameter has to be power of 2") }
func DSoftmax(X *Matrix.Matrix) *Matrix.Matrix { Total := 1 / X.TaxicabNorm() Y := X.Scalar(complex(Total, 0)) S, _ := Matrix.Sustract(Matrix.FixValueMatrix(X.GetNColumns(), X.GetNColumns(), 1.0), X) YD := Matrix.DotMultiplication(Y, S) return YD }
func (this *Hypothesis) part_DiffH1Ys(i0, i1 int, Ts *TrainingSet, Ret *Matrix.Matrix, RetT *Matrix.Matrix, done chan<- bool) { di := i1 - i0 if di >= THRESHOLD && runtime.NumGoroutine() < maxGoRoutines { done2 := make(chan bool, THRESHOLD) mi := i0 + di/2 go this.part_DiffH1Ys(i0, mi, Ts, Ret, RetT, done2) go this.part_DiffH1Ys(mi, i1, Ts, Ret, RetT, done2) <-done2 <-done2 } else { for i := i0; i <= i1; i++ { xi := Ts.Xs.GetRow(i) Thi := Matrix.Product(xi, this.ThetaParameters.Transpose()) temp := this.H(Thi.GetValue(1, 1)) - Ts.Y.GetValue(1, i) Ret.SetValue(i, 1, temp) RetT.SetValue(1, i, temp) } } done <- true }
//TODO the activation function and his Derviate has to be more general.. to implemente soft-max for example
// ForwardPropagation runs the input column vector In through every
// weight layer of the network, applying the activation function per
// layer. It returns:
//   As         - pointer to a slice of the (transposed) activations of
//                each layer, bias column included, plus the final output;
//   AsDerviate - pointer to a slice of the activation derivatives,
//                evaluated on each layer's pre-activation input;
//   Output     - the final activations with the bias row stripped.
// All three results are nil when In is not an (Inputs x 1) column vector.
func (this *ANN) ForwardPropagation(In *Matrix.Matrix) (As, AsDerviate *([]*Matrix.Matrix), Output *Matrix.Matrix) {
	if In.GetMRows() == this.Inputs && In.GetNColumns() == 1 {
		// One slot per layer plus one for the final output.
		As1 := make([]*Matrix.Matrix, len(this.Weights)+1, len(this.Weights)+1)
		AsDerviate1 := make([]*Matrix.Matrix, len(this.Weights)+1, len(this.Weights)+1)
		// NOTE(review): this := shadows the named return As; it still works
		// because the explicit return below returns the local, but go vet
		// style would prefer plain assignment as done for AsDerviate.
		As := &As1
		AsDerviate = &AsDerviate1
		sTemp := In.Transpose()
		//Add a new column for a Bias Weight
		sTemp = sTemp.AddColumn(Matrix.I(1))
		// holeInput keeps the pre-activation row ("whole input") so the
		// derivative can be evaluated on it below.
		holeInput := sTemp.Copy()
		As1[0] = sTemp.Transpose()
		//Derivate
		//sutract, _ := Matrix.Sustract(Matrix.OnesMatrix(As1[0].GetMRows(), 1), As1[0])
		//derivate := Matrix.DotMultiplication(As1[0], sutract)
		//derivate := holeInput.Apply(this.Derivate)
		derivate := this.DarivateActivationLayer(holeInput)
		AsDerviate1[0] = derivate.Transpose()
		// Propagate through each weight layer in order.
		for i := 0; i < len(this.Weights); i++ {
			sTemp = Matrix.Product(sTemp, (this.Weights[i]))
			//apply the activation functions
			// Snapshot the pre-activation values before the layer's
			// activation overwrites sTemp (shadows the outer holeInput).
			holeInput := sTemp.Copy()
			sTemp = this.ActivationLayer(sTemp)
			//sTemp = sTemp.Apply(this.Activation)
			//Add a new column for a Bias Weight
			sTemp = sTemp.AddColumn(Matrix.I(1))
			(*As)[i+1] = sTemp.Transpose()
			//Derivate
			//sutract, _ := Matrix.Sustract(Matrix.OnesMatrix((*As)[i+1].GetMRows(), 1), (*As)[i+1])
			//derivate := Matrix.DotMultiplication((*As)[i+1], sutract)
			derivate := this.DarivateActivationLayer(holeInput)
			//derivate := holeInput.Apply(this.Derivate)
			(*AsDerviate)[i+1] = derivate.Transpose()
		}
		// NOTE(review): the last loop iteration already wrote (*As)[len-1];
		// this overwrites that slot with a copy of the same activations.
		Asf := sTemp.Copy()
		//Asf = Asf.AddColumn(Matrix.I(1))
		(*As)[len(As1)-1] = Asf.Transpose()
		// Drop the bias row from the final activations for the output.
		Output = sTemp.Transpose().MatrixWithoutLastRow()
		return As, AsDerviate, Output
	}
	// Input shape mismatch: no propagation is performed.
	return nil, nil, nil
}
func CorssEntorpy(T, O *Matrix.Matrix) *Matrix.Matrix { log := func(x complex128) complex128 { return cmplx.Log(x) } return Matrix.DotMultiplication(T, O.Apply(log)) }
// FFT_ct2 is an iterative (breadth-first) Cooley-Tukey transform over the
// first N rows of the matrix. It ping-pongs butterfly results between the
// two buffers Xr and Scratch, using MatrixWithoutFirstRows as a moving
// row-offset view into the source (E) and destination (Xp) buffers.
// NOTE(review): it always returns Scratch regardless of which buffer the
// final pass wrote into (evenIteration parity) — confirm against callers;
// the recursive FFT_ct appears to be the driver actually in use.
func FFT_ct2(this *Matrix.Matrix, N, skip int, tf *[]complex128) *Matrix.Matrix {
	Xr := Matrix.NullMatrixP(N, this.GetNColumns())
	Scratch := Matrix.NullMatrixP(N, this.GetNColumns())
	var E, D, Xp, Xstart *Matrix.Matrix
	var evenIteration bool
	if N%2 == 0 {
		evenIteration = true
	} else {
		evenIteration = false
	}
	// Trivial 1-point transform: copy the single row through.
	if N == 1 {
		Xr.SetRow(1, this.GetReferenceRow(1))
	}
	// E walks the current source buffer; initially the input itself.
	E = this
	// n doubles each stage: number of sub-transforms combined so far.
	for n := 1; n < N; n *= 2 {
		// Alternate the destination buffer each stage.
		if evenIteration {
			Xstart = Scratch
		} else {
			Xstart = Xr
		}
		// Stage-local stride between butterfly partners (shadows the
		// skip parameter deliberately).
		skip := N / (2 * n)
		Xp = Xstart
		for k := 0; k != n; k++ {
			for m := 0; m != skip; m++ {
				// Partner row lives `skip` rows below the current one;
				// scale it by the stage twiddle factor, then write
				// sum/difference to the two output halves.
				D = E.MatrixWithoutFirstRows(skip)
				D.ScalarRow(1, (*tf)[skip*k])
				sr, rr, _ := Matrix.Sum_Sustract(E.GetReferenceRow(1), D.GetReferenceRow(1))
				Xp.SetRow(1, sr)
				Xp.SetRow(N/2+1, rr)
				// Advance both views one row.
				Xp = Xp.MatrixWithoutFirstRows(1)
				E = E.MatrixWithoutFirstRows(1)
			}
			// Skip over the partner rows already consumed.
			E = E.MatrixWithoutFirstRows(skip)
		}
		// Last stage's output becomes next stage's input.
		E = Xstart
		evenIteration = !evenIteration
	}
	return Scratch
}
// FFT_aux is the recursive Cooley-Tukey core used by FFT_ct. It splits
// the input into even rows (offset 0) and odd rows (offset skip),
// transforms each half into the top and bottom halves of xr, then
// combines them with twiddle-factor butterflies. RowTemp is a shared
// one-row scratch buffer for the scaled odd-half row.
func FFT_aux(this, xr, RowTemp *Matrix.Matrix, N, skip int, tf *[]complex128) {
	// Base case: a 1-point DFT is the sample itself.
	if N == 1 {
		xr.SetRow(1, this.GetReferenceRow(1))
		return
	}
	// Even-indexed rows -> first half of xr.
	FFT_aux(this, xr, RowTemp, N/2, skip*2, tf)
	// Odd-indexed rows (view offset by skip) -> second half of xr.
	FFT_aux(this.MatrixWithoutFirstRows(skip), xr.MatrixWithoutFirstRows(N/2), RowTemp, N/2, skip*2, tf)
	// Butterfly combine: row k+1 gets even + tf*odd, row k+1+N/2 gets
	// even - tf*odd. RowTemp holds the twiddle-scaled odd row.
	for k := 0; k < N/2; k++ {
		xr.ScalarRowIntoRowMatrix(RowTemp, k+1+N/2, (*tf)[k*skip])
		sr, rr, _ := Matrix.Sum_Sustract(xr.GetReferenceRow(k+1), RowTemp)
		xr.SetRow(k+1, sr)
		xr.SetRow(k+1+N/2, rr)
	}
}
func FFT_ct3(this *Matrix.Matrix, N, skip int, tf *[]complex128) *Matrix.Matrix { Xr := Matrix.NullMatrixP(N, this.GetNColumns()) Scratch := Matrix.NullMatrixP(N, this.GetNColumns()) var E, D, Xp, Xstart *Matrix.Matrix var evenIteration bool if N%2 == 0 { evenIteration = true } else { evenIteration = false } if N == 1 { Xr.SetRow(1, this.GetReferenceRow(1)) } E = this for n := 1; n < N; n *= 2 { if evenIteration { Xstart = Scratch } else { Xstart = Xr } skip := N / (2 * n) Xp = Xstart for k := 0; k != n; k++ { var Aux = func(m0, m1 int, Xp, E, D *Matrix.Matrix) { println("-", m0) Xp = Xp.MatrixWithoutFirstRows(m0) E = E.MatrixWithoutFirstRows(m0) //D = E.MatrixWithoutFirstRows(skip) for m := m0; m < m1; m++ { D = E.MatrixWithoutFirstRows(skip) D.ScalarRow(1, (*tf)[skip*k]) sr, rr, _ := Matrix.Sum_Sustract(E.GetReferenceRow(1), D.GetReferenceRow(1)) Xp.SetRow(1, sr) Xp.SetRow(N/2+1, rr) Xp = Xp.MatrixWithoutFirstRows(1) println("E", E.ToString()) E = E.MatrixWithoutFirstRows(1) } } mm := skip / 2 m0 := 0 //m1 := skip go Aux(m0, mm, Xp, E, D) //println("->E", E.ToString(), ">XP", Xp.ToString()) //go Aux(mm, m1, Xp, E, D) //for m := 0; m != skip; m++ { // D = E.MatrixWithoutFirstRows(skip) // D.ScalarRow(1, (*tf)[skip*k]) // sr, rr, _ := Matrix.Sum_Sustract(E.GetReferenceRow(1), D.GetReferenceRow(1)) // Xp.SetRow(1, sr) // Xp.SetRow(N/2+1, rr) // Xp = Xp.MatrixWithoutFirstRows(1) // E = E.MatrixWithoutFirstRows(1) //} E = E.MatrixWithoutFirstRows(skip) } E = Xstart evenIteration = !evenIteration } return Scratch }
func Softmax(X *Matrix.Matrix) *Matrix.Matrix { Total := 1 / X.TaxicabNorm() Y := X.Scalar(complex(Total, 0)) return Y }
// DSigmoidLayer applies the sigmoid derivative DSigmoid elementwise
// to X and returns the resulting matrix.
func DSigmoidLayer(X *Matrix.Matrix) *Matrix.Matrix {
	result := X.Apply(DSigmoid)
	return result
}