// PredictM predicts one label per row of x using the fitted linear model.
func (self SGD) PredictM(x mat.MatrixRO) (mat.MatrixRO, error) {
	if self.f == nil {
		return nil, fmt.Errorf("Cannot predict before running Fit().")
	}
	if x.Cols() != self.InputDims() {
		return nil, fmt.Errorf("x has %d columns. Expected %d.", x.Cols(), self.InputDims())
	}
	// Add biases to the input matrix
	xb := self.addBiasToMatrix(x)
	// Make predictions for each row
	return self.f.PredictM(xb)
}
// Predict predicts the label for a single input vector x.
func (self SGD) Predict(x mat.MatrixRO) (float64, error) {
	if self.f == nil {
		return 0, fmt.Errorf("Cannot predict before running Fit().")
	}
	if x.Cols() != self.InputDims() {
		return 0, fmt.Errorf("x has %d columns. Expected %d.", x.Cols(), self.InputDims())
	}
	// Add a bias to the input vector
	xb := self.addBiasToMatrix(x)
	// Make a prediction
	return self.f.Predict(xb)
}
/*
Apply applies the function f to each element in A and returns a new matrix
with the results.

Input
=====
A : a matrix
f : a function from scalar values to scalar values

Returns
=======
a matrix derived by applying f to each element in A. If f is nil, then this
function just returns A.
*/
func Apply(A mat.MatrixRO, f SFunction) mat.MatrixRO {
	if f == nil {
		return A
	}
	B := mat.Zeros(A.Rows(), A.Cols())
	for r := 0; r < A.Rows(); r++ {
		for c := 0; c < A.Cols(); c++ {
			B.Set(r, c, f(A.Get(r, c)))
		}
	}
	return B
}
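// A minimal usage sketch for Apply (an assumption: SFunction is a
// func(float64) float64 type defined elsewhere in this package, which is
// consistent with how it is called above but not confirmed in this excerpt):
//
//	m := mat.MakeDenseMatrix([]float64{1, 2, 3, 4}, 2, 2)
//	squared := Apply(m, func(x float64) float64 { return x * x })
//	// squared is a new 2x2 matrix holding [1 4; 9 16]; m is unmodified.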
// addBiasToMatrix copies x into a matrix with one extra column that is
// fixed at 1, so the final weight acts as the bias (intercept) term.
func (self SGD) addBiasToMatrix(x mat.MatrixRO) *mat.DenseMatrix {
	xb := mat.Ones(x.Rows(), self.InputDims()+1)
	for r := 0; r < x.Rows(); r++ {
		for i := 0; i < self.InputDims(); i++ {
			xb.Set(r, i, x.Get(r, i))
		}
	}
	return xb
}
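// For illustration (a sketch, not part of the original source): with
// InputDims() == 2, the row [3 5] becomes [3 5 1], so multiplying by the
// weight column [w0 w1 b] yields 3*w0 + 5*w1 + b.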
// Predict computes f(x·w) for a single row vector x, where f is the
// optional activation function.
func (f LinearFunction) Predict(x mat.MatrixRO) (float64, error) {
	if x.Cols() != f.InputDims() {
		return 0, fmt.Errorf("x has %d columns. Expected %d.", x.Cols(), f.InputDims())
	}
	value, err := x.Times(&f.Weights)
	if err != nil {
		// Check the error before touching the result; the original code read
		// value.Get(0, 0) even when Times failed.
		return 0, err
	}
	if f.AFunc != nil {
		return f.AFunc.Eval(value.Get(0, 0)), nil
	}
	return value.Get(0, 0), nil
}
// PredictM computes the model output for each row of x, applying the
// activation function when one is set.
func (f LinearFunction) PredictM(x mat.MatrixRO) (mat.MatrixRO, error) {
	if x.Cols() != f.InputDims() {
		return nil, fmt.Errorf("x has %d columns. Expected %d.", x.Cols(), f.InputDims())
	}
	y, err := x.Times(&f.Weights)
	if err != nil {
		return nil, fmt.Errorf("Error predicting before applying activation function. %v", err)
	}
	if f.AFunc != nil {
		return Apply(y, f.AFunc.Eval), nil
	}
	return y, nil
}
// Fit trains the linear model on the input matrix x (one sample per row)
// and the label column vector y using stochastic gradient descent.
func (self *SGD) Fit(x mat.MatrixRO, y mat.MatrixRO) error {
	if x.Rows() != y.Rows() {
		return fmt.Errorf("The number of rows in x (%d) does not match the number of rows in y (%d). The matrix x should contain one input vector per row, and y should be a column vector containing one label per input vector.", x.Rows(), y.Rows())
	}
	if y.Cols() != 1 {
		return fmt.Errorf("y must be a column vector.")
	}

	// The number of samples in the data set
	n := x.Rows()

	// Get a dense version of the input matrix
	dx := x.DenseMatrix()

	// If there is no linear model yet, create one.
	if self.f == nil {
		self.inputDims = x.Cols()
		self.f = new(LinearFunction)
		self.f.Weights = *mat.Zeros(self.inputDims+1, 1)
		self.f.AFunc = self.afunc
	} else if self.inputDims != x.Cols() {
		// An existing model can only be trained on additional samples that
		// match the dimensionality of the previous training data.
		return fmt.Errorf("The number of columns in matrix x does not match the dimension of previous training data. Please construct a new SGD instance.")
	}

	for i := 0; i < self.NumIterations; i++ {
		// Pick a random training sample and predict its label.
		index := rand.Intn(n)
		xrow := dx.GetRowVector(index)
		xrowb := self.addBiasToMatrix(xrow)
		yhat, err := self.f.Predict(xrowb)
		if err != nil {
			return fmt.Errorf("Error while predicting with internal linear model. %v", err)
		}

		// Compute the activation function's derivative at the pre-activation
		// value. Without an activation function the model is linear and the
		// derivative is 1.
		yhatPrime := 1.0
		if self.afunc != nil {
			yNoAct, err := xrowb.Times(&self.f.Weights)
			if err != nil {
				return fmt.Errorf("Error while predicting before applying the activation function. %v", err)
			}
			yhatPrime = self.afunc.Deriv(yNoAct.Get(0, 0))
		}

		diff := y.Get(index, 0) - yhat
		for j := 0; j < self.inputDims+1; j++ {
			// Get the old weight value
			oldw := self.f.Weights.Get(j, 0)

			// Calculate the gradient of the squared error. The last weight
			// is the bias, whose input is fixed at 1.
			var grad float64
			if j < self.inputDims {
				grad = -diff * yhatPrime * xrow.Get(0, j)
			} else {
				grad = -diff * yhatPrime
			}

			// Calculate the gradient of the regularization penalty.
			var gpen float64
			if self.PenaltyType == L1_PENALTY {
				gpen = self.Lambda * signum(oldw)
			} else {
				gpen = self.Lambda * oldw
			}

			// Apply the update w <- w - alpha * (grad + gpen).
			alpha := self.LearningRate / float64(self.inputDims)
			self.f.Weights.Set(j, 0, oldw-alpha*(grad+gpen))
		}
	}
	return nil
}
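// A minimal end-to-end sketch (assumptions: the zero value of SGD is usable
// directly, which is consistent with Fit initializing its model when f is
// nil, and L1_PENALTY is the package's L1 constant; the package may instead
// provide a constructor that is not shown in this excerpt):
//
//	x := mat.MakeDenseMatrix([]float64{0, 0, 0, 1, 1, 0, 1, 1}, 4, 2)
//	y := mat.MakeDenseMatrix([]float64{0, 1, 1, 2}, 4, 1)
//	model := SGD{NumIterations: 10000, LearningRate: 0.1, Lambda: 0.001, PenaltyType: L1_PENALTY}
//	if err := model.Fit(x, y); err != nil {
//		log.Fatal(err)
//	}
//	yhat, err := model.Predict(mat.MakeDenseMatrix([]float64{1, 1}, 1, 2))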