Example #1
// InnerProduct computes a Euclidean inner product.
func (e *Euclidean) InnerProduct(vectorX *mat64.Dense, vectorY *mat64.Dense) float64 {
	// Multiply element-wise, then sum every entry: sum(x_i * y_i).
	var subVector mat64.Dense
	subVector.MulElem(vectorX, vectorY)
	result := mat64.Sum(&subVector)

	return result
}
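For context, a minimal usage sketch. The two 1x3 vectors and the bare &Euclidean{} literal are assumptions for illustration, not part of the example above; it also assumes the fmt and mat64 imports used by the example.

	x := mat64.NewDense(1, 3, []float64{1, 2, 3})
	y := mat64.NewDense(1, 3, []float64{4, 5, 6})

	e := &Euclidean{}
	fmt.Println(e.InnerProduct(x, y)) // 1*4 + 2*5 + 3*6 = 32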
Example #2
// vectorDistance returns the squared Euclidean distance between vec1 and vec2.
func vectorDistance(vec1, vec2 *mat.Vector) (v float64) {
	result := mat.NewVector(vec1.Len(), nil)

	result.SubVec(vec1, vec2)
	result.MulElemVec(result, result)
	v = mat.Sum(result)

	return
}
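Note that the snippet above returns the squared distance. A caller wanting the actual Euclidean distance would take the square root; euclideanDistance below is a hypothetical wrapper, assuming the math and mat imports used above:

// euclideanDistance is a hypothetical wrapper around vectorDistance, which
// sums the squared element-wise differences; the distance is its square root.
func euclideanDistance(vec1, vec2 *mat.Vector) float64 {
	return math.Sqrt(vectorDistance(vec1, vec2))
}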
Example #3
// rowSum returns a rows-by-1 matrix whose i-th entry is the sum of row i of M.
func rowSum(M mat.Matrix) mat.Matrix {
	rows, _ := M.Dims()

	floatRes := make([]float64, rows)
	for i := 0; i < rows; i++ {
		floatRes[i] = mat.Sum(getRowVector(i, M))
	}

	return mat.NewDense(rows, 1, floatRes)
}
Example #4
// columnSum returns a 1-by-cols matrix whose j-th entry is the sum of column j of M.
func columnSum(M mat.Matrix) mat.Matrix {
	_, cols := M.Dims()

	floatRes := make([]float64, cols)
	for i := 0; i < cols; i++ {
		floatRes[i] = mat.Sum(getColumnVector(i, M))
	}

	return mat.NewDense(1, cols, floatRes)
}
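Examples #3 and #4 call getRowVector and getColumnVector, which are not shown. A minimal sketch of what they might look like, assuming the gonum.org/v1/gonum/mat API (mat.Row, mat.Col and mat.NewVecDense); the original helpers may differ:

// getRowVector copies row i of M into a new vector.
func getRowVector(i int, M mat.Matrix) *mat.VecDense {
	_, cols := M.Dims()
	return mat.NewVecDense(cols, mat.Row(nil, i, M))
}

// getColumnVector copies column j of M into a new vector.
func getColumnVector(j int, M mat.Matrix) *mat.VecDense {
	rows, _ := M.Dims()
	return mat.NewVecDense(rows, mat.Col(nil, j, M))
}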
Example #5
func Cost(x *mat64.Dense, y, theta *mat64.Vector) float64 {
	// initialize receivers
	m, _ := x.Dims()
	h := mat64.NewDense(m, 1, make([]float64, m))
	squaredErrors := mat64.NewDense(m, 1, make([]float64, m))

	// actual computation
	h.Mul(x, theta)
	squaredErrors.Apply(func(r, c int, v float64) float64 {
		return math.Pow(h.At(r, c)-y.At(r, c), 2)
	}, h)
	j := mat64.Sum(squaredErrors) / (2.0 * float64(m))

	return j
}
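Example #5 is the standard linear-regression cost J(theta) = (1/(2m)) * sum((x*theta - y)^2). A hypothetical call, with made-up data and the fmt and mat64 imports used above:

	x := mat64.NewDense(3, 2, []float64{
		1, 1, // bias column plus one feature per row
		1, 2,
		1, 3,
	})
	y := mat64.NewVector(3, []float64{1, 2, 3})
	theta := mat64.NewVector(2, []float64{0, 1}) // h(x) = 0 + 1*x fits y exactly

	fmt.Println(Cost(x, y, theta)) // prints 0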
Example #6
func nnlsSubproblem(V, W, Ho *mat64.Dense, tol float64, outer, inner int) (H, G *mat64.Dense, i int, ok bool) {
	H = new(mat64.Dense)
	H.Clone(Ho)

	var WtV, WtW mat64.Dense
	WtV.Mul(W.T(), V)
	WtW.Mul(W.T(), W)

	alpha, beta := 1., 0.1

	decFilt := func(r, c int, v float64) float64 {
		// decFilt is applied to G, so v = G.At(r, c).
		if v < 0 || H.At(r, c) > 0 {
			return v
		}
		return 0
	}

	G = new(mat64.Dense)
	for i = 0; i < outer; i++ {
		G.Mul(&WtW, H)
		G.Sub(G, &WtV)
		G.Apply(decFilt, G)

		if mat64.Norm(G, 2) < tol {
			break
		}

		var (
			reduce bool
			Hp     *mat64.Dense
			d, dQ  mat64.Dense
		)
		for j := 0; j < inner; j++ {
			var Hn mat64.Dense
			Hn.Scale(alpha, G)
			Hn.Sub(H, &Hn)
			Hn.Apply(posFilt, &Hn)

			d.Sub(&Hn, H)
			dQ.Mul(&WtW, &d)
			dQ.MulElem(&dQ, &d)
			d.MulElem(G, &d)

			sufficient := 0.99*mat64.Sum(&d)+0.5*mat64.Sum(&dQ) < 0

			if j == 0 {
				reduce = !sufficient
				Hp = H
			}
			if reduce {
				if sufficient {
					H = &Hn
					ok = true
					break
				} else {
					alpha *= beta
				}
			} else {
				if !sufficient || mat64.Equal(Hp, &Hn) {
					H = Hp
					break
				} else {
					alpha /= beta
					Hp = &Hn
				}
			}
		}
	}

	return H, G, i, ok
}
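Example #6 applies posFilt, which is not shown. In a projected-gradient non-negative least-squares step it would project entries onto the non-negative orthant; the version below is a plausible reconstruction, not necessarily the original:

// posFilt zeroes any negative entry; it matches the Apply callback signature.
func posFilt(r, c int, v float64) float64 {
	if v < 0 {
		return 0
	}
	return v
}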
Example #7
// Fit trains the neural network on the given fixed datagrid.
//
// Training stops when the mean-squared error achieved is less
// than the Convergence value, or when back-propagation has occurred
// more times than the value set by MaxIterations.
func (m *MultiLayerNet) Fit(X base.FixedDataGrid) {

	// Make sure everything's a FloatAttribute
	insts := m.convertToFloatInsts(X)

	// The size of the first layer is the number of Attributes
	// in the revised instances which aren't class Attributes
	inputAttrsVec := base.NonClassAttributes(insts)

	// The size of the output layer is the number of Attributes
	// in the revised instances which are class Attributes
	classAttrsVec := insts.AllClassAttributes()

	// The total number of layers is input layer + output layer
	// plus number of layers specified
	totalLayers := 2 + len(m.layers)

	// The size is then augmented by the number of nodes
	// in the hidden layers
	size := len(inputAttrsVec)
	size += len(classAttrsVec)
	hiddenSize := 0
	for _, a := range m.layers {
		size += a
		hiddenSize += a
	}

	// Enumerate the Attributes
	trainingAttrs := make(map[base.Attribute]int)
	classAttrs := make(map[base.Attribute]int)
	attrCounter := 0
	for i, a := range inputAttrsVec {
		attrCounter = i
		m.attrs[a] = attrCounter
		trainingAttrs[a] = attrCounter
	}
	m.classAttrOffset = attrCounter + 1
	for _, a := range classAttrsVec {
		attrCounter++
		m.attrs[a] = attrCounter + hiddenSize
		classAttrs[a] = attrCounter + hiddenSize
		m.classAttrCount++
	}

	// Create the underlying Network
	m.network = NewNetwork(size, len(inputAttrsVec), Sigmoid)

	// Initialise inter-hidden layer weights to small random values
	layerOffset := len(inputAttrsVec)
	for i := 0; i < len(m.layers)-1; i++ {
		// Get the size of this layer
		thisLayerSize := m.layers[i]
		// Next layer size
		nextLayerSize := m.layers[i+1]
		// For every node in this layer
		for j := 1; j <= thisLayerSize; j++ {
			// Compute the offset
			nodeOffset1 := layerOffset + j
			// For every node in the next layer
			for k := 1; k <= nextLayerSize; k++ {
				// Compute offset
				nodeOffset2 := layerOffset + thisLayerSize + k
				// Set weight randomly
				m.network.SetWeight(nodeOffset1, nodeOffset2, rand.NormFloat64()*0.1)
			}
		}
		layerOffset += thisLayerSize
	}

	// Initialise biases for each hidden layer
	layerOffset = len(inputAttrsVec)
	for _, l := range m.layers {
		for j := 1; j <= l; j++ {
			nodeOffset := layerOffset + j
			m.network.SetBias(nodeOffset, rand.NormFloat64()*0.1)
		}
		layerOffset += l
	}

	// Initialise biases for output layer
	for i := 0; i < len(classAttrsVec); i++ {
		nodeOffset := layerOffset + i
		m.network.SetBias(nodeOffset, rand.NormFloat64()*0.1)
	}

	// Connect final hidden layer with the output layer
	layerOffset = len(inputAttrsVec)
	for i, l := range m.layers {
		if i == len(m.layers)-1 {
			for j := 1; j <= l; j++ {
				nodeOffset1 := layerOffset + j
				for k := 1; k <= len(classAttrsVec); k++ {
					nodeOffset2 := layerOffset + l + k
					m.network.SetWeight(nodeOffset1, nodeOffset2, rand.NormFloat64()*0.1)
				}
			}
		}
		layerOffset += l
	}

	// Connect input layer with first hidden layer (or the output layer, if there are no hidden layers)
	for i := 1; i <= len(inputAttrsVec); i++ {
		nextLayerLen := 0
		if len(m.layers) > 0 {
			nextLayerLen = m.layers[0]
		} else {
			nextLayerLen = len(classAttrsVec)
		}
		for j := 1; j <= nextLayerLen; j++ {
			nodeOffset := len(inputAttrsVec) + j
			v := rand.NormFloat64() * 0.1
			m.network.SetWeight(i, nodeOffset, v)
		}
	}

	// Create the training activation vector
	trainVec := mat64.NewDense(size, 1, make([]float64, size))
	// Create the error vector
	errVec := mat64.NewDense(size, 1, make([]float64, size))

	// Resolve training AttributeSpecs
	trainAs := base.ResolveAllAttributes(insts)

	// Feed forward, compute the error and update the weights for each
	// training example until convergence or the iteration limit is reached
	for iteration := 0; iteration < m.MaxIterations; iteration++ {
		totalError := 0.0
		maxRow := 0
		insts.MapOverRows(trainAs, func(row [][]byte, i int) (bool, error) {

			maxRow = i
			// Clear vectors
			for i := 0; i < size; i++ {
				trainVec.Set(i, 0, 0.0)
				errVec.Set(i, 0, 0.0)
			}

			// Build vectors
			for i, vb := range row {
				v := base.UnpackBytesToFloat(vb)
				if attrIndex, ok := trainingAttrs[trainAs[i].GetAttribute()]; ok {
					// Add to Activation vector
					trainVec.Set(attrIndex, 0, v)
				} else if attrIndex, ok := classAttrs[trainAs[i].GetAttribute()]; ok {
					// Set to error vector
					errVec.Set(attrIndex, 0, v)
				} else {
					panic("Should be able to find this Attribute!")
				}
			}

			// Activate the network
			m.network.Activate(trainVec, totalLayers-1)

			// Compute the error
			for a := range classAttrs {
				cIndex := classAttrs[a]
				errVec.Set(cIndex, 0, errVec.At(cIndex, 0)-trainVec.At(cIndex, 0))
			}

			// Update total error
			totalError += math.Abs(mat64.Sum(errVec))

			// Back-propagate the error
			b := m.network.Error(trainVec, errVec, totalLayers)

			// Update the weights
			m.network.UpdateWeights(trainVec, b, m.LearningRate)

			// Update the biases
			m.network.UpdateBias(b, m.LearningRate)

			return true, nil
		})

		totalError /= float64(maxRow)
		// If we've converged, no need to carry on
		if totalError < m.Convergence {
			break
		}
	}
}