Example #1
func (this *TrainingSet) AddX0() {
	// Prepend a constant x0 = 1 column (the bias/intercept feature) to Xs.
	m := this.Xs.GetMRows()
	x0 := Matrix.NullMatrix(m, 1)

	// Fill the new column with 1.0 (matrix indices are 1-based).
	for i := 1; i <= m; i++ {
		x0.SetValue(i, 1, 1.0)
	}

	// Concatenate so that Xs gains the leading column of ones.
	this.Xs = x0.AddColumn(this.Xs)
}
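
AddX0 follows the usual design-matrix convention of prepending a constant 1 column (the x0 intercept/bias feature) to the feature matrix. As a point of reference, here is a minimal standalone sketch of the same idea using plain slices; addX0 is a hypothetical helper and does not use the Matrix package:

package main

import "fmt"

// addX0 prepends a constant 1.0 column to every row of a feature matrix,
// mirroring what TrainingSet.AddX0 does with the Matrix package.
func addX0(xs [][]float64) [][]float64 {
	out := make([][]float64, len(xs))
	for i, row := range xs {
		out[i] = append([]float64{1.0}, row...)
	}
	return out
}

func main() {
	xs := [][]float64{{2, 3}, {4, 5}}
	fmt.Println(addX0(xs)) // [[1 2 3] [1 4 5]]
}
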
Example #2
File: ANN.go Project: eddytrex/AIgo
// CreateANN builds a fully connected network: Inputs is the input size,
// NeuronsByLayer lists the width of each layer (the last entry is the output
// layer), Act/Derivate and Cost/DCost are the activation and cost functions
// with their derivatives, and path is where the weights are persisted as CSV.
func CreateANN(Inputs int, NeuronsByLayer []int, Act func(*Matrix.Matrix) *Matrix.Matrix, Derivate func(*Matrix.Matrix) *Matrix.Matrix, Cost func(*Matrix.Matrix, *Matrix.Matrix) *Matrix.Matrix, DCost func(*Matrix.Matrix, *Matrix.Matrix) *Matrix.Matrix, path string) ANN {

	var out ANN

	out.Weights = make([]*Matrix.Matrix, len(NeuronsByLayer), len(NeuronsByLayer))
	out.BestWeightsFound = make([]*Matrix.Matrix, len(NeuronsByLayer), len(NeuronsByLayer))
	out.LearningRates = make([]*Matrix.Matrix, len(NeuronsByLayer), len(NeuronsByLayer))

	out.Δ = make([]*Matrix.Matrix, len(NeuronsByLayer), len(NeuronsByLayer))
	out.Δ1 = make([]*Matrix.Matrix, len(NeuronsByLayer), len(NeuronsByLayer))

	out.ð = make([]*Matrix.Matrix, len(NeuronsByLayer)+1, len(NeuronsByLayer)+1)

	out.Inputs = Inputs
	out.Outputs = NeuronsByLayer[len(NeuronsByLayer)-1]

	out.ActivationLayer = Act
	out.DarivateActivationLayer = Derivate

	out.CostFunction = Cost
	out.DerviateCostFunction = DCost
	out.PathWeightsInCSV = path
	// m tracks the width of the previous layer, starting with the inputs.
	m := Inputs
	for i := 0; i < len(NeuronsByLayer); i++ {

		n := NeuronsByLayer[i]

		// One extra row (m+1) holds the bias weights for this layer;
		// the weight matrix is initialized with random values.

		out.Weights[i] = Matrix.RandomRealMatrix(m+1, n, 1.2)
		out.BestWeightsFound[i] = Matrix.NullMatrixP(m+1, n)
		out.LearningRates[i] = Matrix.FixValueMatrix(m+1, n, 0.0001)

		out.ð[i] = Matrix.NullMatrix(m+1, n)

		out.Δ[i] = Matrix.NullMatrixP(m+1, n)
		out.Δ1[i] = Matrix.NullMatrixP(m+1, n)
		// This layer's width becomes the next layer's input size.
		m = n

	}

	// Error accumulators sized to the output layer (m now holds its width).
	out.AcumatedError = Matrix.NullMatrixP(m, 1)
	out.MinimumErrorFound = Matrix.NullMatrixP(m, 1)
	out.AcumatedError1 = Matrix.NullMatrixP(m, 1)
	return out
}
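
The shape bookkeeping inside the loop is the core of CreateANN: each layer's weight matrix is (m+1) x n, where the extra row holds the bias weights and the previous layer's width m becomes the next layer's input size. A small standalone sketch of just that calculation (layerShapes is a hypothetical helper, not part of the AIgo API):

package main

import "fmt"

// layerShapes reproduces the shape bookkeeping in CreateANN: layer i gets a
// (m+1) x n weight matrix, where n is the layer's width, m is the previous
// layer's width (starting at the number of inputs), and the extra row holds
// the bias weights.
func layerShapes(inputs int, neuronsByLayer []int) [][2]int {
	shapes := make([][2]int, len(neuronsByLayer))
	m := inputs
	for i, n := range neuronsByLayer {
		shapes[i] = [2]int{m + 1, n}
		m = n
	}
	return shapes
}

func main() {
	// 2 inputs and layers of 2, 2, 2 neurons, as in Example #3 below.
	fmt.Println(layerShapes(2, []int{2, 2, 2})) // [[3 2] [3 2] [3 2]]
}
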
Example #3
File: main.go Project: eddytrex/AIgo
func main() {

	// Topology: three layers of 2 neurons each; the last entry is the output layer.
	l := make([]int, 3)
	l[0] = 2
	l[1] = 2
	l[2] = 2

	// 2 inputs, sigmoid activation and HalfDistance cost (with their
	// derivatives), and "plot" as the path used to persist the weights.
	ann := ANN.CreateANN(2, l, ANN.SigmoidLayer, ANN.DSigmoidLayer, ANN.HalfDistance, ANN.DerivateHalfDistance, "plot")

	// Training data: one-hot encoding of XOR(input1, input2).
	// Pair 1: input (1,1) → target (0,1)
	p1 := Matrix.NullMatrix(2, 1)
	p1.SetValue(1, 1, 1.0)
	p1.SetValue(2, 1, 1.0)
	ro1 := Matrix.NullMatrix(2, 1)
	ro1.SetValue(1, 1, 0.0)
	ro1.SetValue(2, 1, 1.0)

	// Pair 2: input (1,0) → target (1,0)
	p2 := Matrix.NullMatrix(2, 1)
	p2.SetValue(1, 1, 1.0)
	p2.SetValue(2, 1, 0.0)
	ro2 := Matrix.NullMatrix(2, 1)
	ro2.SetValue(1, 1, 1.0)
	ro2.SetValue(2, 1, 0.0)

	// Pair 3: input (0,1) → target (1,0)
	p3 := Matrix.NullMatrix(2, 1)
	p3.SetValue(1, 1, 0.0)
	p3.SetValue(2, 1, 1.0)
	ro3 := Matrix.NullMatrix(2, 1)
	ro3.SetValue(1, 1, 1.0)
	ro3.SetValue(2, 1, 0.0)

	// Pair 4: input (0,0) → target (0,1)
	p4 := Matrix.NullMatrix(2, 1)
	p4.SetValue(1, 1, 0.0)
	p4.SetValue(2, 1, 0.0)
	ro4 := Matrix.NullMatrix(2, 1)
	ro4.SetValue(1, 1, 0.0)
	ro4.SetValue(2, 1, 1.0)

	Inputs := make([]*Matrix.Matrix, 4)
	ROutputs := make([]*Matrix.Matrix, 4)

	Inputs[0] = p1
	Inputs[1] = p2
	Inputs[2] = p3
	Inputs[3] = p4

	ROutputs[0] = ro1
	ROutputs[1] = ro2
	ROutputs[2] = ro3
	ROutputs[3] = ro4

	// Train the network on the four input/target pairs.
	ann.Train(Inputs, ROutputs, 0.01, 0.65, 0.0001, 1000)

	// Print the trained network's output for each training input.
	_, _, Output := ann.ForwardPropagation(Inputs[0])
	fmt.Println(Output.ToString())

	_, _, Output = ann.ForwardPropagation(Inputs[1])
	fmt.Println(Output.ToString())
	_, _, Output = ann.ForwardPropagation(Inputs[2])
	fmt.Println(Output.ToString())
	_, _, Output = ann.ForwardPropagation(Inputs[3])
	fmt.Println(Output.ToString())

}
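
The four training pairs above encode the XOR truth table with one-hot targets: the first output is 1 when the inputs differ, the second when they match. A standalone sketch of that mapping (xorOneHot is a hypothetical helper, independent of the Matrix and ANN packages):

package main

import "fmt"

// xorOneHot reproduces the target encoding used above: index 0 is 1.0 when
// a XOR b is true, index 1 is 1.0 when it is false.
func xorOneHot(a, b float64) [2]float64 {
	if (a != 0) != (b != 0) {
		return [2]float64{1, 0}
	}
	return [2]float64{0, 1}
}

func main() {
	for _, in := range [][2]float64{{1, 1}, {1, 0}, {0, 1}, {0, 0}} {
		fmt.Println(in, "->", xorOneHot(in[0], in[1]))
	}
}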