Example #1
func main() {
	rand.Seed(time.Now().Unix())
	flag.Parse()
	data := LoadData(*idstr)
	avail := 0
	sample := len(data.Points) / 10
	for j := 0; j < sample; j++ {
		// sample a few routes, preferring close routes
		idx1 := rand.Intn(len(data.Points))
		target_x := data.Points[idx1].X + rand.NormFloat64()*150
		target_y := data.Points[idx1].Y + rand.NormFloat64()*150
		idx2 := 0
		mindist := 999999.0
		for id, val := range data.Points {
			if id == idx1 {
				continue
			}
			dist := Distance(&plotter.Point{target_x, target_y}, val)
			if dist < mindist {
				mindist = dist
				idx2 = id
			}
		}

		pathes := calcPathesFromTo(idx1, idx2, data.Matrix, 6)
		indep := IndependentPathes(pathes)
		fmt.Println(j, "/", sample)
		avail += len(indep)
	}
	a := float64(avail) / float64(sample)
	f, _ := os.Create(*idstr + ".indepPathes.txt")
	f.Write([]byte(fmt.Sprintf("%v\n", a)))
	f.Close()
	fmt.Println(a)
}
Example #2
func TestConvLayerBatchR(t *testing.T) {
	layer := &ConvLayer{
		FilterCount:  3,
		FilterWidth:  2,
		FilterHeight: 4,
		Stride:       2,
		InputHeight:  17,
		InputWidth:   19,
		InputDepth:   5,
	}
	layer.Randomize()

	n := 3
	batchInput := make(linalg.Vector, n*layer.InputWidth*layer.InputHeight*layer.InputDepth)
	for i := range batchInput {
		batchInput[i] = rand.NormFloat64()
	}
	batchRes := &autofunc.Variable{Vector: batchInput}

	params := []*autofunc.Variable{batchRes, layer.Biases, layer.FilterVar}

	rVec := autofunc.RVector{}
	for _, param := range params {
		vec := make(linalg.Vector, len(param.Vector))
		for i := range vec {
			vec[i] = rand.NormFloat64()
		}
		rVec[param] = vec
	}

	testRBatcher(t, rVec, layer, autofunc.NewRVariable(batchRes, rVec), n, params)
}
Example #3
func benchmarkConvLayer(b *testing.B, layer *ConvLayer) {
	b.Run("Forward", func(b *testing.B) {
		benchmarkConvLayerForward(b, layer)
	})
	b.Run("Backward", func(b *testing.B) {
		benchmarkConvLayerBackward(b, layer)
	})
	b.Run("Parallel", func(b *testing.B) {
		parallelism := runtime.GOMAXPROCS(0)
		inputs := make(chan *autofunc.Variable, parallelism)
		upstreams := make(chan linalg.Vector, parallelism)
		grads := make(chan autofunc.Gradient, parallelism)
		for i := 0; i < parallelism; i++ {
			testInput := NewTensor3(layer.InputWidth, layer.InputHeight, layer.InputDepth)
			for i := range testInput.Data {
				testInput.Data[i] = rand.NormFloat64()
			}
			inputVar := &autofunc.Variable{Vector: testInput.Data}
			inputs <- inputVar
			upstream := make(linalg.Vector, len(layer.Apply(inputVar).Output()))
			for i := range upstream {
				upstream[i] = rand.NormFloat64()
			}
			upstreams <- upstream
			grad := autofunc.NewGradient(layer.Parameters())
			grads <- grad
		}
		b.ResetTimer()
		b.RunParallel(func(pb *testing.PB) {
			benchmarkConvLayerParallel(pb, layer, <-inputs, <-upstreams, <-grads)
		})
	})
}
Example #4
func TestMatMul(t *testing.T) {
	const (
		m = 3
		k = 4
		n = 5
	)

	alpha := rand.NormFloat64()
	a, b := randMat(m, k), randMat(k, n)
	got := blas.MatMul(alpha, a, b)
	want := mat.Scale(alpha, mat.Mul(a, b))
	checkEqualMat(t, want, got, 1e-9)

	// Try with non-copying transposes.
	alpha = rand.NormFloat64()
	a, b = randMat(k, m).T(), randMat(k, n)
	got = blas.MatMul(alpha, a, b)
	want = mat.Scale(alpha, mat.Mul(a, b))
	checkEqualMat(t, want, got, 1e-9)

	alpha = rand.NormFloat64()
	a, b = randMat(m, k), randMat(n, k).T()
	got = blas.MatMul(alpha, a, b)
	want = mat.Scale(alpha, mat.Mul(a, b))
	checkEqualMat(t, want, got, 1e-9)

	alpha = rand.NormFloat64()
	a, b = randMat(k, m).T(), randMat(n, k).T()
	got = blas.MatMul(alpha, a, b)
	want = mat.Scale(alpha, mat.Mul(a, b))
	checkEqualMat(t, want, got, 1e-9)
}
Example #5
func TestMaxPoolingBatchR(t *testing.T) {
	layer := &MaxPoolingLayer{
		XSpan:       5,
		YSpan:       4,
		InputWidth:  17,
		InputHeight: 19,
		InputDepth:  3,
	}

	n := 3
	batchInput := make(linalg.Vector, n*layer.InputWidth*layer.InputHeight*layer.InputDepth)
	for i := range batchInput {
		batchInput[i] = rand.NormFloat64()
	}
	batchRes := &autofunc.Variable{Vector: batchInput}

	rVec := autofunc.RVector{
		batchRes: make(linalg.Vector, len(batchInput)),
	}
	for i := range rVec[batchRes] {
		rVec[batchRes][i] = rand.NormFloat64()
	}

	testRBatcher(t, rVec, layer, autofunc.NewRVariable(batchRes, rVec),
		n, []*autofunc.Variable{batchRes})
}
Example #6
func (s *S) TestSolveLUVec(c *check.C) {
	for _, n := range []int{5, 10} {
		a := NewDense(n, n, nil)
		for i := 0; i < n; i++ {
			for j := 0; j < n; j++ {
				a.Set(i, j, rand.NormFloat64())
			}
		}
		b := NewVector(n, nil)
		for i := 0; i < n; i++ {
			b.SetVec(i, rand.NormFloat64())
		}
		var lu LU
		lu.Factorize(a)
		var x Vector
		if err := x.SolveLUVec(&lu, false, b); err != nil {
			continue
		}
		var got Vector
		got.MulVec(a, &x)
		if !got.EqualsApproxVec(b, 1e-12) {
			c.Error("Solve mismatch n = %v.\nWant: %v\nGot: %v", n, b, got)
		}
	}
	// TODO(btracey): Add testOneInput test when such a function exists.
}
Example #7
// Point3dNormal returns a random point drawn from the standard normal distribution.
func Point3dNormal() Point3d {
	return Point3d{
		rand.NormFloat64(),
		rand.NormFloat64(),
		rand.NormFloat64(),
	}
}
Example #8
func (b *block) addNode(n node) {
	if !b.nodes[n] {
		b.Add(n)
		n.Move(Pt(rand.NormFloat64(), rand.NormFloat64()))
		b.nodes[n] = true
		n.setBlock(b)
		switch n := n.(type) {
		case *callNode:
			if n.obj != nil && !isMethod(n.obj) {
				b.func_().addPkgRef(n.obj)
			}
		case *compositeLiteralNode:
			// handled in compositeLiteralNode.setType
		case *valueNode:
			switch obj := n.obj.(type) {
			case *types.Const, *types.Var:
				b.func_().addPkgRef(obj)
			case *types.Func:
				if !isMethod(obj) {
					b.func_().addPkgRef(obj)
				}
			}
		}
		rearrange(b)
	}
}
Example #9
func Noise(l int) *ComplexV {
	r := Zeros(l)
	for k := range *r {
		(*r)[k] = complex(rand.NormFloat64(), rand.NormFloat64())
	}
	return r
}
Example #10
func generateMoveTrack(xPosAnswer int) [][]int {
	totalFrames := int(xPosAnswer/2) + rand.Intn(5)

	moveTrack := make([][]int, totalFrames)
	moveTrack[0] = []int{int(-rand.NormFloat64()*8.0 - 20.0), int(-rand.NormFloat64()*8.0 - 20.0), 0}
	moveTrack[1] = []int{0, 0, 0}

	periodParam := rand.Float64()
	for i := 2; i < totalFrames; i++ {
		moveTrack[i] = []int{0, int(math.Sin(float64(i)*periodParam*0.08) * 4.0), i*8 + rand.Intn(5)}
	}

	xPosBitmap := make([]bool, xPosAnswer)
	for i := 0; i < totalFrames-4; {
		t := &xPosBitmap[rand.Intn(xPosAnswer-1)]
		if !*t {
			*t = true
			i++
		}
	}
	xPosBitmap[xPosAnswer-1] = true

	k := 2
	for i, v := range xPosBitmap {
		if v {
			moveTrack[k][0] = i + 1
			k++
		}
	}

	copy(moveTrack[totalFrames-1], moveTrack[totalFrames-2])
	moveTrack[totalFrames-1][2] += 100 + rand.Intn(300)

	return moveTrack
}
Example #11
func tree0(f croot.File) {
	// create a tree
	tree := croot.NewTree("tree", "tree", 32)
	e := &Event{}
	const bufsiz = 32000

	tree.Branch("evt", e, bufsiz, 0)

	// fill some events with random numbers
	nevents := *evtmax
	for iev := 0; iev != nevents; iev++ {
		if iev%1000 == 0 {
			fmt.Printf(":: processing event %d...\n", iev)
		}

		// the two energies follow a gaussian distribution
		e.A.E = rand.NormFloat64() //ea
		e.B.E = rand.NormFloat64() //eb

		e.A.T = croot.GRandom.Rndm(1)
		e.B.T = e.A.T * croot.GRandom.Gaus(0., 1.)
		if iev%1000 == 0 {
			fmt.Printf("ievt: %d\n", iev)
			fmt.Printf("evt.a.e= %8.3f\n", e.A.E)
			fmt.Printf("evt.a.t= %8.3f\n", e.A.T)
			fmt.Printf("evt.b.e= %8.3f\n", e.B.E)
			fmt.Printf("evt.b.t= %8.3f\n", e.B.T)
		}
		tree.Fill()
	}
	f.Write("", 0, 0)
}
Example #12
func testBatchRGradienter(t *testing.T, batchSize int, b *BatchRGradienter) {
	rand.Seed(batchRGradienterSeed)

	net := Network{
		&DenseLayer{
			InputCount:  10,
			OutputCount: 30,
		},
		&Sigmoid{},
		&DenseLayer{
			InputCount:  30,
			OutputCount: 3,
		},
		&Sigmoid{},
	}
	net.Randomize()
	b.Learner = net.BatchLearner()

	inputs := make([]linalg.Vector, batchSize)
	outputs := make([]linalg.Vector, batchSize)
	for i := range inputs {
		inputVec := make(linalg.Vector, 10)
		outputVec := make(linalg.Vector, 3)
		for j := range inputVec {
			inputVec[j] = rand.NormFloat64()
		}
		for j := range outputVec {
			outputVec[j] = rand.Float64()
		}
		inputs[i] = inputVec
		outputs[i] = outputVec
	}
	samples := VectorSampleSet(inputs, outputs)

	rVector := autofunc.RVector(autofunc.NewGradient(net.Parameters()))
	for _, vec := range rVector {
		for i := range vec {
			vec[i] = rand.NormFloat64()
		}
	}

	single := SingleRGradienter{Learner: net, CostFunc: b.CostFunc}
	expectedGrad := single.Gradient(samples)
	actualGrad := b.Gradient(samples)

	if !vecMapsEqual(expectedGrad, actualGrad) {
		t.Error("bad gradient from Gradient()")
	}

	expectedGrad, expectedRGrad := single.RGradient(rVector, samples)
	actualGrad, actualRGrad := b.RGradient(rVector, samples)

	if !vecMapsEqual(expectedGrad, actualGrad) {
		t.Error("bad gradient from RGradient()")
	}
	if !vecMapsEqual(expectedRGrad, actualRGrad) {
		t.Error("bad r-gradient from RGradient()")
	}
}
Example #13
func (*Tool) Run(args []string) {
	fmt.Println("running logistic regression")

	n := 1000
	p := 10

	beta := make([]float64, p)
	beta[0] = rand.NormFloat64()
	beta[1] = rand.NormFloat64()

	x := la.NewMatrix(n, p)
	y := la.NewVector(n)

	for i := 0; i < n; i++ {

		v := randVec(p)

		var z float64

		for j := 0; j < p; j++ {
			x.Set(i, j, v[j])
			z += beta[j] * v[j] // accumulate the linear predictor beta·x
		}

		if z > 0 {
			y.Set(i, +1)
		} else {
			y.Set(i, -1)
		}
	}

	rp := &model.RegressionProblem{
		N:            n,
		P:            p,
		Data:         x,
		Response:     y,
		ColumnNames:  names("p", p),
		RowNames:     names("x", n),
		ResponseName: "y",
	}

	rc := &model.LogisticRegressionRisk{}
	pc := model.NewLassoPenalty(p)

	dv := 0.001
	vmax := 0.07

	mon := &FixedVMonitor{vmax}

	oa := &model.RandomAssigner{rp.Data.Rows, 2.0 / 3.0}

	tt := oa.Assign()

	results := model.RunGpsFull(rp, tt, dv, rc, pc, mon.Continue)

	fmt.Println(results)

}
Example #14
// ComplexNormal creates a random matrix whose elements' real and imaginary
// parts are drawn from the standard normal distribution.
func ComplexNormal(rows, cols int) *ComplexMatrix {
	A := ComplexZeros(rows, cols)
	for i := range A.elements {
		re := rand.NormFloat64()
		im := rand.NormFloat64()
		A.elements[i] = complex(re, im)
	}
	return A
}
Example #15
func CreateIndis(anzindis int64, stddev, center_x, center_y float64) []*plotter.Point {
	list := make([]*plotter.Point, 0, anzindis)
	for i := 0; i < int(anzindis); i++ {
		indi := new(plotter.Point)
		indi.X = (rand.NormFloat64() * stddev) + center_x
		indi.Y = (rand.NormFloat64() * stddev) + center_y
		list = append(list, indi)
	}
	return list
}
Example #16
// generateRandomCoordinate creates a random coordinate. This mucks with the
// underlying structure directly, so it's not really useful for any particular
// position in the network, but it's a good payload to send through to make
// sure things come out the other side or get stored correctly.
func generateRandomCoordinate() *coordinate.Coordinate {
	config := coordinate.DefaultConfig()
	coord := coordinate.NewCoordinate(config)
	for i := range coord.Vec {
		coord.Vec[i] = rand.NormFloat64()
	}
	coord.Error = rand.NormFloat64()
	coord.Adjustment = rand.NormFloat64()
	return coord
}
Example #17
func randomPoints(n int) {

	for i := 0; i < n; i++ {
		fmt.Println(rand.NormFloat64() * 10)
	}

	for i := 0; i < n; i++ {
		fmt.Println((rand.NormFloat64() * 10) + 10)
	}
}
Example #18
func randomRectangles(n int, world *Rectangle, avgSize float64) []*Rectangle {
	ret := make([]*Rectangle, n)
	for i := 0; i < len(ret); i++ {
		w := rand.NormFloat64() * avgSize
		h := rand.NormFloat64() * avgSize
		x := rand.Float64() * world.maxX
		y := rand.Float64() * world.maxY
		ret[i] = NewRectangle(x, math.Min(world.maxX, x+w), y, math.Min(world.maxY, y+h))
	}
	return ret
}
Example #19
func CreateNormCircles(num int, devx, devy, meanx, meany float64) []*Circle {
	// Create a set of normally distributed points in the 2D space.
	// Parameters are given as flags or in form-data.
	circles := make([]*Circle, num)
	for i := 0; i < num; i++ {
		circles[i] = &Circle{
			x: (rand.NormFloat64()*devx + meanx),
			y: (rand.NormFloat64()*devy + meany),
		}
	}
	return circles
}
Example #20
func RandNF(variance float64) float64 {
	// The mean is fixed at 0; scale by the standard deviation when the
	// requested variance differs from 1.
	if variance != 1 {
		return rand.NormFloat64() * math.Sqrt(variance)
	}
	return rand.NormFloat64()
}
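
Most of the snippets in this listing rely on the same identity: rand.NormFloat64 returns a draw from the standard normal distribution, so a sample with mean m and standard deviation s is obtained as rand.NormFloat64()*s + m. A minimal sketch of that pattern, assuming math and math/rand are imported (the helper name normal is illustrative, not taken from any of the packages above):

func normal(mean, variance float64) float64 {
	// Scale a standard normal variate to the requested variance, then shift it.
	return rand.NormFloat64()*math.Sqrt(variance) + mean
}

For example, normal(10, 4) yields a draw with mean 10 and standard deviation 2.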
Example #21
func (d *Droplet) init() {
	d.x = math.Inf(-1)
	d.y = math.Inf(-1)
	d.r = lognorm(μr, σr)
	d.h = lognorm(d.r, σr)

	d.vx = rand.NormFloat64()*σv + μvx
	μvy := vterm * (1 - math.Exp(-1200*d.r))
	d.vy = rand.NormFloat64()*σv + μvy

	d.lifetime = 0
}
Example #22
func genString(stddev float64) string {
	n := int(math.Abs(rand.NormFloat64()*stddev + stddev/2))
	c := make([]rune, n)
	for i := range c {
		f := math.Abs(rand.NormFloat64()*64 + 32)
		if f > 0x10ffff {
			f = 0x10ffff
		}
		c[i] = rune(f)
	}
	return string(c)
}
Example #23
func RandNC(variance float64) complex128 {
	// The mean is fixed at 0; scale both components by the standard
	// deviation when the requested variance differs from 1.
	if variance != 1 {
		stdDev := math.Sqrt(variance)
		return complex(rand.NormFloat64()*stdDev, rand.NormFloat64()*stdDev)
	}
	return complex(rand.NormFloat64(), rand.NormFloat64())
}
Example #24
func testRBatcher(t *testing.T, rv autofunc.RVector, b batchFuncR, in autofunc.RResult,
	n int, params []*autofunc.Variable) {
	funcRBatcher := autofunc.RFuncBatcher{F: b}

	t.Run("Forward", func(t *testing.T) {
		expected := funcRBatcher.BatchR(rv, in, n)
		actual := b.BatchR(rv, in, n)
		diff := actual.Output().Copy().Scale(-1).Add(expected.Output()).MaxAbs()
		if diff > 1e-5 {
			t.Errorf("expected output %v but got %v", expected, actual)
		}
		diff = actual.ROutput().Copy().Scale(-1).Add(expected.ROutput()).MaxAbs()
		if diff > 1e-5 {
			t.Errorf("expected r-output %v but got %v", expected, actual)
		}
	})

	t.Run("Backward", func(t *testing.T) {
		expectedOut := funcRBatcher.BatchR(rv, in, n)
		actualOut := b.BatchR(rv, in, n)

		expected := autofunc.NewGradient(params)
		actual := autofunc.NewGradient(params)
		expectedR := autofunc.NewRGradient(params)
		actualR := autofunc.NewRGradient(params)

		outGrad := make(linalg.Vector, len(expectedOut.Output()))
		outGradR := make(linalg.Vector, len(expectedOut.Output()))
		for i := range outGrad {
			outGrad[i] = rand.NormFloat64()
			outGradR[i] = rand.NormFloat64()
		}

		expectedOut.PropagateRGradient(outGrad.Copy(), outGradR.Copy(), expectedR, expected)
		actualOut.PropagateRGradient(outGrad, outGradR, actualR, actual)

		for i, variable := range params {
			actualVec := actual[variable]
			expectedVec := expected[variable]
			diff := actualVec.Copy().Scale(-1).Add(expectedVec).MaxAbs()
			if diff > 1e-5 {
				t.Errorf("variable %d (grad): expected %v got %v", i, expectedVec, actualVec)
			}
			actualVec = actualR[variable]
			expectedVec = expectedR[variable]
			diff = actualVec.Copy().Scale(-1).Add(expectedVec).MaxAbs()
			if diff > 1e-5 {
				t.Errorf("variable %d (rgrad): expected %v got %v", i, expectedVec, actualVec)
			}
		}
	})
}
Example #25
// randomBoundingBoxes generates n BoundingBoxes within frame, with average width and height avgSize.
func randomBoundingBoxes(n int, frame BoundingBox, avgSize float64) []BoundingBox {
	ret := make([]BoundingBox, n)

	for i := 0; i < len(ret); i++ {
		w := rand.NormFloat64() * avgSize
		h := rand.NormFloat64() * avgSize
		x := rand.Float64()*frame.SizeX() + frame.MinX
		y := rand.Float64()*frame.SizeY() + frame.MinY
		ret[i] = NewBoundingBox(x, math.Min(frame.MaxX, x+w), y, math.Min(frame.MaxY, y+h))
	}

	return ret
}
Example #26
func TestGenMatMul(t *testing.T) {
	const (
		m = 3
		k = 4
		n = 5
	)
	alpha, beta := rand.NormFloat64(), rand.NormFloat64()
	a, b, c := randMat(m, k), randMat(k, n), randMat(m, n)
	want := mat.Plus(mat.Scale(alpha, mat.Mul(a, b)), mat.Scale(beta, c))
	// Over-write c with result.
	blas.GenMatMul(alpha, a, b, beta, c)
	checkEqualMat(t, want, c, 1e-9)
}
Example #27
func init() {
	// Seed the RNG with a constant so the gaussian clusters are reproducible.
	rand.Seed(42)

	// four 2D gaussian clusters
	gaussian = [][]float64{}
	for i := 0; i < 40; i++ {
		x := rand.NormFloat64() + 4
		y := rand.NormFloat64()*0.25 + 5
		gaussian = append(gaussian, []float64{x, y})
	}
	for i := 0; i < 66; i++ {
		x := rand.NormFloat64()
		y := rand.NormFloat64() + 10
		gaussian = append(gaussian, []float64{x, y})
	}
	for i := 0; i < 100; i++ {
		x := rand.NormFloat64()*3 - 10
		y := rand.NormFloat64()*0.25 - 7
		gaussian = append(gaussian, []float64{x, y})
	}
	for i := 0; i < 23; i++ {
		x := rand.NormFloat64() * 2
		y := rand.NormFloat64() - 1.25
		gaussian = append(gaussian, []float64{x, y})
	}
}
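
The four loops in init differ only in their counts, means, and scales. As a sketch (assuming math/rand is imported; cluster is not part of the original file), the shared pattern can be factored out like this:

func cluster(n int, meanX, meanY, stdX, stdY float64) [][]float64 {
	// Draw n points from an axis-aligned 2D gaussian.
	pts := make([][]float64, 0, n)
	for i := 0; i < n; i++ {
		pts = append(pts, []float64{
			rand.NormFloat64()*stdX + meanX,
			rand.NormFloat64()*stdY + meanY,
		})
	}
	return pts
}

The first loop above is then equivalent to gaussian = append(gaussian, cluster(40, 4, 5, 1, 0.25)...).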
Example #28
func (p *Pool) mutate(genes []float64) {
	gToMutate := float64(len(genes)) * p.mutatePer
	if gToMutate < 1 {
		gToMutate = 1
	}
	for i := 0; i < int(gToMutate); i++ {
		r := rand.Int()
		if r%2 == 0 {
			genes[rand.Intn(len(genes))] += rand.NormFloat64() * p.mStrength
		} else {
			genes[rand.Intn(len(genes))] -= rand.NormFloat64() * p.mStrength
		}
	}
}
Example #29
// ComplexNormalSymmetric creates a symmetric n-by-n random matrix whose
// elements' real and imaginary parts are drawn from the normal distribution.
func ComplexNormalSymmetric(n int) *ComplexMatrix {
	A := ComplexZeros(n, n)
	for i := 0; i < n; i++ {
		for j := i; j < n; j++ {
			re := rand.NormFloat64()
			im := rand.NormFloat64()
			val := complex(re, im)
			A.SetAt(val, i, j)
			if i != j {
				A.SetAt(val, j, i)
			}
		}
	}
	return A
}
Example #30
func TestLatencyEWMAFun(t *testing.T) {
	t.Skip("run it for fun")

	m := peer.NewMetrics()
	id, err := testutil.RandPeerID()
	if err != nil {
		t.Fatal(err)
	}

	mu := 100.0
	sig := 10.0
	next := func() time.Duration {
		mu = (rand.NormFloat64() * sig) + mu
		return time.Duration(mu)
	}

	print := func() {
		fmt.Printf("%3.f %3.f --> %d\n", sig, mu, m.LatencyEWMA(id))
	}

	for {
		select {
		case <-time.After(200 * time.Millisecond):
			m.RecordLatency(id, next())
			print()
		}
	}
}
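
The test drives m.RecordLatency with a slowly drifting sample and prints the smoothed value from m.LatencyEWMA. The smoothing factor used inside peer.Metrics is not visible from this snippet, but the exponentially weighted moving average it exercises follows the usual update, sketched generically here (ewma is illustrative, not the library's implementation):

func ewma(prev, sample, alpha float64) float64 {
	// Blend the newest sample with the previous estimate; a larger alpha
	// weights recent samples more heavily.
	return alpha*sample + (1-alpha)*prev
}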