func (p *Plan) FindParent(materials material.MaterialSlice) string {
	seed := time.Now().UnixNano()
	fmt.Println("Seed:", seed)
	rand.Seed(seed) // FIXME use own PRNG instance

	ingredients := make([]Ingredient, 0)
	for use, amount := range p.Requirements {
		possibleMaterials := materials.FindByUse(use)
		remainingAmount := amount
		for remainingAmount > 0 {
			randomAmount := rand.Int63n(int64(remainingAmount)) + 1
			randomIndex := rand.Int63n(int64(len(possibleMaterials)))
			randomGrade := rand.Int63n(5)
			mat := possibleMaterials[randomIndex]
			ingredient := Ingredient{mat, use, material.GradeIndexToGrade(randomGrade), uint8(randomAmount)}
			ingredients = append(ingredients, ingredient)
			remainingAmount -= uint8(randomAmount)
		}
	}
	recipe := &Recipe{p, ingredients}
	genes := recipe.genome(materials)
	fmt.Println("Genome:", genes)
	return genes
}
// Requirement is TaskGroup
// Object is Agent's Location
func (s *Scheduler) Fetch(demands []market.Demand) {
	var request resource.AllocationRequest
	for _, d := range demands {
		demand := d.Requirement.(*plan.TaskGroup)
		request.Requests = append(request.Requests, resource.ComputeRequest{
			ComputeResource: resource.ComputeResource{
				CPUCount: 1,
				CPULevel: 1,
				MemoryMB: int64(s.Option.TaskMemoryMB),
			},
			Inputs: s.findTaskGroupInputs(demand),
		})
	}
	result, err := Assign(s.Leader, &request)
	if err != nil {
		log.Printf("%s Failed to allocate: %v", s.Leader, err)
		time.Sleep(time.Millisecond * time.Duration(15000+rand.Int63n(5000)))
	} else {
		if len(result.Allocations) == 0 {
			log.Printf("%s Failed to allocate any executor.", s.Leader)
			time.Sleep(time.Millisecond * time.Duration(2000+rand.Int63n(1000)))
		} else {
			log.Printf("%s allocated %d executors.", s.Leader, len(result.Allocations))
			for _, allocation := range result.Allocations {
				s.Market.AddSupply(market.Supply{
					Object: allocation,
				})
			}
		}
	}
}
func TestDotInt64WithDiffLength(t *testing.T) {
	N := 1000 + rand.Intn(1000000)
	M := 1000 + rand.Intn(1000000)
	a := make([]int64, N)
	b := make([]int64, M)
	Expected := int64(0)
	for i := range a {
		if i < N {
			a[i] = gomath.ScaleInt64(LowInt64, HighInt64, 0, HighInt64, rand.Int63n(HighInt64))
		}
		if i < M {
			b[i] = gomath.ScaleInt64(LowInt64, HighInt64, 0, HighInt64, rand.Int63n(HighInt64))
		}
		if i < N && i < M {
			Expected += a[i] * b[i]
		}
	}
	Computed := DotInt64(a, b)
	if Computed != Expected {
		t.Logf("Expected %d but computed %d\n", Expected, Computed)
		t.FailNow()
	}
}
// GetRandomValues RPC call for OST, returns random values for testing
func (*OssRpcT) GetRandomValues(init bool, result *OstValues) error {
	result.OstTotal = make(map[string]OstStats)
	result.NidValues = make(map[string]map[string]OstStats)
	for i := 0; i < 10; i++ {
		ost := "OST" + strconv.Itoa(int(rand.Int63n(10)))
		t := result.OstTotal[ost]
		t.RBs = rand.Int63n(10)
		t.WBs = rand.Int63n(10)
		t.RRqs = rand.Int63n(100)
		t.WRqs = rand.Int63n(100)
		result.OstTotal[ost] = t
		result.NidValues[ost] = make(map[string]OstStats)
		for j := 0; j < 100; j++ {
			nid := "nid" + strconv.Itoa(int(rand.Int63n(100)))
			t := result.NidValues[ost][nid]
			t.RBs = rand.Int63n(10)
			t.WBs = rand.Int63n(10)
			t.RRqs = rand.Int63n(100)
			t.WRqs = rand.Int63n(100)
			result.NidValues[ost][nid] = t
		}
	}
	return nil
}
func (self *sector) hyperWatch() {
	ratio := int64(1e3)
	for {
		// Keeping the number of players in the field constant.
		if len(self.players) < 20 {
			ai := newAgent()
			ai.player.Position = [2]float64{
				float64(rand.Int63n(ratio) - ratio/2),
				float64(rand.Int63n(ratio) - ratio/2),
			}
			self.addPlayer(ai.player)
		}
		// Keeping the number of items constant.
		if len(self.powerups) < 30 {
			pu := newPowerup()
			// Random position.
			pu.Position = [2]float64{
				float64(rand.Int63n(self.bounds[0]) - self.bounds[0]/2),
				float64(rand.Int63n(self.bounds[1]) - self.bounds[1]/2),
			}
			self.addPowerup(pu)
		}
		log.Printf("players: %v, items: %v\n", len(self.players), len(self.powerups))
		time.Sleep(time.Millisecond * time.Duration(rand.Float32()*hyperWatchMaxTime))
	}
}
func loadData() {
	var DOCS_SIZE uint32 = 50000
	var TAGS_SIZE uint64 = 1024
	docs = NewDocuments(DOCS_SIZE, TAGS_SIZE)
	var i uint32
	for i = 0; i < DOCS_SIZE; i++ {
		d := bitset.New64(TAGS_SIZE)
		d.Set(1)
		for j := 0; j < 200; j++ {
			d.Set(uint64(rand.Int63n(int64(TAGS_SIZE))))
		}
		docs.Set(i, d)
	}
	d := bitset.New64(TAGS_SIZE)
	d.Set(1)
	for j := 0; j < 200; j++ {
		d.Set(uint64(rand.Int63n(int64(TAGS_SIZE))))
	}
	for i = 1; i < 5; i++ {
		d := bitset.New64(TAGS_SIZE)
		for j := 0; j < 200; j++ {
			d.Set(uint64(rand.Int63n(int64(TAGS_SIZE))))
		}
		docs.bonus[i] = d
	}
}
func TestRead(t *testing.T) {
	compare := func(offset int64, size int64) {
		l := size - offset
		buf := make([]byte, l)
		res, code := objfile.Read(buf, offset)
		if !code.Ok() {
			t.Fatalf("Read() error, (not OK)")
		}
		read, _ := res.Bytes(buf)
		data := string(read)
		if TEST_DATA[offset:offset+l] != data {
			t.Fatalf("Read() returns invalid data. (offset:%d, size:%d)", offset, size)
		}
	}
	var i = 0
	for i < 10 {
		// Keep size >= 2 so that Int63n(size-1) below always gets a positive bound.
		size := 2 + rand.Int63n(int64(len(TEST_DATA))-2)
		offset := rand.Int63n(size - 1)
		compare(offset, size)
		i++
	}
}
func init() {
	nodes = make([]element.Node, 64)
	offset := rand.Int63n(1e10)
	for i := range nodes {
		nodes[i] = element.Node{
			OSMElem: element.OSMElem{Id: offset + rand.Int63n(1000)},
			Long:    rand.Float64()*360 - 180,
			Lat:     rand.Float64()*180 - 90,
		}
	}
}
func GenerateRandomStats(numStats, numCores int, duration time.Duration) []*info.ContainerStats {
	ret := make([]*info.ContainerStats, numStats)
	perCoreUsages := make([]uint64, numCores)
	currentTime := time.Now()
	for i := range perCoreUsages {
		perCoreUsages[i] = uint64(rand.Int63n(1000))
	}
	for i := 0; i < numStats; i++ {
		stats := new(info.ContainerStats)
		stats.Timestamp = currentTime
		currentTime = currentTime.Add(duration)

		percore := make([]uint64, numCores)
		for i := range perCoreUsages {
			perCoreUsages[i] += uint64(rand.Int63n(1000))
			percore[i] = perCoreUsages[i]
			stats.Cpu.Usage.Total += percore[i]
		}
		stats.Cpu.Usage.PerCpu = percore
		stats.Cpu.Usage.User = stats.Cpu.Usage.Total
		stats.Cpu.Usage.System = 0
		stats.Memory.Usage = uint64(rand.Int63n(4096))
		ret[i] = stats
	}
	return ret
}
/* Begins an immediate-mode database transaction. */
func (db *SqliteDBThread) BeginTransaction(transactionType string) (err error) {
	db.dbt.th.AllowGC()
	defer db.dbt.th.DisallowGC()
	stmt := "BEGIN " + transactionType + " TRANSACTION"
	err = db.ExecStatement(stmt)

	var TRY_GAP_WIDENING_MS_INCREMENT int64 = 100 // ms - longest wait will be 6 seconds + random factor
	var tryGapWidth int64 = TRY_GAP_WIDENING_MS_INCREMENT
	var r int64
	for i := 0; err != nil && i < N_BEGIN_TRIES; i++ {
		Logln(PERSIST2_, "BEGIN", transactionType, "ERR:", err)
		r = rand.Int63n(500 + tryGapWidth)
		if i > 5 {
			TRY_GAP_WIDENING_MS_INCREMENT = 1000
		}
		tryGapWidth += TRY_GAP_WIDENING_MS_INCREMENT
		time.Sleep(time.Duration((tryGapWidth + r) * 1000000))
		err = db.ExecStatement(stmt)
	}

	if transactionType == "DEFERRED" {
		// We really don't want a deferred transaction. We want to create a SHARED lock right
		// away in the SQLITE database. If we cannot, we need to fail-fast here, so that
		// the code inside the transaction-protected block will not be executed.
		// So do a dummy read query.
		if dummyQuery == nil {
			dummyQuery, err = db.Prepare("select rowid from RPackage where rowid=1")
			if err != nil {
				panic(err)
			}
		}
		err = dummyQuery.Query()
		dummyQuery.Reset()

		var TRY_GAP_WIDENING_MS_INCREMENT int64 = 100 // ms - longest wait will be 6 seconds + random factor
		var tryGapWidth int64 = TRY_GAP_WIDENING_MS_INCREMENT
		var r int64
		for j := 0; err != nil && j < N_BEGIN_TRIES; j++ {
			Logln(PERSIST2_, "BEGIN DEFERRED: ERR executing dummy select query:", err)
			r = rand.Int63n(500 + tryGapWidth)
			if j > 5 {
				TRY_GAP_WIDENING_MS_INCREMENT = 1000
			}
			tryGapWidth += TRY_GAP_WIDENING_MS_INCREMENT
			time.Sleep(time.Duration((tryGapWidth + r) * 1000000))
			err = dummyQuery.Query()
			dummyQuery.Reset()
		}
	}
	if err == nil {
		Logln(PERSIST2_, ">>>>>>>>>>>>>>>>>>>>>>>>> SUCCESSFULLY BEGAN", transactionType, "TRANSACTION")
	}
	return
}
func getSignedTrans() IBlock {
	if nb != nil {
		return nb
	}
	nb = new(Transaction)
	t := nb.(*Transaction)
	for i := 0; i < 5; i++ {
		t.AddInput(nextAddress(), uint64(rand.Int63n(10000000000)))
	}
	for i := 0; i < 3; i++ {
		t.AddOutput(nextAddress(), uint64(rand.Int63n(10000000000)))
	}
	for i := 0; i < 3; i++ {
		t.AddECOutput(nextAddress(), uint64(rand.Int63n(10000000)))
	}
	for i := 0; i < 3; i++ {
		sig := NewRCD_1(nextSig())
		t.AddAuthorization(sig)
	}
	for i := 0; i < 2; i++ {
		t.AddAuthorization(nextAuth2())
	}
	return nb
}
func newFactoidTransaction() *FactoidTransaction {
	msg := new(FactoidTransaction)
	t := new(factoid.Transaction)
	for i := 0; i < 5; i++ {
		t.AddInput(nextAddress(), uint64(rand.Int63n(10000000000)))
	}
	for i := 0; i < 3; i++ {
		t.AddOutput(nextAddress(), uint64(rand.Int63n(10000000000)))
	}
	for i := 0; i < 3; i++ {
		t.AddECOutput(nextAddress(), uint64(rand.Int63n(10000000)))
	}
	for i := 0; i < 3; i++ {
		sig := factoid.NewRCD_1(nextSig())
		t.AddAuthorization(sig)
	}
	for i := 0; i < 2; i++ {
		t.AddAuthorization(nextAuth2())
	}
	msg.Transaction = t
	return msg
}
// Randomly generate a number in [start, end), excluding any value present in @except.
func randInt64Except(start, end int64, except map[int64]empty) int64 {
	n := end - start
	ret := rand.Int63n(n) + start
	for _, ok := except[ret]; ok; _, ok = except[ret] {
		ret = rand.Int63n(n) + start
	}
	return ret
}
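// A minimal usage sketch for randInt64Except (not part of the original source):
// it assumes `empty` is the usual zero-size struct{} set-value type used by this
// package. Each draw is fed back into the exclusion set, so the three IDs picked
// from [100, 200) are distinct.
func exampleDistinctIDs() []int64 {
	used := make(map[int64]empty)
	ids := make([]int64, 0, 3)
	for i := 0; i < 3; i++ {
		id := randInt64Except(100, 200, used)
		used[id] = empty{}
		ids = append(ids, id)
	}
	return ids
}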
func (p *Producer) Action() {
	var w Work
	println("Producer Action")
	w.start = current + time(rand.Int63n(100))
	clock.insert(&w)
	p.start = current + time(rand.Int63n(100))
	clock.insert(p)
}
func TestMonitorServer(t *testing.T) {
	config := &hydra.Config{
		Monitor: &hydra.ConfigMonitor{
			Host: "localhost",
			Port: 0,
		},
	}
	_, ch := hydra.NewChannel()
	monitor, _ := hydra.NewMonitor(config, ch)
	go monitor.Run()

	expectedMessages := make(map[string]int64)
	expectedBytes := make(map[string]int64)
	tags := []string{"foo", "bar", "dummy.test"}
	for _, tag := range tags {
		for i := 1; i <= 100; i++ {
			m := rand.Int63n(10)
			b := rand.Int63n(2560)
			ch <- &hydra.SentStat{
				Tag:      tag,
				Messages: m,
				Bytes:    b,
			}
			expectedMessages[tag] += m
			expectedBytes[tag] += b
		}
	}
	sleep(1)

	resp, err := http.Get(fmt.Sprintf("http://%s/", monitor.Addr))
	if err != nil {
		t.Error(err)
	}
	defer resp.Body.Close()
	if ct := resp.Header.Get("Content-Type"); ct != "application/json" {
		t.Error("invalid content-type", ct)
	}
	body, _ := ioutil.ReadAll(resp.Body)
	js := bytes.NewReader(body)
	for tag, n := range expectedMessages {
		js.Seek(int64(0), os.SEEK_SET)
		var got int64
		scan.ScanJSON(js, "/sent/"+tag+"/messages", &got)
		if got != n {
			t.Errorf("/sent/%s/messages got %d expected %d", tag, got, n)
		}
	}
	for tag, n := range expectedBytes {
		js.Seek(int64(0), os.SEEK_SET)
		var got int64
		scan.ScanJSON(js, "/sent/"+tag+"/bytes", &got)
		if got != n {
			t.Errorf("/sent/%s/bytes got %d expected %d", tag, got, n)
		}
	}
	log.Println(string(body))
}
func requester(work chan Request) {
	for {
		time.Sleep(time.Duration(rand.Int63n(MaxRequesters * Seconds)))
		work <- func() {
			r := rand.Int63n(MaxRequesters*Seconds) + 10
			time.Sleep(time.Duration(r))
		}
	}
}
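// Hedged counterpart sketch (not in the original): a worker that drains the
// Request channel filled by requester. It assumes Request is a func() type,
// which is what the closure sent above implies.
func worker(work chan Request) {
	for req := range work {
		req()
	}
}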
// valRangePyramid generates n values in [low, high); summing four uniform draws
// biases results toward the middle of the range (a "pyramid"-shaped distribution).
func valRangePyramid(n int, low, high int64) []int64 {
	seq := (high - low) / 4
	vals := make([]int64, n)
	for i := 0; i < n; i++ {
		val := rand.Int63n(seq) + rand.Int63n(seq) + rand.Int63n(seq) + rand.Int63n(seq)
		vals[i] = val + low
	}
	return vals
}
// New will generate a new NUID and properly initialize the prefix, sequential start, and sequential increment.
func New() *NUID {
	n := &NUID{
		seq: prand.Int63n(maxSeq),
		inc: minInc + prand.Int63n(maxInc-minInc),
		pre: make([]byte, preLen),
	}
	n.RandomizePrefix()
	return n
}
func BenchmarkRAWInput(b *testing.B) {
	quit := make(chan int)

	origin := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
	defer origin.Close()
	originAddr := strings.Replace(origin.Listener.Addr().String(), "[::]", "127.0.0.1", -1)

	var respCounter, reqCounter int64

	input := NewRAWInput(originAddr, EnginePcap, true, testRawExpire, "")
	defer input.Close()

	output := NewTestOutput(func(data []byte) {
		if data[0] == '1' {
			atomic.AddInt64(&reqCounter, 1)
		} else {
			atomic.AddInt64(&respCounter, 1)
		}
		// log.Println("Captured ", reqCounter, "requests and ", respCounter, " responses")
	})

	Plugins.Inputs = []io.Reader{input}
	Plugins.Outputs = []io.Writer{output}

	go Start(quit)

	emitted := 0
	fileContent, _ := ioutil.ReadFile("LICENSE.txt")

	for i := 0; i < b.N; i++ {
		wg := new(sync.WaitGroup)
		wg.Add(10 * 100)
		emitted += 10 * 100
		for w := 0; w < 100; w++ {
			go func() {
				client := NewHTTPClient(origin.URL, &HTTPClientConfig{})
				for i := 0; i < 10; i++ {
					if rand.Int63n(2) == 0 {
						client.Post("/", fileContent)
					} else {
						client.Get("/")
					}
					time.Sleep(time.Duration(rand.Int63n(50)) * time.Millisecond)
					wg.Done()
				}
			}()
		}
		wg.Wait()
	}

	time.Sleep(400 * time.Millisecond)
	log.Println("Emitted ", emitted, ", Captured ", reqCounter, "requests and ", respCounter, " responses")
	close(quit)
}
func TestNewWithRandEpsilon(t *testing.T) {
	for i := 0; i < 100; i++ {
		size := 1 + uint64(rand.Int63n(1000))
		// rand.Float64() returns [0,1)
		epsilon := rand.Float64()
		x := New(size, epsilon)
		if nil == x {
			t.Errorf("Expected a non-nil return for epsilon [%v]", epsilon)
		}
		if epsilon != x.epsilon {
			t.Errorf("Stored epsilon and test epsilon are different for size [%+v] and epsilon [%+v] and object [%+v]", size, epsilon, x)
		}
		if size != x.size {
			t.Errorf("Stored size and test size are different for size [%+v] and epsilon [%+v] and object [%+v]", size, epsilon, x)
		}
		if size != uint64(len(x.counts)) {
			t.Errorf("Length of stored counts and test size are different for size [%+v] and epsilon [%+v] and object [%+v]", size, epsilon, x)
		}
		if size != uint64(len(x.values)) {
			t.Errorf("Length of stored values and test size are different for size [%+v] and epsilon [%+v] and object [%+v]", size, epsilon, x)
		}
	}
	for i := 0; i < 100; i++ {
		size := 1 + uint64(rand.Int63n(1000))
		// rand.Float64() returns [0,1)
		epsilon := 1 + rand.Float64()
		if 0 == rand.Int31n(1) {
			epsilon += rand.Float64()
		}
		if 0 == rand.Int31n(1) {
			epsilon *= 1 + rand.Float64()
		}
		if 0 == rand.Int31n(1) {
			epsilon *= -1
		}
		x := New(size, epsilon)
		if nil != x {
			t.Errorf("Expected a nil return for size [%+v] and epsilon [%v]", size, epsilon)
		}
	}
}
func (r *ChunkMigrationGen) Init() {
	r.C = make(chan [3]int64)
	go func() {
		for {
			<-r.C
			r.C <- [3]int64{rand.Int63(), rand.Int63n(3), rand.Int63n(3)}
		}
	}()
}
func makeTranaferResults(ip net.IP, bytesSent uint32) api.TransferResults {
	return api.TransferResults{
		IP:        ip,
		BytesSent: bytesSent,
		Checksum:  uint32(rand.Int31()),
		Duration:  time.Duration(rand.Int63n(1000)) * time.Millisecond,
		RTT:       time.Duration(rand.Int63n(100)) * time.Millisecond,
		Time:      time.Now(),
	}
}
func GenerateRandomContainerSpec(numCores int) info.ContainerSpec {
	ret := info.ContainerSpec{
		Cpu:    info.CpuSpec{},
		Memory: info.MemorySpec{},
	}
	ret.Cpu.Limit = uint64(1000 + rand.Int63n(2000))
	ret.Cpu.MaxLimit = uint64(1000 + rand.Int63n(2000))
	ret.Cpu.Mask = fmt.Sprintf("0-%d", numCores-1)
	ret.Memory.Limit = uint64(4096 + rand.Int63n(4096))
	return ret
}
// Given a retry count and a base retry interval, return the next retry interval
// according to the exponential backoff algorithm.
// Maximum returned retry interval is interval * 2^15.
func nextExponentialBackoff(retries uint, interval time.Duration) time.Duration {
	switch {
	case retries == 0:
		return time.Duration(rand.Int63n(int64(interval)))
	case retries == 1:
		return time.Duration(rand.Int63n(7)) * interval
	case retries >= 2 && retries < 15:
		return time.Duration(rand.Int63n((1<<retries)-1)) * interval
	default:
	}
	return time.Duration(rand.Int63n(1<<15)) * interval
}
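// A small driver sketch for nextExponentialBackoff (illustrative only):
// doWork and maxAttempts are hypothetical stand-ins, not part of the original
// package. Each failed attempt sleeps for the randomized, exponentially growing
// interval before retrying.
func retryWithBackoff(doWork func() error, maxAttempts uint) error {
	base := 500 * time.Millisecond
	var err error
	for attempt := uint(0); attempt < maxAttempts; attempt++ {
		if err = doWork(); err == nil {
			return nil
		}
		time.Sleep(nextExponentialBackoff(attempt, base))
	}
	return err
}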
func init() {
	// Reason for this: if an image with the same name is served in different
	// sizes, the displayed pattern stays the same, but the color changes with
	// each start of mediamock. This makes it possible for a human to recognize
	// a resized image.
	var key []byte
	rand.Seed(time.Now().Unix())
	a := rand.Int63n(2000)
	key = strconv.AppendInt(key, a, 10)
	b := rand.Int63n(2000)
	key = strconv.AppendInt(key, b, 10)
	sipHashKey = md5.Sum(key)
}
func TestInt64Cmap(t *testing.T) {
	newCmap := func() ConcurrentMap {
		keyType := reflect.TypeOf(int64(2))
		elemType := keyType
		return NewConcurrentMap(keyType, elemType)
	}
	testConcurrentMap(
		t,
		newCmap,
		func() interface{} { return rand.Int63n(1000) },
		func() interface{} { return rand.Int63n(1000) },
		reflect.Int64,
		reflect.Int64)
}
func randSize() int64 {
	r := rand.Float32()
	switch {
	case r < 0.05:
		return rand.Int63n(h("3mb")) + h("1mb")
	case r < 0.2:
		return rand.Int63n(h("200k")) + h("100k")
	case r < 0.5:
		return rand.Int63n(h("50k")) + h("50k")
	case r < 0.6:
		return rand.Int63n(h("50k")) + h("10k")
	}
	return rand.Int63n(h("10k"))
}
func TestMin(t *testing.T) {
	rand.Seed(1)
	for i := 0; i < 1000*1000; i++ {
		a := rand.Int63n(1000 * 1000 * 1000)
		b := rand.Int63n(1000 * 1000 * 1000)
		m := Min(a, b)
		om := simpleMin(a, b)
		if m != om {
			as := fstrconv.ItoaComma(a)
			bs := fstrconv.ItoaComma(b)
			ms := fstrconv.ItoaComma(m)
			t.Errorf("Problem with min of %s, %s - min returned %s", as, bs, ms)
		}
	}
}
func TestInt64Keys(t *testing.T) {
	testKeys(t,
		func() Keys {
			int64Keys := &myKeys{
				container: make([]interface{}, 0),
				compareFunc: func(e1 interface{}, e2 interface{}) int8 {
					k1 := e1.(int64)
					k2 := e2.(int64)
					if k1 < k2 {
						return -1
					} else if k1 > k2 {
						return 1
					}
					return 0
				},
				elemType: reflect.TypeOf(int64(1)),
			}
			return int64Keys
		},
		func() interface{} { return rand.Int63n(1000) },
		reflect.Int64)
}
// poll all known lookup servers every LookupdPollInterval
func (q *Reader) lookupdLoop() {
	// add some jitter so that multiple consumers discovering the same topic,
	// when restarted at the same time, don't all connect at once.
	rand.Seed(time.Now().UnixNano())
	jitter := time.Duration(rand.Int63n(int64(q.LookupdPollInterval / 10)))
	ticker := time.NewTicker(q.LookupdPollInterval)

	select {
	case <-time.After(jitter):
	case <-q.lookupdExitChan:
		goto exit
	}

	for {
		select {
		case <-ticker.C:
			q.queryLookupd()
		case <-q.lookupdRecheckChan:
			q.queryLookupd()
		case <-q.lookupdExitChan:
			goto exit
		}
	}

exit:
	ticker.Stop()
	log.Printf("exiting lookupdLoop")
}