func TestARC_RandomOps(t *testing.T) {
	size := 128
	l, err := NewARC(128)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	n := 200000
	for i := 0; i < n; i++ {
		key := rand.Int63() % 512
		r := rand.Int63()
		switch r % 3 {
		case 0:
			l.Add(key, key)
		case 1:
			l.Get(key)
		case 2:
			l.Remove(key)
		}

		if l.t1.Len()+l.t2.Len() > size {
			t.Fatalf("bad: t1: %d t2: %d b1: %d b2: %d p: %d",
				l.t1.Len(), l.t2.Len(), l.b1.Len(), l.b2.Len(), l.p)
		}
		if l.b1.Len()+l.b2.Len() > size {
			t.Fatalf("bad: t1: %d t2: %d b1: %d b2: %d p: %d",
				l.t1.Len(), l.t2.Len(), l.b1.Len(), l.b2.Len(), l.p)
		}
	}
}
func TestGenCtxKey(t *testing.T) {
	t.Log("test gen ctx key map")
	ctxKeyMap := make(map[string]int64, 10)
	rand.Seed(100)

	ctx := rand.Int63() // int64(1000)
	key := fmt.Sprintf("%x", ctx)
	t.Logf("key value %s", key)
	ctxKeyMap[key] = ctx

	ctx1 := rand.Int63()
	key1 := fmt.Sprintf("%x", ctx1)
	t.Logf("key1 value %s", key1)
	ctxKeyMap[key1] = ctx1

	if ctxKeyMap["a"] != 0 {
		t.Error("not-found map value is not 0")
	} else {
		t.Log("not-found map value is 0")
	}
	if ctxKeyMap[key] != ctx {
		t.Errorf("key is not in map, got ctx value=%d", ctxKeyMap[key])
	} else {
		t.Log("test ok...")
	}
}
func createNoise(win ui.Window, screen draw.Image) {
	var rnd, rnd2 uint64
	var rnd16a, rnd16b, rnd16c, rnd16d uint16
	var img [240 * 320 * 4]byte

	// Populate the image with pixel data
	for {
		for i := 0; i < len(img); i += 256 {
			rnd = uint64(rand.Int63())
			if (i % 63) == 0 {
				rnd2 = uint64(rand.Int63())
			}
			// rand.Int63() leaves the 64th bit zero, so set it manually
			// from the spare random bits in rnd2.
			rnd |= (rnd2 & 1) << 63
			rnd16a = uint16(rnd & 0x000000000000FFFF)
			rnd16b = uint16((rnd >> 16) & 0x000000000000FFFF)
			rnd16c = uint16((rnd >> 32) & 0x000000000000FFFF)
			rnd16d = uint16((rnd >> 48) & 0x000000000000FFFF)
			copy(img[i:i+64], bw[rnd16a][:])
			copy(img[i+64:i+128], bw[rnd16b][:])
			copy(img[i+128:i+192], bw[rnd16c][:])
			copy(img[i+192:i+256], bw[rnd16d][:])
			rnd2 = rnd2 >> 1 // rotate to the next spare random bit
		}
		// Copy pixel data to the screen
		copy(screen.(*image.RGBA).Pix, img[:])
		frameCount <- 1
		win.FlushImage()
	}
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant key/value service.
// me is the index of the current server in servers[].
//
func StartServer(servers []string, me int) *KVPaxos {
	// this call is all that's needed to persuade
	// Go's RPC library to marshall/unmarshall
	// struct Op.
	gob.Register(Op{})

	kv := new(KVPaxos)
	kv.me = me

	// Your initialization code here.
	kv.mydb = map[string]string{}
	kv.history = map[int64]string{}
	kv.DoneInst = 0

	rpcs := rpc.NewServer()
	rpcs.Register(kv)

	kv.px = paxos.Make(servers, me, rpcs)

	os.Remove(servers[me])
	l, e := net.Listen("unix", servers[me])
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	kv.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.
	go func() {
		for kv.dead == false {
			conn, err := kv.l.Accept()
			if err == nil && kv.dead == false {
				if kv.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if kv.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && kv.dead == false {
				fmt.Printf("KVPaxos(%v) accept: %v\n", me, err.Error())
				kv.kill()
			}
		}
	}()

	return kv
}
func Test_Replay(test *testing.T) {
	type mh struct {
		hash []byte
		time int64
	}

	var h [5000]*mh
	for i := 0; i < 5000; i++ {
		h[i] = new(mh)
		h[i].hash = fct.Sha([]byte(fmt.Sprintf("h%d", i))).Bytes()
		h[i].time = now + (rand.Int63() % 24 * hour) - 12*hour

		if !IsTSValid_(h[i].hash, h[i].time, now) {
			fmt.Println("Failed Test ", i, "first")
			test.Fail()
			return
		}
		if IsTSValid_(h[i].hash, h[i].time, now) {
			fmt.Println("Failed Test ", i, "second")
			test.Fail()
			return
		}
		now += rand.Int63() % hour
		for j := 0; j < i; j++ {
			if IsTSValid_(h[i].hash, h[i].time, hour) {
				fmt.Println("Failed Test ", i, j, "repeat")
				test.Fail()
				return
			}
		}
	}
}
func benchTree(b *testing.B, n int, put, get bool) {
	fillBenchTree(b, n)
	b.StopTimer()
	oldprocs := runtime.GOMAXPROCS(runtime.NumCPU())
	defer runtime.GOMAXPROCS(oldprocs)

	var keys [][]byte
	var vals [][]byte
	for i := 0; i < b.N; i++ {
		keys = append(keys, murmur.HashString(fmt.Sprint(rand.Int63())))
		vals = append(vals, []byte(fmt.Sprint(rand.Int63())))
	}

	var k []byte
	var v []byte
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		k = benchmarkTestKeys[i%len(benchmarkTestKeys)]
		v = benchmarkTestValues[i%len(benchmarkTestValues)]
		if put {
			benchmarkTestTree.Put(k, v, 1)
		}
		if get {
			j, _, existed := benchmarkTestTree.Get(k)
			if bytes.Compare(j, v) != 0 {
				b.Fatalf("%v should contain %v, but got %v, %v",
					benchmarkTestTree.Describe(), v, j, existed)
			}
		}
	}
}
func (p *FileResource) CreatePath(req Request, cxt Context) (string, Request, Context, int, error) {
	frc := cxt.(FileResourceContext)
	if frc.IsDir() {
		// Pick a random, unused name inside the directory.
		newPath := filepath.Join(frc.FullPath(), strconv.FormatInt(rand.Int63(), 10))
		frc2 := NewFileResourceContextWithPath(newPath)
		for frc2.Exists() {
			newPath = filepath.Join(frc.FullPath(), strconv.FormatInt(rand.Int63(), 10))
			frc2 = NewFileResourceContextWithPath(newPath)
		}
		frc = frc2
	} else if frc.Exists() {
		p := frc.FullPath()
		dir, tail := path.Split(p)
		ext := path.Ext(tail)
		basename := tail
		uniquify := time.Now().UTC().Format(".20060102.150405")
		if len(ext) > 0 {
			basename = tail[:len(tail)-len(ext)] + uniquify
			frc.SetFullPath(path.Join(dir, basename+ext))
			for counter := 1; frc.Exists(); counter++ {
				frc.SetFullPath(path.Join(dir, basename+"."+strconv.Itoa(counter)+ext))
			}
		} else {
			basename = basename + uniquify
			frc.SetFullPath(path.Join(dir, basename))
			for counter := 1; frc.Exists(); counter++ {
				frc.SetFullPath(path.Join(dir, basename+"."+strconv.Itoa(counter)))
			}
		}
	}
	log.Print("[FileResource]: Will use path ", frc.FullPath())
	return frc.FullPath(), req, frc, 0, nil
}
func Test2Q_RandomOps(t *testing.T) {
	size := 128
	l, err := New2Q(128)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	n := 200000
	for i := 0; i < n; i++ {
		key := rand.Int63() % 512
		r := rand.Int63()
		switch r % 3 {
		case 0:
			l.Add(key, key)
		case 1:
			l.Get(key)
		case 2:
			l.Remove(key)
		}

		if l.recent.Len()+l.frequent.Len() > size {
			t.Fatalf("bad: recent: %d freq: %d",
				l.recent.Len(), l.frequent.Len())
		}
	}
}
// AES-256 friendly; great for session IDs.
func KeyGen() string {
	const keyLen = 32
	curtime := time.Now()
	second := uint64ToByte(uint64(curtime.Unix()))
	nano := uint64ToByte(uint64(curtime.UnixNano()))
	rand1 := uint64ToByte(uint64(rand.Int63()))
	rand2 := uint64ToByte(uint64(rand.Int63()))

	// Interleave the time bytes with the random bytes.
	b := []byte{}
	for key, value := range second {
		b = append(b, value, rand1[key])
	}
	for key, value := range nano {
		b = append(b, value, rand2[key])
	}

	hash := sha256.New()
	defer hash.Reset()
	hash.Write(b)
	b = hash.Sum(nil)

	str := base64.URLEncoding.EncodeToString(b)
	return str[:keyLen]
}
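// A hedged usage sketch (not part of the original package): the 32-character
// string from KeyGen is 32 bytes of base64 text, which is the key length
// aes.NewCipher expects for AES-256. The helper name newSessionCipher and the
// imports "crypto/aes" and "crypto/cipher" are assumptions for illustration.
func newSessionCipher() (cipher.Block, error) {
	key := KeyGen() // 32 bytes, derived from time + rand.Int63() via SHA-256
	return aes.NewCipher([]byte(key))
}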
//
// the application wants to create a paxos peer.
// the ports of all the paxos peers (including this one)
// are in peers[]. this server's port is peers[me].
//
func Make(peers []string, me int, rpcs *rpc.Server) *Paxos {
	px := &Paxos{}
	px.peers = peers
	px.me = me

	// Your initialization code here.

	if rpcs != nil {
		// caller will create socket &c
		rpcs.Register(px)
	} else {
		rpcs = rpc.NewServer()
		rpcs.Register(px)

		// prepare to receive connections from clients.
		// change "unix" to "tcp" to use over a network.
		os.Remove(peers[me]) // only needed for "unix"
		l, e := net.Listen("unix", peers[me])
		if e != nil {
			log.Fatal("listen error: ", e)
		}
		px.l = l

		// please do not change any of the following code,
		// or do anything to subvert it.

		// create a thread to accept RPC connections
		go func() {
			for px.dead == false {
				conn, err := px.l.Accept()
				if err == nil && px.dead == false {
					if px.unreliable && (rand.Int63()%1000) < 100 {
						// discard the request.
						conn.Close()
					} else if px.unreliable && (rand.Int63()%1000) < 200 {
						// process the request but force discard of reply.
						c1 := conn.(*net.UnixConn)
						f, _ := c1.File()
						err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
						if err != nil {
							fmt.Printf("shutdown: %v\n", err)
						}
						px.rpcCount++
						go rpcs.ServeConn(conn)
					} else {
						px.rpcCount++
						go rpcs.ServeConn(conn)
					}
				} else if err == nil {
					conn.Close()
				}
				if err != nil && px.dead == false {
					fmt.Printf("Paxos(%v) accept: %v\n", me, err.Error())
				}
			}
		}()
	}

	return px
}
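// A hedged sketch of how an application might bring up a small Paxos group
// with Make above: one Unix-domain socket path per peer, and a nil rpcs so
// Make creates its own RPC server and listener. The helper name and the
// socket-path naming scheme are illustrative assumptions, not part of the
// original code; requires "fmt" and "os" in the file's imports.
func startPaxosGroup(n int) []*Paxos {
	peers := make([]string, n)
	for i := range peers {
		peers[i] = fmt.Sprintf("/var/tmp/px-%v-%v", os.Getpid(), i)
	}
	pxa := make([]*Paxos, n)
	for i := range pxa {
		pxa[i] = Make(peers, i, nil)
	}
	return pxa
}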
func MakeTestTasksN(height, width, connectivity int) graph.Tasks {
	t := make(graph.Tasks)
	levels := make([][]*graph.Task, height)
	for i := range levels {
		levels[i] = make([]*graph.Task, width)
		for j := range levels[i] {
			task := &graph.Task{
				Id:           graph.TaskID(strconv.FormatInt(rand.Int63(), 10)),
				Start:        rand.Int63(),
				End:          rand.Int63(),
				Completed:    rand.Int()%2 == 0,
				Dependencies: graph.MakeTaskIDSet(),
			}
			t[task.Id] = task
			levels[i][j] = task
		}
	}
	for depth, level := range levels[:height-1] {
		for _, task := range level {
			connections := rand.Int31n(int32(connectivity))
			for i := 0; i < int(connections); i++ {
				row, col := rand.Int31n(int32(height-depth-1)), rand.Int31n(int32(width))
				task.Dependencies.Add(levels[depth+int(row)+1][col].Id)
			}
		}
	}
	return t
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant key/value service.
// me is the index of the current server in servers[].
//
func StartServer(servers []string, me int) *KVPaxos {
	// call gob.Register on structures you want
	// Go's RPC library to marshall/unmarshall.
	gob.Register(Op{})

	kv := new(KVPaxos)
	kv.me = me

	// initialization
	kv.kvData = make(map[string]string)
	kv.preReply = make(map[string]*Op)
	kv.seqChan = make(map[int]chan *Op)
	kv.maxInstanceID = -1

	rpcs := rpc.NewServer()
	rpcs.Register(kv)

	kv.px = paxos.Make(servers, me, rpcs)

	os.Remove(servers[me])
	l, e := net.Listen("unix", servers[me])
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	kv.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.
	go func() {
		for kv.dead == false {
			conn, err := kv.l.Accept()
			if err == nil && kv.dead == false {
				if kv.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if kv.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && kv.dead == false {
				fmt.Printf("KVPaxos(%v) accept: %v\n", me, err.Error())
				kv.kill()
			}
		}
	}()

	go kv.updateStatus()

	return kv
}
func BenchmarkARC_Freq(b *testing.B) {
	l, err := NewARC(8192)
	if err != nil {
		b.Fatalf("err: %v", err)
	}

	trace := make([]int64, b.N*2)
	for i := 0; i < b.N*2; i++ {
		if i%2 == 0 {
			trace[i] = rand.Int63() % 16384
		} else {
			trace[i] = rand.Int63() % 32768
		}
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		l.Add(trace[i], trace[i])
	}
	var hit, miss int
	for i := 0; i < b.N; i++ {
		_, ok := l.Get(trace[i])
		if ok {
			hit++
		} else {
			miss++
		}
	}
	b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
}
func StartServer(vshost string, me string) *PBServer {
	pb := new(PBServer)
	pb.me = me
	pb.vs = viewservice.MakeClerk(me, vshost)

	// Your pb.* initializations here.
	pb.view = viewservice.View{}
	pb.content = make(map[string]string)
	pb.client = make(map[string]string)

	rpcs := rpc.NewServer()
	rpcs.Register(pb)

	os.Remove(pb.me)
	l, e := net.Listen("unix", pb.me)
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	pb.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.
	go func() {
		for pb.isdead() == false {
			conn, err := pb.l.Accept()
			if err == nil && pb.isdead() == false {
				if pb.isunreliable() && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if pb.isunreliable() && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && pb.isdead() == false {
				fmt.Printf("PBServer(%v) accept: %v\n", me, err.Error())
				pb.kill()
			}
		}
	}()

	go func() {
		for pb.isdead() == false {
			pb.tick()
			time.Sleep(viewservice.PingInterval)
		}
	}()

	return pb
}
func TestPostSpans(t *testing.T) {
	rand.Seed(time.Now().UnixNano())
	traceID := fmt.Sprintf("%016x", rand.Int63())
	id, name, duration := fmt.Sprintf("%016x", rand.Int63()), "test_post_spans", int64(time.Microsecond*1000)
	id2, name2, duration2 := fmt.Sprintf("%016x", rand.Int63()), "test_post_spans2", int64(time.Microsecond*1000)
	debug := true
	ipv4, port, serviceName1, serviceName2 := "127.0.0.1", int64(80), "store_test", "store_test2"
	ep1 := &models.Endpoint{IPV4: &ipv4, Port: &port, ServiceName: &serviceName1}
	ep2 := &models.Endpoint{IPV4: &ipv4, Port: &port, ServiceName: &serviceName2}
	ts := time.Now().UnixNano() / 1000
	annKey1, annValue1 := "key1", base64.StdEncoding.EncodeToString([]byte("value1"))

	req := []*models.Span{
		{
			TraceID:   &traceID,
			ID:        &id,
			Name:      &name,
			ParentID:  nil,
			Timestamp: ts,
			Duration:  &duration,
			Debug:     &debug,
			Annotations: []*models.Annotation{
				{ep1, Int64(ts + 100), models.AnnotationValue_SERVER_RECV.Addr()},
				{ep1, Int64(ts + 200), models.AnnotationValue_CLIENT_SEND.Addr()},
				{ep1, Int64(ts + 300), models.AnnotationValue_CLIENT_RECV.Addr()},
				{ep1, Int64(ts + 400), models.AnnotationValue_SERVER_SEND.Addr()},
			},
		},
		{
			TraceID:   &traceID,
			ID:        &id2,
			Name:      &name2,
			ParentID:  &id,
			Timestamp: ts + 200,
			Duration:  &duration2,
			Debug:     &debug,
			Annotations: []*models.Annotation{
				{ep2, Int64(ts + 210), models.AnnotationValue_SERVER_RECV.Addr()},
				{ep2, Int64(ts + 220), models.AnnotationValue_CLIENT_SEND.Addr()},
				{ep2, Int64(ts + 230), models.AnnotationValue_CLIENT_RECV.Addr()},
				{ep2, Int64(ts + 240), models.AnnotationValue_SERVER_SEND.Addr()},
			},
			BinaryAnnotations: []*models.BinaryAnnotation{
				{models.AnnotationType_STRING.Addr(), ep2, &annKey1, &annValue1},
			},
		},
	}

	reqData, _ := json.Marshal(req)
	resp, err := http.Post("http://localhost:8081/api/v1/spans", "application/json", bytes.NewReader(reqData))
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		msg, _ := ioutil.ReadAll(resp.Body)
		t.Logf("%s", msg)
		t.Fatal(resp.StatusCode)
	}
}
// GetTestListJSON returns a test List encoded as JSON.
func GetTestListJSON() string {
	var list List
	list.Items = append(list.Items, Item{Key: strconv.FormatInt(rand.Int63(), 10), Icon: "icon1", Name: "name", Profile: "profile"})
	list.Items = append(list.Items, Item{Key: strconv.FormatInt(rand.Int63(), 10), Icon: "icon2", Name: "name", Profile: "profile"})
	list.Items = append(list.Items, Item{Key: strconv.FormatInt(rand.Int63(), 10), Icon: "icon3", Name: "name", Profile: "profile"})
	jsonBytes, _ := json.Marshal(list)
	return string(jsonBytes)
}
func GenerateHandle() *Handle {
	return &Handle{
		timestamp: time.Now().Unix(),
		n1:        rand.Int63(),
		n2:        rand.Int63(),
		n3:        rand.Int63(),
	}
}
func BenchmarkVolumeWrite(b *testing.B) {
	var (
		v     *Volume
		err   error
		file  = "./test/testb2"
		ifile = "./test/testb2.idx"
		data  = make([]byte, _16kb) // 16KB of needle data
	)
	os.Remove(file)
	os.Remove(ifile)
	defer os.Remove(file)
	defer os.Remove(ifile)
	if _, err = rand.Read(data); err != nil {
		b.Errorf("rand.Read() error(%v)", err)
		b.FailNow()
	}
	if v, err = NewVolume(1, file, ifile, testConf); err != nil {
		b.Errorf("NewVolume() error(%v)", err)
		b.FailNow()
	}
	defer v.Close()
	b.SetParallelism(8)
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		var (
			i, j int
			ts   int32
			t    int64
			err1 error
			n    *needle.Needle
			ns   = make([]needle.Needle, 9)
			buf  = make([]byte, 163840) // 160KB, room for 9 needles
		)
		for i = 0; i < 9; i++ {
			t = mrand.Int63()
			n = &ns[i]
			n.Init(t, 1, data)
			n.Write(buf[ts:])
			ts += n.TotalSize
		}
		for pb.Next() {
			for j = 0; j < 9; j++ {
				t = mrand.Int63()
				n = &ns[j]
				n.Key = t
				binary.BigEndian.PutInt64(buf[n.TotalSize+needle.KeyOffset:], n.Key)
			}
			if err1 = v.Write(ns, buf[:ts]); err1 != nil {
				b.Errorf("Add() error(%v)", err1)
				v.Unlock()
				b.FailNow()
			}
			b.SetBytes(int64(ts))
		}
	})
	os.Remove(file)
	os.Remove(ifile)
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
func StartServer(servers []string, me int) *ShardMaster {
	gob.Register(Op{})

	sm := new(ShardMaster)
	sm.me = me

	sm.configs = make([]Config, 1)
	sm.configs[0].Groups = map[int64][]string{}

	// your initialization
	sm.cfgnum = 0

	rpcs := rpc.NewServer()
	rpcs.Register(sm)

	sm.px = paxos.Make(servers, me, rpcs)

	os.Remove(servers[me])
	l, e := net.Listen("unix", servers[me])
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	sm.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.
	go func() {
		for sm.dead == false {
			conn, err := sm.l.Accept()
			if err == nil && sm.dead == false {
				if sm.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if sm.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && sm.dead == false {
				fmt.Printf("ShardMaster(%v) accept: %v\n", me, err.Error())
				sm.Kill()
			}
		}
	}()

	return sm
}
// create a user for that organisation
func generateUser(org *db.Organisation) {
	u := &db.User{
		OrgId:    org.Id,
		Email:    strconv.FormatInt(rand.Int63(), 10),
		Password: strconv.FormatInt(rand.Int63(), 10),
	}
	db.AddTemp("users", u)
	users = append(users, u)
}
func randPos(maxlen, maxpos, lastpos int) (pos, length int64) {
	mrand.Seed(time.Now().UnixNano())
	maxlenI, maxposI, lastposI := int64(maxlen), int64(maxpos), int64(lastpos)
	for (pos+length > lastposI) || (pos+length == 0) {
		pos = mrand.Int63() % maxposI
		length = mrand.Int63() % maxlenI
	}
	return pos, length
}
func (m *MTProto) SendMedia(peer_id string, file string) (err error) {
	_512k := 512 * 1024
	peer, _ := parsePeerById(peer_id)
	bytes, err := ioutil.ReadFile(file)
	if err != nil {
		return fmt.Errorf("failed to read file: %#v", err)
	}
	md5_hash := fmt.Sprintf("%x", md5.Sum(bytes))
	fileId := rand.Int63()
	parts := int32(len(bytes)/_512k) + 1
	start := 0

	// upload the file in 512KB chunks
	for i := int32(0); i < parts; i++ {
		fmt.Println(i, "/", parts)
		resp := make(chan TL, 1)
		end := start + _512k
		if end > len(bytes) {
			end = len(bytes)
		}
		m.queueSend <- packetToSend{
			TL_upload_saveFilePart{
				fileId,
				i,
				bytes[start:end],
			},
			resp,
		}
		x := <-resp
		_, ok := x.(TL_boolTrue)
		if !ok {
			return fmt.Errorf("upload_saveFilePart RPC: %#v", x)
		}
		start = end
	}

	// send the uploaded file as a photo
	resp := make(chan TL, 1)
	m.queueSend <- packetToSend{
		TL_messages_sendMedia{
			peer,
			TL_inputMediaUploadedPhoto{
				TL_inputFile{
					fileId,
					parts,
					file,
					md5_hash,
				},
			},
			rand.Int63(),
		},
		resp,
	}
	x := <-resp
	_, ok := x.(TL_messages_statedMessage)
	if !ok {
		return fmt.Errorf("messages_sendMedia RPC: %#v", x)
	}
	return nil
}
func TestStoreSpans(t *testing.T) {
	rand.Seed(time.Now().UnixNano())
	store, err := mysql.Open()
	if err != nil {
		t.Fatal(err)
	}
	defer store.Close()

	traceID := fmt.Sprintf("%016x", rand.Int63())
	id, name, duration := fmt.Sprintf("%016x", rand.Int63()), escape("TestPostSpans"), int64(time.Microsecond*1000)
	id2, name2, duration2 := fmt.Sprintf("%016x", rand.Int63()), escape("TestPostSpans2"), int64(time.Microsecond*1000)
	debug := true
	ipv4, port, serviceName1, serviceName2 := "127.0.0.1", int64(80), "store_test", "store_test2"
	ep1 := &models.Endpoint{IPV4: &ipv4, Port: &port, ServiceName: &serviceName1}
	ep2 := &models.Endpoint{IPV4: &ipv4, Port: &port, ServiceName: &serviceName2}
	ts := time.Now().UnixNano() / 1000
	annKey1, annValue1 := "key1", base64.StdEncoding.EncodeToString([]byte("value1"))

	req := []*models.Span{
		{
			TraceID:   &traceID,
			ID:        &id,
			Name:      &name,
			ParentID:  nil,
			Timestamp: ts,
			Duration:  &duration,
			Debug:     &debug,
			Annotations: []*models.Annotation{
				{ep1, Int64(ts + 100), models.AnnotationValue_SERVER_RECV.Addr()},
				{ep1, Int64(ts + 200), models.AnnotationValue_CLIENT_SEND.Addr()},
				{ep1, Int64(ts + 300), models.AnnotationValue_CLIENT_RECV.Addr()},
				{ep1, Int64(ts + 400), models.AnnotationValue_SERVER_SEND.Addr()},
			},
		},
		{
			TraceID:   &traceID,
			ID:        &id2,
			Name:      &name2,
			ParentID:  &id,
			Timestamp: ts + 200,
			Duration:  &duration2,
			Debug:     &debug,
			Annotations: []*models.Annotation{
				{ep2, Int64(ts + 210), models.AnnotationValue_SERVER_RECV.Addr()},
				{ep2, Int64(ts + 220), models.AnnotationValue_CLIENT_SEND.Addr()},
				{ep2, Int64(ts + 230), models.AnnotationValue_CLIENT_RECV.Addr()},
				{ep2, Int64(ts + 240), models.AnnotationValue_SERVER_SEND.Addr()},
			},
			BinaryAnnotations: []*models.BinaryAnnotation{
				{models.AnnotationType_STRING.Addr(), ep2, &annKey1, &annValue1},
			},
		},
	}

	err = store.StoreSpans(models.ListOfSpans(req))
	if err != nil {
		t.Fatal(err)
	}
}
func Benchmark_Get_Complex_Map(b *testing.B) {
	set := "get_bench_str_10000"
	// bins := []*Bin{NewBin("b", []interface{}{"a simple string", nil, rand.Int63(), []byte{12, 198, 211}})}
	bins := []*Bin{NewBin("b", map[interface{}]interface{}{rand.Int63(): rand.Int63()})}
	b.N = 1000
	runtime.GC()
	b.ResetTimer()
	makeDataForGetBench(set, bins)
	doGet(set, b)
}
func getMessage() []*TCPPacket {
	ack := uint32(rand.Int63())
	seq2 := uint32(rand.Int63())
	seq := uint32(rand.Int63())
	return []*TCPPacket{
		buildPacket(true, ack, seq, []byte("GET / HTTP/1.1\r\n\r\n"), time.Now()),
		buildPacket(false, seq+18, seq2, []byte("HTTP/1.1 200 OK\r\n\r\n"), time.Now()),
	}
}
//
// me is the network address this oblivious replica listens on.
//
func StartServer(me string) *ObliviousReplica {
	or := new(ObliviousReplica)
	or.me = me

	rpcs := rpc.NewServer()
	rpcs.Register(or)

	or.UID = GetMD5Hash(me)
	os.Mkdir("data", 0700)
	or.dataPath = "data/replica-" + or.UID
	os.Mkdir(or.dataPath, 0700)

	os.Remove(me)
	l, e := net.Listen(Network, me)
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	or.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.
	go func() {
		for or.dead == false {
			conn, err := or.l.Accept()
			if err == nil && or.dead == false {
				if or.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if or.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && or.dead == false {
				fmt.Printf("ObliviousReplica(%v) accept: %v\n", me, err.Error())
				or.Kill()
			}
		}
	}()
	return or
}
//
// Test the scenarios described in Figure 8 of the extended Raft paper. Each
// iteration asks a leader, if there is one, to insert a command in the Raft
// log. If there is a leader, that leader will fail quickly with a high
// probability (perhaps without committing the command), or crash after a while
// with low probability (most likely committing the command). If the number of
// alive servers isn't enough to form a majority, perhaps start a new server.
// The leader in a new term may try to finish replicating log entries that
// haven't been committed yet.
//
func TestFigure8(t *testing.T) {
	servers := 5
	cfg := make_config(t, servers, false)
	defer cfg.cleanup()

	fmt.Printf("Test: Figure 8 ...\n")

	cfg.one(rand.Int(), 1)

	nup := servers
	for iters := 0; iters < 1000; iters++ {
		leader := -1
		for i := 0; i < servers; i++ {
			if cfg.rafts[i] != nil {
				_, _, ok := cfg.rafts[i].Start(rand.Int())
				if ok {
					leader = i
				}
			}
		}

		if (rand.Int() % 1000) < 100 {
			ms := rand.Int63() % (int64(RaftElectionTimeout/time.Millisecond) / 2)
			time.Sleep(time.Duration(ms) * time.Millisecond)
		} else {
			ms := (rand.Int63() % 13)
			time.Sleep(time.Duration(ms) * time.Millisecond)
		}

		if leader != -1 {
			cfg.crash1(leader)
			nup -= 1
		}

		if nup < 3 {
			s := rand.Int() % servers
			if cfg.rafts[s] == nil {
				cfg.start1(s)
				cfg.connect(s)
				nup += 1
			}
		}
	}

	for i := 0; i < servers; i++ {
		if cfg.rafts[i] == nil {
			cfg.start1(i)
			cfg.connect(i)
		}
	}

	cfg.one(rand.Int(), servers)

	fmt.Printf(" ... Passed\n")
}
// Around 0.37 ns/op on my laptop.
// This is 25x faster than string comparisons, so iterating over and merging
// lists of ints would be a lot faster than doing the same for strings.
func BenchmarkInt64(b *testing.B) {
	rand.Seed(time.Now().UnixNano())
	var m, n int64
	m = rand.Int63()
	n = rand.Int63()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = m == n
	}
}
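// A hedged companion sketch (not part of the original suite): benchmarks the
// equivalent string comparison so the "25x faster" claim above can be checked
// on the same machine. Assumes "strconv" is imported alongside the packages
// BenchmarkInt64 already uses.
func BenchmarkStringCompare(b *testing.B) {
	rand.Seed(time.Now().UnixNano())
	s1 := strconv.FormatInt(rand.Int63(), 10)
	s2 := strconv.FormatInt(rand.Int63(), 10)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = s1 == s2
	}
}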
func Benchmark_Siphash(b *testing.B) {
	size := 1024
	k1, k2 := uint64(rand.Int63()), uint64(rand.Int63())
	msg := randArray(size)
	b.SetBytes(int64(size))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		siphash.Hash(k1, k2, msg)
	}
}
func TestFigure8Unreliable(t *testing.T) {
	servers := 5
	cfg := make_config(t, servers, true)
	defer cfg.cleanup()

	fmt.Printf("Test: Figure 8 (unreliable) ...\n")

	cfg.one(rand.Int()%10000, 1)

	nup := servers
	for iters := 0; iters < 1000; iters++ {
		if iters == 200 {
			cfg.setlongreordering(true)
		}
		leader := -1
		for i := 0; i < servers; i++ {
			_, _, ok := cfg.rafts[i].Start(rand.Int() % 10000)
			if ok && cfg.connected[i] {
				leader = i
			}
		}

		if (rand.Int() % 1000) < 100 {
			ms := rand.Int63() % (int64(RaftElectionTimeout/time.Millisecond) / 2)
			time.Sleep(time.Duration(ms) * time.Millisecond)
		} else {
			ms := (rand.Int63() % 13)
			time.Sleep(time.Duration(ms) * time.Millisecond)
		}

		if leader != -1 && (rand.Int()%1000) < int(RaftElectionTimeout/time.Millisecond)/2 {
			cfg.disconnect(leader)
			nup -= 1
		}

		if nup < 3 {
			s := rand.Int() % servers
			if cfg.connected[s] == false {
				cfg.connect(s)
				nup += 1
			}
		}
	}

	for i := 0; i < servers; i++ {
		if cfg.connected[i] == false {
			cfg.connect(i)
		}
	}

	cfg.one(rand.Int()%10000, servers)

	fmt.Printf(" ... Passed\n")
}