func doConcurrent(t *testing.T, unreliable bool) { smPorts, gids, kvPorts := setup("conc"+strconv.FormatBool(unreliable), unreliable, numGroups, numReplicas) // Start listening on reboot channel in case we're testing persistence //rebootDone := 0 //go rebootListener(&rebootDone, unreliable, smPorts, gids, kvPorts, kvServers, numGroups, numReplicas) smClerk := shardmaster.MakeClerk(smPorts, false) for i := 0; i < len(gids); i++ { smClerk.Join(gids[i], kvPorts[i]) } const npara = 11 var doneChannels [npara]chan bool for i := 0; i < npara; i++ { doneChannels[i] = make(chan bool) go func(me int) { ok := true defer func() { doneChannels[me] <- ok }() kvClerk := shardkv.MakeClerk(smPorts, false) mysmClerk := shardmaster.MakeClerk(smPorts, false) key := strconv.Itoa(me) last := "" for iters := 0; iters < 3; iters++ { nv := strconv.Itoa(rand.Int()) v := kvClerk.PutHash(key, nv) if v != last { ok = false t.Fatalf("PutHash(%v) expected %v got %v\n", key, last, v) } last = NextValue(last, nv) v = kvClerk.Get(key) if v != last { ok = false t.Fatalf("Get(%v) expected %v got %v\n", key, last, v) } mysmClerk.Move(rand.Int()%shardmaster.NShards, gids[rand.Int()%len(gids)]) time.Sleep(time.Duration(rand.Int()%30) * time.Millisecond) } }(i) } for i := 0; i < npara; i++ { x := <-doneChannels[i] if x == false { t.Fatalf("something is wrong") } } //rebootDone = 1 time.Sleep(2 * time.Second) }
func doConcurrent(t *testing.T, unreliable bool) { smh, gids, ha, _, clean := setup("conc"+strconv.FormatBool(unreliable), unreliable) defer clean() mck := shardmaster.MakeClerk(smh) for i := 0; i < len(gids); i++ { mck.Join(gids[i], ha[i]) } const npara = 11 var ca [npara]chan bool for i := 0; i < npara; i++ { ca[i] = make(chan bool) go func(me int) { ok := true defer func() { ca[me] <- ok }() ck := MakeClerk(smh) mymck := shardmaster.MakeClerk(smh) key := strconv.Itoa(me) last := "" for iters := 0; iters < 3; iters++ { nv := strconv.Itoa(rand.Int()) v := ck.PutHash(key, nv) if v != last { ok = false t.Fatalf("PutHash(%v) expected %v got %v\n", key, last, v) } last = NextValue(last, nv) v = ck.Get(key) if v != last { ok = false t.Fatalf("Get(%v) expected %v got %v\n", key, last, v) } mymck.Move(rand.Int()%shardmaster.NShards, gids[rand.Int()%len(gids)]) time.Sleep(time.Duration(rand.Int()%30) * time.Millisecond) } }(i) } for i := 0; i < npara; i++ { x := <-ca[i] if x == false { t.Fatalf("something is wrong") } } }
// main wires the shard KV pub/sub system to a web front end: it joins one
// replica group, subscribes a clerk to a fixed key set, and serves a
// websocket UI via martini.
//
// NOTE(review): clean, smh, gids, ha, ck, h, listener, wsHandler and
// cleanupClerk are not defined in this function — presumably package-level
// globals/helpers initialized elsewhere in this file; verify before reuse.
func main() {
	defer clean()
	mck := shardmaster.MakeClerk(smh)
	mck.Join(gids[0], ha[0])
	// Subscribe the global clerk to keys "a".."e"; updates arrive on its
	// Receive channel and are forwarded by listener().
	ck.Subscribe("a")
	ck.Subscribe("b")
	ck.Subscribe("c")
	ck.Subscribe("d")
	ck.Subscribe("e")
	defer cleanupClerk(ck)
	go listener(ck)
	go h.run() // presumably the websocket hub loop — confirm
	m := martini.Classic()
	m.Use(render.Renderer())
	m.Get("/", func(r render.Render) { r.HTML(200, "index", "") })
	m.Get("/ws", wsHandler)
	m.Run() // blocks serving HTTP
}
func TestExpiryBasic(t *testing.T) { smh, gids, ha, _, clean, _ := setup("expiry-move", false, false) defer clean() fmt.Printf("Test: Expiry Basic ...\n") mck := shardmaster.MakeClerk(smh) mck.Join(gids[0], ha[0]) ck := MakeClerk(smh) defer cleanupClerk(ck) ttl := 2 * time.Second ck.PutExt("a", "x", false, ttl) v := ck.Get("a") if v != "x" { t.Fatalf("Get got wrong value") } time.Sleep(ttl) ov := ck.Get("a") if ov != "" { t.Fatalf("Get got value, should've expired") } ov = ck.PutHash("a", "b") if ov != "" { t.Fatalf("Put got value, should've expired") } fmt.Printf(" ... Passed\n") }
func MakeClerk(shardmasters []string) *Clerk { ck := new(Clerk) ck.sm = shardmaster.MakeClerk(shardmasters) // You'll have to modify MakeClerk. ck.clientID = nrand() // Unique ID for this client return ck }
func MakeClerk(shardmasters []string) *Clerk { ck := new(Clerk) ck.sm = shardmaster.MakeClerk(shardmasters) // You'll have to modify MakeClerk. ck.me = strconv.FormatInt(nrand(), 16) return ck }
func MakeClerk(shardmasters []string) *Clerk { ck := new(Clerk) ck.sm = shardmaster.MakeClerk(shardmasters) // You'll have to modify MakeClerk. ck.me = nrandint() return ck }
// TestBasic exercises Join/Leave reconfiguration: values written before
// each membership change must still be readable afterwards, through a
// sequence of group joins and then leaves.
// NOTE(review): relies on fixed 1s sleeps for reconfiguration to settle;
// the timing budget is an assumption of the lab harness — confirm.
func TestBasic(t *testing.T) {
	smh, gids, ha, _, clean := setup("basic", false)
	defer clean()

	fmt.Printf("Test: Basic Join/Leave ...\n")

	mck := shardmaster.MakeClerk(smh)
	mck.Join(gids[0], ha[0])

	ck := MakeClerk(smh)
	// Sanity-check Put/PutHash/Get semantics on a single key first.
	ck.Put("a", "x")
	v := ck.PutHash("a", "b") // PutHash returns the previous value
	if v != "x" {
		t.Fatalf("Puthash got wrong value")
	}
	ov := NextValue("x", "b")
	if ck.Get("a") != ov {
		t.Fatalf("Get got wrong value")
	}

	// Seed 10 random key/value pairs to track across reconfigurations.
	keys := make([]string, 10)
	vals := make([]string, len(keys))
	for i := 0; i < len(keys); i++ {
		keys[i] = strconv.Itoa(rand.Int())
		vals[i] = strconv.Itoa(rand.Int())
		ck.Put(keys[i], vals[i])
	}

	// are keys still there after joins?
	for g := 1; g < len(gids); g++ {
		mck.Join(gids[g], ha[g])
		time.Sleep(1 * time.Second) // let shards migrate
		for i := 0; i < len(keys); i++ {
			v := ck.Get(keys[i])
			if v != vals[i] {
				t.Fatalf("joining; wrong value; g=%v k=%v wanted=%v got=%v", g, keys[i], vals[i], v)
			}
			// Rewrite each key so the next round checks fresh data.
			vals[i] = strconv.Itoa(rand.Int())
			ck.Put(keys[i], vals[i])
		}
	}

	// are keys still there after leaves?
	for g := 0; g < len(gids)-1; g++ {
		mck.Leave(gids[g])
		time.Sleep(1 * time.Second)
		for i := 0; i < len(keys); i++ {
			v := ck.Get(keys[i])
			if v != vals[i] {
				t.Fatalf("leaving; wrong value; g=%v k=%v wanted=%v got=%v", g, keys[i], vals[i], v)
			}
			vals[i] = strconv.Itoa(rand.Int())
			ck.Put(keys[i], vals[i])
		}
	}

	fmt.Printf(" ... Passed\n")
}
// // the tester calls MakeClerk. // // masters[] is needed to call shardmaster.MakeClerk(). // // make_end(servername) turns a server name from a // Config.Groups[gid][i] into a labrpc.ClientEnd on which you can // send RPCs. // func MakeClerk(masters []*labrpc.ClientEnd, make_end func(string) *labrpc.ClientEnd) *Clerk { ck := new(Clerk) ck.sm = shardmaster.MakeClerk(masters) ck.make_end = make_end // You'll have to add code here. return ck }
func MakeClerk(shardmasters []string) *Clerk { ck := new(Clerk) ck.sm = shardmaster.MakeClerk(shardmasters) ck.id = rand.Int() ck.get_request_id = make_int_generator() return ck }
func MakeClerk(shardmasters []string) *Clerk { ck := new(Clerk) ck.sm = shardmaster.MakeClerk(shardmasters) // You'll have to modify MakeClerk. ck.uid = fmt.Sprintf("%d_%d", time.Now().UnixNano(), rand.Int63()) return ck }
func TestMove(t *testing.T) { smh, gids, ha, _, clean, _ := setup("move", false, false) defer clean() fmt.Printf("Test: Shards really move ...\n") mck := shardmaster.MakeClerk(smh) mck.Join(gids[0], ha[0]) ck := MakeClerk(smh) defer cleanupClerk(ck) // insert one key per shard for i := 0; i < shardmaster.NShards; i++ { ck.Put(string('0'+i), string('0'+i)) } // add group 1. mck.Join(gids[1], ha[1]) time.Sleep(5 * time.Second) // check that keys are still there. for i := 0; i < shardmaster.NShards; i++ { if ck.Get(string('0'+i)) != string('0'+i) { t.Fatalf("missing key/value") } } // remove sockets from group 0. for i := 0; i < len(ha[0]); i++ { os.Remove(ha[0][i]) } count := 0 var mu sync.Mutex for i := 0; i < shardmaster.NShards; i++ { go func(me int) { myck := MakeClerk(smh) defer cleanupClerk(myck) v := myck.Get(string('0' + me)) if v == string('0'+me) { mu.Lock() count++ mu.Unlock() } else { t.Fatalf("Get(%v) yielded %v\n", i, v) } }(i) } time.Sleep(10 * time.Second) if count > shardmaster.NShards/3 && count < 2*(shardmaster.NShards/3) { fmt.Printf(" ... Passed\n") } else { t.Fatalf("%v keys worked after killing 1/2 of groups; wanted %v", count, shardmaster.NShards/2) } }
func MakeClerk(shardmasters []string, network bool) *Clerk { ck := new(Clerk) ck.sm = shardmaster.MakeClerk(shardmasters, network) ck.me = nrand() ck.network = network ck.clientID = nrand() return ck }
func MakeClerk(shardmasters []string) *Clerk { ck := new(Clerk) ck.sm = shardmaster.MakeClerk(shardmasters) // You'll have to modify MakeClerk. ck.cid = nrand() ck.seq = -1 return ck }
func TestManyClientOneShard(t *testing.T) { nclients := 35 nseconds := 10 smPorts, gids, kvPorts := setup("basic", false, numGroups, numReplicas) //defer clean() fmt.Printf("\nBenchmark: many clients, one shard...\n") smClerk := shardmaster.MakeClerk(smPorts, true) for i := 0; i < len(gids); i++ { smClerk.Join(gids[i], kvPorts[i]) } counts := make([]int, nclients) for i := 0; i < nclients; i++ { go func(i int) { ck := shardkv.MakeClerk(smPorts, true) tStart := time.Now() count := 0 for time.Since(tStart).Seconds() < float64(nseconds) { ck.PutHash("a", strconv.Itoa(rand.Int())) count++ } counts[i] = count }(i) } toWait := 12 * time.Second time.Sleep(toWait) tot := 0 for _, c := range counts { tot += c } fmt.Printf("%f operations per second\n", float64(tot)/float64(nseconds)) fmt.Printf("\nBenchmark: many clients, many shards...\n") for i := 0; i < nclients; i++ { go func(i int) { ck := shardkv.MakeClerk(smPorts, true) tStart := time.Now() count := 0 for time.Since(tStart).Seconds() < float64(nseconds) { ck.PutHash(strconv.Itoa(rand.Int()), strconv.Itoa(rand.Int())) count++ } counts[i] = count }(i) } time.Sleep(toWait) tot = 0 for _, c := range counts { tot += c } fmt.Printf("%f operations per second\n", float64(tot)/float64(nseconds)) }
func MakeClerk(shardmasters []string) *Clerk { ck := new(Clerk) ck.sm = shardmaster.MakeClerk(shardmasters) // You'll have to modify MakeClerk. ck.ID = strconv.FormatInt(nrand(), 10) ck.Counter = 1 ck.Leaders = make(map[int64]string) return ck }
// Start all servers/clients on local machine.
// Returns a system and a clean function.
//
// Bring-up order matters: all shardmasters must be listening before the
// master clerk is created, and each group's replicas must be started
// before Join() is called for that group.
func localSetup(name string, args SetupArgs) (System, func()) {
	runtime.GOMAXPROCS(args.NumProcesses)

	// Start shardmasters on ports derived from the test name.
	smHosts := make([]string, args.NumMasters)
	sms := make([]*shardmaster.ShardMaster, args.NumMasters)
	for i := 0; i < args.NumMasters; i++ {
		smHosts[i] = port(name+"m", i)
		sms[i] = shardmaster.StartServer(smHosts, i)
	}
	smClerk := shardmaster.MakeClerk(smHosts)

	// Start ShardKV servers: NumGroups replica groups of NumReplicas each,
	// with gids 100, 101, ... Each group Joins as soon as it is up.
	gids := make([]int64, args.NumGroups)
	skvHosts := make([][]string, args.NumGroups)
	skvs := make([][]*shardkv.ShardKV, args.NumGroups)
	for i := 0; i < args.NumGroups; i++ {
		gids[i] = int64(100 + i)
		skvHosts[i] = make([]string, args.NumReplicas)
		skvs[i] = make([]*shardkv.ShardKV, args.NumReplicas)
		for j := 0; j < args.NumReplicas; j++ {
			skvHosts[i][j] = port(name+"s", i*args.NumReplicas+j)
			skvs[i][j] = shardkv.StartServer(gids[i], smHosts, skvHosts[i], j)
		}
		smClerk.Join(gids[i], skvHosts[i])
	}

	// Start clients.
	clerks := make([]*shardkv.Clerk, args.NumClients)
	for i := 0; i < args.NumClients; i++ {
		clerks[i] = shardkv.MakeClerk(smHosts)
	}

	system := System{
		ShardMasterHosts: smHosts,
		ShardMasters:     sms,
		ShardMasterClerk: smClerk,
		GroupIDs:         gids,
		ShardKVHosts:     skvHosts,
		ShardKVs:         skvs,
		Clerks:           clerks,
	}
	// clean kills every server; callers should defer it.
	clean := func() {
		for i := 0; i < args.NumMasters; i++ {
			sms[i].Kill()
		}
		for i := 0; i < args.NumGroups; i++ {
			for j := 0; j < args.NumReplicas; j++ {
				skvs[i][j].Kill()
			}
		}
	}
	return system, clean
}
// TestPubSubMove checks that a subscription to key "d" keeps delivering
// notifications while the key's shard is repeatedly moved between groups,
// and that publishes after Unsubscribe/close do not reach (or panic on)
// the closed Receive channel.
// NOTE(review): the ordering of channel close vs. in-flight publishes is
// intricate — the post-close Put loop is deliberately exercising the
// "no delivery after unsubscribe" guarantee.
func TestPubSubMove(t *testing.T) {
	smh, gids, ha, _, clean, _ := setup("pubsub-move", false, false)
	defer clean()
	fmt.Printf("Test: Pub/Sub Multiple Move ...\n")

	mck := shardmaster.MakeClerk(smh)
	for i := 0; i < len(gids); i++ {
		mck.Join(gids[i], ha[i])
	}
	ck := MakeClerk(smh)
	defer cleanupClerk(ck)

	// Subscribing yields a confirmation message carrying the key.
	ck.Subscribe("d")
	v := <-ck.Receive
	if v.Key() != "d" {
		t.Fatalf("Subscribed to wrong key")
	}

	for i := 0; i < shardmaster.NShards; i++ {
		val := string('0' + i)
		// Put random keys — only "d" should produce a notification.
		ck.Put("a", "aax")
		ck.Put("b", "bbx")
		ck.Put("c", "ccx")
		ck.Put("d", val)
		v = <-ck.Receive
		if v.PutValue() != val {
			t.Fatalf("Receive got the wrong value")
		}
		// Move shard 0 to a random group and repeat.
		mck.Move(0, gids[rand.Int()%len(gids)])
	}

	// Unsubscribe from key — confirmation also carries the key.
	ck.Unsubscribe("d")
	v = <-ck.Receive
	if v.Key() != "d" {
		t.Fatalf("Unsubscribed from wrong key")
	}
	close(ck.Receive)

	// Further Puts/Moves must not deliver anything; a stray publish would
	// panic by sending on the closed channel.
	for i := 0; i < shardmaster.NShards; i++ {
		val := string('0' + i)
		ck.Put("d", val)
		mck.Move(0, gids[rand.Int()%len(gids)])
	}
	time.Sleep(30 * time.Millisecond)
	fmt.Printf(" ... Passed\n")
}
func MakeClerk(shardmasters []string) *Clerk { ck := new(Clerk) ck.sm = shardmaster.MakeClerk(shardmasters) ck.id = strconv.Itoa(int(nrand())) ck.counter = 0 ck.addr = "/var/tmp/824-" + ck.id ck.Receive = make(chan messagebroker.PublishArgs) ck.mb = messagebroker.MakeClerk(ck.addr, ck.Receive) return ck }
func (cfg *config) shardclerk() *shardmaster.Clerk { // ClientEnds to talk to master service. ends := make([]*labrpc.ClientEnd, cfg.nmasters) for j := 0; j < cfg.nmasters; j++ { name := randstring(20) ends[j] = cfg.net.MakeEnd(name) cfg.net.Connect(name, cfg.mastername(j)) cfg.net.Enable(name, true) } return shardmaster.MakeClerk(ends) }
// TestPubSubJoin checks that a subscription to key "d" survives a group
// Join/Leave reconfiguration, and that after Unsubscribe and closing the
// Receive channel no further notifications are delivered.
func TestPubSubJoin(t *testing.T) {
	smh, gids, ha, _, clean, _ := setup("pubsub-join", false, false)
	defer clean()
	fmt.Printf("Test: Pub/Sub Join ...\n")

	mck := shardmaster.MakeClerk(smh)
	mck.Join(gids[0], ha[0])
	ck := MakeClerk(smh)
	defer cleanupClerk(ck)

	// Subscribe from key — confirmation message carries the key name.
	ck.Subscribe("d")
	v := <-ck.Receive
	if v.Key() != "d" {
		t.Fatalf("Subscribed to wrong key")
	}

	// Put random keys — no notification expected for these.
	ck.Put("a", "x")
	ck.Put("b", "x")
	ck.Put("c", "x")

	// Should receive changes to key=d
	ck.Put("d", "x")
	v = <-ck.Receive
	if v.PutValue() != "x" {
		t.Fatalf("Receive got the wrong value")
	}

	// New group join, old group leave — the subscription must migrate.
	mck.Join(gids[1], ha[1])
	mck.Leave(gids[0])

	// Unsubscribe from key
	ck.Unsubscribe("d")
	v = <-ck.Receive
	if v.Key() != "d" {
		t.Fatalf("Unsubscribed from wrong key")
	}

	// Close receive channel
	close(ck.Receive)

	// Souldn't receive anything now - will panic otherwise
	ck.Put("d", "x")
	time.Sleep(30 * time.Millisecond)
	fmt.Printf(" ... Passed\n")
}
func BenchmarkClientLatencyOneShard(benchmark *testing.B) { smPorts, gids, kvPorts := setup("basic", false, numGroups, numReplicas) //defer clean() fmt.Printf("\nBenchmark: client latency, one shard...\n") smClerk := shardmaster.MakeClerk(smPorts, true) smClerk.Join(gids[0], kvPorts[0]) kvClerk := shardkv.MakeClerk(smPorts, true) benchmark.ResetTimer() //tStart := time.Now() for i := 0; i < benchmark.N; i++ { kvClerk.PutHash("a", strconv.Itoa(rand.Int())) } }
// TestDiskRecovery writes ~100 MB of large key/value pairs through the
// service, polling progress until every item has been stored, as a
// disk-persistence workload.
//
// NOTE(review): counts[] is incremented by worker goroutines and read by
// the polling loop without synchronization — a data race under the Go
// memory model (run with -race to see it). A channel or mutex would fix
// it; flagged rather than changed because it alters the polling logic.
func TestDiskRecovery(t *testing.T) {
	nclients := 1
	// numBytes := 20971520 //20MB
	nItems := 99
	keySize := 32
	valSize := 1024 * 1024 // 1 MiB values
	smPorts, gids, kvPorts := setup("basic", false, numGroups, numReplicas)
	//defer clean()
	fmt.Printf("\nDisk recovery benchmark...\n")
	smClerk := shardmaster.MakeClerk(smPorts, true)
	smClerk.Join(gids[0], kvPorts[0])

	counts := make([]int, nclients)
	fmt.Printf("\nBenchmark: many clients, many shards...\n")
	for i := 0; i < nclients; i++ {
		go func(c int) {
			ck := shardkv.MakeClerk(smPorts, true)
			for i := 0; i < nItems; i++ {
				ck.Put(paddedRandIntString(keySize), paddedRandIntString(valSize))
				counts[c]++ // racy: read concurrently by the poll loop below
			}
		}(i)
	}
	fmt.Println("I am here")

	// Poll until the (racy) total reaches nItems.
	sum := 0
	toWait := 22 * time.Millisecond
	for sum < nItems {
		time.Sleep(toWait)
		sum = 0
		for _, i := range counts {
			sum += i
		}
		if sum%1024 == 0 {
			fmt.Printf("%v/%v\n", sum, nItems)
		}
	}
	fmt.Println("\n", nItems*(keySize+valSize)*nclients/1000000, " MB of data written to database...\n")
}
// TestExpiryMove checks that TTL expiry still works while the key's shard
// is moved between groups: each key must be readable before its TTL and
// gone after it, across repeated Move operations.
// NOTE(review): uses real wall-clock sleeps equal to the TTL; assumes the
// servers' expiry clocks agree with the test's — confirm.
func TestExpiryMove(t *testing.T) {
	smh, gids, ha, _, clean, _ := setup("expiry-move", false, false)
	defer clean()
	fmt.Printf("Test: Expiry Multiple Move ...\n")

	mck := shardmaster.MakeClerk(smh)
	for i := 0; i < len(gids); i++ {
		mck.Join(gids[i], ha[i])
	}
	ck := MakeClerk(smh)
	defer cleanupClerk(ck)

	ttl := 1 * time.Second
	for i := 0; i < shardmaster.NShards; i++ {
		key := string('0' + i)
		val := string('0' + i)
		ck.PutExt(key, val, false, ttl)
		v := ck.Get(key)
		if v != val {
			t.Fatalf("Get got wrong value, expected=%s, got=%s", val, v)
		}
		// Wait out the TTL; the key must then be gone.
		time.Sleep(ttl)
		ov := ck.Get(key)
		if ov != "" {
			t.Fatalf("Get got value, should've expired")
		}
		// Move shard 0 to a random group before the next round.
		mck.Move(0, gids[rand.Int()%len(gids)])
	}
	fmt.Printf(" ... Passed\n")
}
// TestLimp exercises reconfiguration while some replicas in every group
// are dead: one random replica per group is killed up front, and entire
// groups are killed after they Leave. Data must survive throughout.
// NOTE(review): depends on fixed 1-2s sleeps for migration to complete.
func TestLimp(t *testing.T) {
	smh, gids, ha, sa, clean := setup("limp", false)
	defer clean()

	fmt.Printf("Test: Reconfiguration with some dead replicas ...\n")

	mck := shardmaster.MakeClerk(smh)
	mck.Join(gids[0], ha[0])

	ck := MakeClerk(smh)
	ck.Put("a", "b")
	if ck.Get("a") != "b" {
		t.Fatalf("got wrong value")
	}

	// Kill one random replica in every group; Paxos should still make
	// progress with a majority alive.
	for g := 0; g < len(sa); g++ {
		to_kill := rand.Int() % len(sa[g])
		sa[g][to_kill].kill()
	}

	keys := make([]string, 10)
	vals := make([]string, len(keys))
	for i := 0; i < len(keys); i++ {
		keys[i] = strconv.Itoa(rand.Int())
		vals[i] = strconv.Itoa(rand.Int())
		ck.Put(keys[i], vals[i])
	}

	// are keys still there after joins?
	for g := 1; g < len(gids); g++ {
		mck.Join(gids[g], ha[g])
		time.Sleep(1 * time.Second)
		for i := 0; i < len(keys); i++ {
			v := ck.Get(keys[i])
			if v != vals[i] {
				t.Fatalf("joining; wrong value; g=%v k=%v wanted=%v got=%v", g, keys[i], vals[i], v)
			}
			vals[i] = strconv.Itoa(rand.Int())
			ck.Put(keys[i], vals[i])
		}
	}

	// are keys still there after leaves?
	for g := 0; g < len(gids)-1; g++ {
		mck.Leave(gids[g])
		time.Sleep(2 * time.Second)
		// Kill the departed group entirely; its shards must already have
		// been handed off.
		for i := 0; i < len(sa[g]); i++ {
			sa[g][i].kill()
		}
		for i := 0; i < len(keys); i++ {
			v := ck.Get(keys[i])
			if v != vals[i] {
				t.Fatalf("leaving; wrong value; g=%v k=%v wanted=%v got=%v", g, keys[i], vals[i], v)
			}
			vals[i] = strconv.Itoa(rand.Int())
			ck.Put(keys[i], vals[i])
		}
	}

	fmt.Printf(" ... Passed\n")
}
//
// Start a shardkv server.
// gid is the ID of the server's replica group.
// shardmasters[] contains the ports of the
// servers that implement the shardmaster.
// servers[] contains the ports of the servers
// in this replica group.
// Me is the index of this server in servers[].
//
func StartServer(gid int64, shardmasters []string, servers []string, me int) *ShardKV {
	// Register every type that travels through gob (Paxos log / RPC).
	gob.Register(Op{})
	gob.Register(shardmaster.Config{})
	gob.Register(Xaction{})
	gob.Register(GetArgs{})
	gob.Register(PutArgs{})
	gob.Register(GimmeDataArgs{})
	gob.Register(GimmeDataReply{})

	kv := new(ShardKV)
	kv.me = me
	kv.gid = gid
	kv.sm = shardmaster.MakeClerk(shardmasters)
	// Your initialization code here.
	// Don't call Join().
	kv.now = kv.sm.Query(0)  // current config starts at config 0
	kv.next = kv.sm.Query(0) // next config, advanced by tick()
	kv.contiguous = -1
	kv.storage = make(map[string]string)
	kv.xactions = make(map[string]Xaction)
	kv.versions = make(map[int]Snapshot)

	rpcs := rpc.NewServer()
	rpcs.Register(kv)

	kv.px = paxos.Make(servers, me, rpcs)

	// Listen on a unix-domain socket named after this server's entry.
	os.Remove(servers[me])
	l, e := net.Listen("unix", servers[me])
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	kv.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.

	// Accept loop: when unreliable, randomly drop requests or discard
	// replies to simulate a lossy network.
	go func() {
		for kv.dead == false {
			conn, err := kv.l.Accept()
			if err == nil && kv.dead == false {
				if kv.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if kv.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && kv.dead == false {
				fmt.Printf("ShardKV(%v) accept: %v\n", me, err.Error())
				kv.kill()
			}
		}
	}()

	// Poll for reconfigurations every 250ms.
	go func() {
		for kv.dead == false {
			kv.tick()
			time.Sleep(250 * time.Millisecond)
		}
	}()

	return kv
}
func MakeClerk(shardmasters []string) *Clerk { ck := new(Clerk) ck.sm = shardmaster.MakeClerk(shardmasters) ck.clientId = GenUUID() return ck }
//
// Start a shardkv server.
// gid is the ID of the server's replica group.
// shardmasters[] contains the ports of the
// servers that implement the shardmaster.
// servers[] contains the ports of the servers
// in this replica group.
// Me is the index of this server in servers[].
//
func StartServer(gid int64, shardmasters []string, servers []string, me int) *ShardKV {
	gob.Register(Op{})

	kv := new(ShardKV)
	kv.me = me
	kv.gid = gid
	// uid combines replica index and group id, e.g. "2-100".
	kv.uid = strconv.Itoa(me) + string("-") + strconv.FormatInt(gid, 10)
	kv.sm = shardmaster.MakeClerk(shardmasters)
	kv.servers = servers
	// Your initialization code here.
	kv.logged = make(map[int64]bool)     // request de-duplication table
	kv.values = make(map[string]string)  // the key/value store itself
	// Don't call Join().

	rpcs := rpc.NewServer()
	rpcs.Register(kv)

	kv.px = paxos.Make(servers, me, rpcs)

	// Listen on a unix-domain socket named after this server's entry.
	os.Remove(servers[me])
	l, e := net.Listen("unix", servers[me])
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	kv.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.

	// Accept loop: when unreliable, randomly drop requests or discard
	// replies to simulate a lossy network.
	go func() {
		for kv.dead == false {
			conn, err := kv.l.Accept()
			if err == nil && kv.dead == false {
				if kv.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if kv.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && kv.dead == false {
				fmt.Printf("ShardKV(%v) accept: %v\n", me, err.Error())
				kv.kill()
			}
		}
	}()

	// Poll for reconfigurations every 250ms.
	go func() {
		for kv.dead == false {
			kv.tick()
			time.Sleep(250 * time.Millisecond)
		}
	}()

	return kv
}
//
// Start a shardkv server.
// gid is the ID of the server's replica group.
// shardmasters[] contains the ports of the
// servers that implement the shardmaster.
// servers[] contains the ports of the servers
// in this replica group.
// Me is the index of this server in servers[].
//
func StartServer(gid int64, shardmasters []string, servers []string, me int) *ShardKV {
	gob.Register(Op{})
	gob.Register(ServerState{})

	kv := new(ShardKV)
	kv.me = me
	kv.gid = gid
	kv.sm = shardmaster.MakeClerk(shardmasters)
	// Your initialization code here.
	// Don't call Join().
	kv.localLog = make(map[int]Op) // applied Paxos ops by sequence number
	kv.Counter = 1
	kv.horizon = 0
	kv.configNum = -1
	kv.max = 0

	// setup cell storage
	// NOTE(review): storage is backed by an external service at
	// 127.0.0.1:27017 (presumably MongoDB — confirm); the random id means
	// state is NOT recoverable across restarts.
	id := int(rand.Int31n(1000000)) // TODO: change to be recoverable?
	kv.storage = MakeStorage(id, 1000000000, "127.0.0.1:27017")
	kv.storage.Clear()
	kv.current = ServerState{make(map[int]map[string]string), make(map[string]ClientReply)}
	kv.results = make(map[string]ClientReply)
	kv.configs = make(map[int]shardmaster.Config)
	kv.configs[-1] = shardmaster.Config{} // sentinel pre-initial config

	rpcs := rpc.NewServer()
	rpcs.Register(kv)

	kv.px = paxos.Make(servers, me, rpcs)
	kv.uid = strconv.FormatInt(nrand(), 10)

	// Logging: append gob-encoded entries to a per-server log file.
	kv.logFilename = fmt.Sprintf("logs/paxos_log_%d_%d.log", kv.me, kv.gid)
	var err error
	kv.logFile, err = os.OpenFile(kv.logFilename, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		log.Fatal(err)
	}
	kv.enc = gob.NewEncoder(kv.logFile)
	// go kv.logBackground()

	os.Remove(servers[me])
	l, e := net.Listen(Network, servers[me])
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	kv.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.

	// Accept loop: when unreliable, randomly drop requests or discard
	// replies to simulate a lossy network.
	go func() {
		for kv.dead == false {
			conn, err := kv.l.Accept()
			if err == nil && kv.dead == false {
				if kv.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if kv.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						DPrintf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && kv.dead == false {
				DPrintf("ShardKV(%v) accept: %v\n", me, err.Error())
				kv.Kill()
			}
		}
	}()

	// Poll for reconfigurations every 250ms.
	go func() {
		for kv.dead == false {
			kv.tick()
			time.Sleep(250 * time.Millisecond)
		}
	}()

	return kv
}
//
// Start a shardkv server.
// gid is the ID of the server's replica group.
// shardmasters[] contains the ports of the
// servers that implement the shardmaster.
// servers[] contains the ports of the servers
// in this replica group.
// Me is the index of this server in servers[].
//
// This variant also connects to the message brokers so Puts on
// subscribed keys can be published to clients.
func StartServer(gid int64, shardmasters []string, servers []string, me int, messagebrokers []string) *ShardKV {
	// Register every type that travels through gob (Paxos log / RPC).
	gob.Register(Op{})
	gob.Register(PutArgs{})
	gob.Register(GetArgs{})
	gob.Register(SubscribeArgs{})
	gob.Register(ReconfigArgs{})
	gob.Register(TransferArgs{})

	kv := new(ShardKV)
	kv.me = me
	kv.gid = gid
	kv.sm = shardmaster.MakeClerk(shardmasters)
	kv.mb = messagebroker.MakeNotifier(messagebrokers)
	kv.config = kv.sm.Query(0)
	kv.table = make(map[string]*Entry)                // live key/value entries
	kv.tableCache = make(map[int]map[string]*Entry)   // per-config snapshots for transfer
	kv.reqs = make(map[string]*Result)                // request de-dup results
	kv.reqsCache = make(map[int]map[string]*Result)
	kv.lastAppliedSeq = -1

	rpcs := rpc.NewServer()
	rpcs.Register(kv)

	kv.px = paxos.Make(servers, me, rpcs)

	os.Remove(servers[me])
	l, e := net.Listen("unix", servers[me])
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	kv.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.

	// Accept loop: when unreliable, randomly drop requests or discard
	// replies to simulate a lossy network.
	go func() {
		for kv.dead == false {
			conn, err := kv.l.Accept()
			if err == nil && kv.dead == false {
				if kv.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if kv.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && kv.dead == false {
				fmt.Printf("ShardKV(%v) accept: %v\n", me, err.Error())
				kv.Kill()
			}
		}
	}()

	// Poll for reconfigurations every 250ms.
	go func() {
		for !kv.dead {
			kv.tick()
			time.Sleep(250 * time.Millisecond)
		}
	}()

	// Background log synchronization.
	go kv.logSync()

	return kv
}