// StartServer creates a PBServer identified by me, registers it for RPC
// on a unix-domain socket of the same name, and starts two background
// goroutines: an accept loop (with simulated unreliability) and a tick
// loop that pings the viewservice every PingInterval.
func StartServer(vshost string, me string) *PBServer {
	pb := new(PBServer)
	pb.me = me
	pb.vs = viewservice.MakeClerk(me, vshost)
	// Your pb.* initializations here.
	pb.view = viewservice.View{}         // cached view; zero until the first tick learns one
	pb.content = make(map[string]string) // the key/value store
	pb.client = make(map[string]string)  // per-client state -- presumably at-most-once dedup; confirm against the Put handler

	rpcs := rpc.NewServer()
	rpcs.Register(pb)

	os.Remove(pb.me) // remove a stale socket file from a previous run, if any
	l, e := net.Listen("unix", pb.me)
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	pb.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.

	go func() {
		// Accept loop: serve RPC connections until the server is killed,
		// simulating an unreliable network when enabled.
		for pb.isdead() == false {
			conn, err := pb.l.Accept()
			if err == nil && pb.isdead() == false {
				if pb.isunreliable() && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if pb.isunreliable() && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && pb.isdead() == false {
				fmt.Printf("PBServer(%v) accept: %v\n", me, err.Error())
				pb.kill()
			}
		}
	}()

	go func() {
		// Tick loop: let the server ping the viewservice periodically.
		for pb.isdead() == false {
			pb.tick()
			time.Sleep(viewservice.PingInterval)
		}
	}()

	return pb
}
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) // Your ck.* initializations here ck.me = me return ck }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) ck.view, _ = ck.vs.Get() return ck }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) // Your ck.* initializations here ck.me = fmt.Sprintf("%s%d", me, nrand()) return ck }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) // Your ck.* initializations here ck.me = "client: " + strconv.Itoa(int(nrand())) return ck }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) // Your ck.* initializations here ck.currentView = viewservice.View{0, "", ""} return ck }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) // Your ck.* initializations here ck.id = nrand() ck.seq_num = 0 return ck }
func TestAtMostOnce(t *testing.T) { runtime.GOMAXPROCS(4) tag := "csu" vshost := port(tag+"v", 1) vs := viewservice.StartServer(vshost) time.Sleep(time.Second) vck := viewservice.MakeClerk("", vshost) fmt.Printf("Test: at-most-once Put; unreliable ...\n") const nservers = 1 var sa [nservers]*PBServer for i := 0; i < nservers; i++ { sa[i] = StartServer(vshost, port(tag, i+1)) sa[i].unreliable = true } for iters := 0; iters < viewservice.DeadPings*2; iters++ { view, _ := vck.Get() if view.Primary != "" && view.Backup != "" { break } time.Sleep(viewservice.PingInterval) } // give p+b time to ack, initialize time.Sleep(viewservice.PingInterval * viewservice.DeadPings) ck := MakeClerk(vshost, "") k := "counter" val := "" for i := 0; i < 100; i++ { v := strconv.Itoa(i) pv := ck.PutHash(k, v) if pv != val { fmt.Println(i) t.Fatalf("ck.Puthash() returned %v but expected %v\n", pv, val) } h := hash(val + v) val = strconv.Itoa(int(h)) } v := ck.Get(k) if v != val { fmt.Println("101") t.Fatalf("ck.Get() returned %v but expected %v\n", v, val) } fmt.Printf(" ... Passed\n") for i := 0; i < nservers; i++ { sa[i].kill() } time.Sleep(time.Second) vs.Kill() time.Sleep(time.Second) }
func TestAtMostOnce(t *testing.T) { runtime.GOMAXPROCS(4) tag := "tamo" vshost := port(tag+"v", 1) vs := viewservice.StartServer(vshost) time.Sleep(time.Second) vck := viewservice.MakeClerk("", vshost) fmt.Printf("Test: at-most-once Append; unreliable ...\n") const nservers = 1 var sa [nservers]*PBServer for i := 0; i < nservers; i++ { sa[i] = StartServer(vshost, port(tag, i+1)) sa[i].setunreliable(true) } for iters := 0; iters < viewservice.DeadPings*2; iters++ { view, _ := vck.Get() if view.Primary != "" && view.Backup != "" { break } time.Sleep(viewservice.PingInterval) } view1, _ := vck.Get() // give p+b time to ack, initialize time.Sleep(viewservice.PingInterval * viewservice.DeadPings) view, _ := vck.Get() fmt.Println("->", view.Primary, view.Backup) fmt.Println("->", view1.Primary, view1.Backup) ck := MakeClerk(vshost, "") k := "counter" val := "" for i := 0; i < 100; i++ { v := strconv.Itoa(i) ck.Append(k, v) val = val + v } v := ck.Get(k) if v != val { t.Fatalf("ck.Get() returned %v but expected %v\n", v, val) } fmt.Printf(" ... Passed\n") for i := 0; i < nservers; i++ { sa[i].kill() } time.Sleep(time.Second) vs.Kill() time.Sleep(time.Second) }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) ck.me = me ck.id = nrand() ck.seqNum = 0 return ck }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) // Your ck.* initializations here ck.currentView = viewservice.View{0, "", ""} ck.id = strconv.FormatInt(nrand(), 10) return ck }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) // Your ck.* initializations here ck.id = nrand() // For debug. ck.setNewPrimary() return ck }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) ok := false for !ok { ck.view, ok = ck.vs.Get() time.Sleep(viewservice.PingInterval) } return ck }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) // Your ck.* initializations here ck.me = me ck.primary = ck.vs.Primary() ck.rpc_id = 1 return ck }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) // Your ck.* initializations here view, _ := ck.vs.Ping(0) newPrimary := view.Primary ck.primary = newPrimary return ck }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) // Your ck.* initializations here // By Yan ck.curView.Primary = "" ck.curView.Backup = "" ck.curView.Viewnum = 0 return ck }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) // Your ck.* initializations here ck.view = viewservice.View{} // note: argument me could not garantuee unique, this ck.me field would be put along // with PutArgs to show client's identification ck.me = strconv.FormatInt(nrand(), 10) return ck }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) // Your ck.* initializations here ck.view = viewservice.View{} ck.xaction = 0 ck.cid = nrand() return ck }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) // Your ck.* initializations here ck.client_view.Viewnum = 0 ck.client_view.Primary = "" ck.client_view.Backup = "" ck.me = strconv.FormatInt(nrand(), 10) return ck }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) // Your ck.* initializations here ok := false if !ok { ck.view, ok = ck.vs.Get() time.Sleep(viewservice.PingInterval) } return ck }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) //ck.currView,_ = ck.getView() ck.currView = viewservice.View{} //rand.Seed(time.Now().UTC().UnixNano()) ck.ClientID = nrand() // Your ck.* initializations here return ck }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) // Your ck.* initializations here ck.primary = ck.vs.Primary() ck.rpcid = 1 mu.Lock() defer mu.Unlock() ck.me = strconv.Itoa(clientNum) clientNum = clientNum + 1 return ck }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) // Your ck.* initializations here var success bool ck.view, success = ck.vs.Get() if !success { logger.Debug("ck.vs.Get failed") } return ck }
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) ck.vshost = vshost //ck.Me = me ck.Me = strconv.FormatInt(nrand(), 10) ck.mu = &sync.Mutex{} view, _ := ck.vs.Get() ck.view = view ck.server = ck.view.Primary return ck }
// StartServer creates a PBServer identified by me, registers it for RPC
// on a unix-domain socket of the same name, and starts an accept loop
// and a viewservice tick loop. The done WaitGroup tracks in-flight RPC
// handlers; finish is closed once all of them drain after the server dies.
func StartServer(vshost string, me string) *PBServer {
	pb := new(PBServer)
	pb.me = me
	pb.vs = viewservice.MakeClerk(me, vshost)
	pb.finish = make(chan interface{}) // closed when all outstanding work has drained
	// Your pb.* initializations here.
	pb.current = viewservice.View{Viewnum: 0} // last view heard from the viewservice
	pb.db = make(map[string]string)           // the key/value store
	//pb.filter = make(map[string]string)

	rpcs := rpc.NewServer()
	rpcs.Register(pb)

	os.Remove(pb.me) // remove a stale socket file from a previous run, if any
	l, e := net.Listen("unix", pb.me)
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	pb.l = l

	// please do not change any of the following code,
	// or do anything to subvert it.

	go func() {
		// Accept loop: serve RPCs until pb.dead is set, simulating an
		// unreliable network by sometimes dropping requests or replies.
		// NOTE(review): pb.dead and pb.unreliable are plain bools read
		// without synchronization here -- lab harness code, left as provided.
		for pb.dead == false {
			conn, err := pb.l.Accept()
			if err == nil && pb.dead == false {
				if pb.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if pb.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					pb.done.Add(1)
					go func() {
						rpcs.ServeConn(conn)
						pb.done.Done()
					}()
				} else {
					pb.done.Add(1)
					go func() {
						rpcs.ServeConn(conn)
						pb.done.Done()
					}()
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && pb.dead == false {
				fmt.Printf("PBServer(%v) accept: %v\n", me, err.Error())
				pb.kill()
			}
		}
		// Drain every in-flight handler (and the tick goroutine, which
		// also registered with pb.done) before signalling completion.
		DPrintf("%s: wait until all request are done\n", pb.me)
		pb.done.Wait()
		// If you have an additional thread in your solution, you could
		// have it read to the finish channel to hear when to terminate.
		close(pb.finish)
	}()

	pb.done.Add(1)
	go func() {
		// Tick loop: ping the viewservice once per PingInterval.
		for pb.dead == false {
			pb.tick()
			time.Sleep(viewservice.PingInterval)
		}
		pb.done.Done()
	}()

	return pb
}
func MakeClerk(vshost string, me string) *Clerk { ck := new(Clerk) ck.vs = viewservice.MakeClerk(me, vshost) ck.view = viewservice.View{} return ck }
// TestPartition2 checks that a primary partitioned away from the
// viewservice does not complete Gets with stale data, even after a
// second view change (s2 promoted, then s3 joins, then s2 dies).
func TestPartition2(t *testing.T) {
	runtime.GOMAXPROCS(4)

	tag := "part2"
	vshost := port(tag+"v", 1)
	vs := viewservice.StartServer(vshost)
	time.Sleep(time.Second)
	vck := viewservice.MakeClerk("", vshost)
	ck1 := MakeClerk(vshost, "")

	// s1 reaches the viewservice through the hard link vshosta; removing
	// the link later partitions s1 from the viewservice alone.
	vshosta := vshost + "a"
	os.Link(vshost, vshosta)

	s1 := StartServer(vshosta, port(tag, 1))
	delay := 0
	proxy(t, port(tag, 1), &delay)

	fmt.Printf("Test: Partitioned old primary does not complete Gets ...\n")

	deadtime := viewservice.PingInterval * viewservice.DeadPings
	time.Sleep(deadtime * 2)
	if vck.Primary() != s1.me {
		t.Fatal("primary never formed initial view")
	}

	s2 := StartServer(vshost, port(tag, 2))
	time.Sleep(deadtime * 2)
	v1, _ := vck.Get()
	if v1.Primary != s1.me || v1.Backup != s2.me {
		t.Fatal("backup did not join view")
	}

	ck1.Put("a", "1")
	check(ck1, "a", "1")

	os.Remove(vshosta) // partition s1 from the viewservice

	// start a client Get(), but use proxy to delay it long
	// enough that it won't reach s1 until after s1 is no
	// longer the primary.
	delay = 5
	// NOTE(review): stale_get is written by the goroutine and read below
	// without synchronization -- harness code, left as provided.
	stale_get := false
	go func() {
		x := ck1.Get("a")
		if x == "1" {
			stale_get = true
		}
	}()

	// now s1 cannot talk to viewserver, so view will change.
	for iter := 0; iter < viewservice.DeadPings*3; iter++ {
		if vck.Primary() == s2.me {
			break
		}
		time.Sleep(viewservice.PingInterval)
	}
	if vck.Primary() != s2.me {
		t.Fatalf("primary never changed")
	}

	// Bring in s3 as the new backup behind s2.
	s3 := StartServer(vshost, port(tag, 3))
	for iter := 0; iter < viewservice.DeadPings*3; iter++ {
		v, _ := vck.Get()
		if v.Backup == s3.me && v.Primary == s2.me {
			break
		}
		time.Sleep(viewservice.PingInterval)
	}
	v2, _ := vck.Get()
	if v2.Primary != s2.me || v2.Backup != s3.me {
		t.Fatalf("new backup never joined")
	}
	time.Sleep(2 * time.Second)

	// Overwrite "a" via the new primary, then kill it so s3 takes over.
	ck2 := MakeClerk(vshost, "")
	ck2.Put("a", "2")
	check(ck2, "a", "2")

	s2.kill()

	// wait for delayed get to s1 to complete.
	time.Sleep(6 * time.Second)
	if stale_get == true {
		t.Fatalf("partitioned primary replied to a Get with a stale value")
	}

	check(ck2, "a", "2")

	fmt.Printf(" ... Passed\n")

	s1.kill()
	s2.kill() // NOTE(review): s2 already killed above -- presumably kill() is idempotent
	s3.kill()
	vs.Kill()
}
// TestPartition1 checks that an old primary, partitioned from the
// viewservice but still reachable by clients, does not serve a Get with
// a stale value after a new primary has taken over.
func TestPartition1(t *testing.T) {
	runtime.GOMAXPROCS(4)

	tag := "part1"
	vshost := port(tag+"v", 1)
	vs := viewservice.StartServer(vshost)
	time.Sleep(time.Second)
	vck := viewservice.MakeClerk("", vshost)
	ck1 := MakeClerk(vshost, "")

	fmt.Printf("Test: Old primary does not serve Gets ...\n")

	// s1 reaches the viewservice through the hard link vshosta; removing
	// the link later partitions s1 from the viewservice alone.
	vshosta := vshost + "a"
	os.Link(vshost, vshosta)

	s1 := StartServer(vshosta, port(tag, 1))
	delay := 0
	proxy(t, port(tag, 1), &delay)

	deadtime := viewservice.PingInterval * viewservice.DeadPings
	time.Sleep(deadtime * 2)
	if vck.Primary() != s1.me {
		t.Fatal("primary never formed initial view")
	}

	s2 := StartServer(vshost, port(tag, 2))
	time.Sleep(deadtime * 2)
	v1, _ := vck.Get()
	if v1.Primary != s1.me || v1.Backup != s2.me {
		t.Fatal("backup did not join view")
	}

	ck1.Put("a", "1")
	check(ck1, "a", "1")

	os.Remove(vshosta) // partition s1 from the viewservice

	// start a client Get(), but use proxy to delay it long
	// enough that it won't reach s1 until after s1 is no
	// longer the primary.
	delay = 4
	// NOTE(review): stale_get is written by the goroutine and read below
	// without synchronization -- harness code, left as provided.
	stale_get := false
	go func() {
		x := ck1.Get("a")
		if x == "1" {
			stale_get = true
		}
	}()

	// now s1 cannot talk to viewserver, so view will change,
	// and s1 won't immediately realize.
	for iter := 0; iter < viewservice.DeadPings*3; iter++ {
		if vck.Primary() == s2.me {
			break
		}
		time.Sleep(viewservice.PingInterval)
	}
	if vck.Primary() != s2.me {
		t.Fatalf("primary never changed")
	}

	// wait long enough that s2 is guaranteed to have Pinged
	// the viewservice, and thus that s2 must know about
	// the new view.
	time.Sleep(2 * viewservice.PingInterval)

	// change the value (on s2) so it's no longer "1".
	ck2 := MakeClerk(vshost, "")
	ck2.Put("a", "111")
	check(ck2, "a", "111")

	// wait for the background Get to s1 to be delivered.
	time.Sleep(5 * time.Second)
	if stale_get {
		t.Fatalf("Get to old primary succeeded and produced stale value")
	}

	check(ck2, "a", "111")

	fmt.Printf(" ... Passed\n")

	s1.kill()
	s2.kill()
	vs.Kill()
}
// TestRepeatedCrashUnreliable repeatedly kills and restarts servers
// while two client goroutines issue Puts and Gets over an unreliable
// network, checking that no acknowledged write is ever lost.
func TestRepeatedCrashUnreliable(t *testing.T) {
	runtime.GOMAXPROCS(4)

	tag := "rcu"
	vshost := port(tag+"v", 1)
	vs := viewservice.StartServer(vshost)
	time.Sleep(time.Second)
	vck := viewservice.MakeClerk("", vshost)

	fmt.Printf("Test: Repeated failures/restarts; unreliable ...\n")

	const nservers = 3
	var sa [nservers]*PBServer
	for i := 0; i < nservers; i++ {
		sa[i] = StartServer(vshost, port(tag, i+1))
		sa[i].unreliable = true // NOTE(review): raw field write; races with the accept loop's read -- confirm
	}

	for i := 0; i < viewservice.DeadPings; i++ {
		v, _ := vck.Get()
		if v.Primary != "" && v.Backup != "" {
			break
		}
		time.Sleep(viewservice.PingInterval)
	}

	// wait a bit for primary to initialize backup
	time.Sleep(viewservice.DeadPings * viewservice.PingInterval)

	// NOTE(review): done is shared between goroutines without
	// synchronization -- harness idiom, left as provided.
	done := false

	go func() {
		// kill and restart servers
		rr := rand.New(rand.NewSource(int64(os.Getpid())))
		for done == false {
			i := rr.Int() % nservers
			// fmt.Printf("%v killing %v\n", ts(), 5001+i)
			sa[i].kill()

			// wait long enough for new view to form, backup to be initialized
			time.Sleep(2 * viewservice.PingInterval * viewservice.DeadPings)

			sa[i] = StartServer(vshost, port(tag, i+1))

			// wait long enough for new view to form, backup to be initialized
			time.Sleep(2 * viewservice.PingInterval * viewservice.DeadPings)
		}
	}()

	const nth = 2
	var cha [nth]chan bool
	for xi := 0; xi < nth; xi++ {
		cha[xi] = make(chan bool)
		go func(i int) {
			// Each worker records its own expected values in data and
			// reports success (ok) on its channel when done.
			ok := false
			defer func() { cha[i] <- ok }()
			ck := MakeClerk(vshost, "")
			data := map[string]string{}
			rr := rand.New(rand.NewSource(int64(os.Getpid() + i)))
			for done == false {
				// Keys are partitioned per worker by the i*1000000 offset.
				k := strconv.Itoa((i * 1000000) + (rr.Int() % 10))
				wanted, ok := data[k] // deliberately shadows the outer ok
				if ok {
					v := ck.Get(k)
					if v != wanted {
						t.Fatalf("key=%v wanted=%v got=%v", k, wanted, v)
					}
				}
				nv := strconv.Itoa(rr.Int())
				ck.Put(k, nv)
				data[k] = nv
				// if no sleep here, then server tick() threads do not get
				// enough time to Ping the viewserver.
				time.Sleep(10 * time.Millisecond)
			}
			ok = true
		}(xi)
	}

	time.Sleep(20 * time.Second)
	done = true

	// Collect each worker's verdict.
	for i := 0; i < nth; i++ {
		ok := <-cha[i]
		if ok == false {
			t.Fatal("child failed")
		}
	}

	// One final sanity Put/Get after all the churn.
	ck := MakeClerk(vshost, "")
	ck.Put("aaa", "bbb")
	if v := ck.Get("aaa"); v != "bbb" {
		t.Fatalf("final Put/Get failed")
	}

	fmt.Printf(" ... Passed\n")

	for i := 0; i < nservers; i++ {
		sa[i].kill()
	}
	time.Sleep(time.Second)
	vs.Kill()
	time.Sleep(time.Second)
}
// TestConcurrentSameUnreliable runs several clients concurrently
// Put()ing to the same small key set over an unreliable network, then
// verifies that the backup agrees with the primary's final values after
// the primary is killed.
func TestConcurrentSameUnreliable(t *testing.T) {
	runtime.GOMAXPROCS(4)

	tag := "csu"
	vshost := port(tag+"v", 1)
	vs := viewservice.StartServer(vshost)
	time.Sleep(time.Second)
	vck := viewservice.MakeClerk("", vshost)

	fmt.Printf("Test: Concurrent Put()s to the same key; unreliable ...\n")

	const nservers = 2
	var sa [nservers]*PBServer
	for i := 0; i < nservers; i++ {
		sa[i] = StartServer(vshost, port(tag, i+1))
		sa[i].unreliable = true // NOTE(review): raw field write; races with the accept loop's read -- confirm
	}

	for iters := 0; iters < viewservice.DeadPings*2; iters++ {
		view, _ := vck.Get()
		if view.Primary != "" && view.Backup != "" {
			break
		}
		time.Sleep(viewservice.PingInterval)
	}

	// give p+b time to ack, initialize
	time.Sleep(viewservice.PingInterval * viewservice.DeadPings)

	// NOTE(review): done is shared between goroutines without
	// synchronization -- harness idiom, left as provided.
	done := false

	view1, _ := vck.Get()
	const nclients = 3
	const nkeys = 2
	for xi := 0; xi < nclients; xi++ {
		go func(i int) {
			// Hammer random keys with random values until done.
			ck := MakeClerk(vshost, "")
			rr := rand.New(rand.NewSource(int64(os.Getpid() + i)))
			for done == false {
				k := strconv.Itoa(rr.Int() % nkeys)
				v := strconv.Itoa(rr.Int())
				ck.Put(k, v)
			}
		}(xi)
	}

	time.Sleep(5 * time.Second)
	done = true
	time.Sleep(time.Second)

	// read from primary
	ck := MakeClerk(vshost, "")
	var vals [nkeys]string
	for i := 0; i < nkeys; i++ {
		vals[i] = ck.Get(strconv.Itoa(i))
		if vals[i] == "" {
			t.Fatalf("Get(%v) failed from primary", i)
		}
	}

	// kill the primary
	for i := 0; i < nservers; i++ {
		if view1.Primary == sa[i].me {
			sa[i].kill()
			break
		}
	}
	// Wait for the backup to be promoted.
	for iters := 0; iters < viewservice.DeadPings*2; iters++ {
		view, _ := vck.Get()
		if view.Primary == view1.Backup {
			break
		}
		time.Sleep(viewservice.PingInterval)
	}
	view2, _ := vck.Get()
	if view2.Primary != view1.Backup {
		t.Fatal("wrong Primary")
	}

	// read from old backup
	for i := 0; i < nkeys; i++ {
		z := ck.Get(strconv.Itoa(i))
		if z != vals[i] {
			t.Fatalf("Get(%v) from backup; wanted %v, got %v", i, vals[i], z)
		}
	}

	fmt.Printf(" ... Passed\n")

	for i := 0; i < nservers; i++ {
		sa[i].kill()
	}
	time.Sleep(time.Second)
	vs.Kill()
	time.Sleep(time.Second)
}