// Tests the integration of View Change with Signer (ability to reach consensus on a view change) // After achieving consensus, View is not actually changed, because of Signer test framework limitations // See tests in stamp/ for the actual view change ocurring func TestViewChangeTCP(t *testing.T) { time.Sleep(5 * time.Second) if testing.Short() { t.Skip("skipping test in short mode.") } hc, err := oldconfig.LoadConfig("../test/data/extcpconf.json", oldconfig.ConfigOptions{ConnType: "tcp", GenHosts: true}) if err != nil { t.Fatal("error loading configuration: ", err) } defer func() { for _, n := range hc.SNodes { n.Close() } time.Sleep(1 * time.Second) }() err = hc.Run(false, sign.MerkleTree) if err != nil { t.Fatal("error running:", err) } // give it some time to set up time.Sleep(2 * time.Second) N := 6 for i := 1; i <= N; i++ { hc.SNodes[0].LogTest = []byte("hello world") hc.SNodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: i}) } }
func TestTCPStaticConfigVote(t *testing.T) { hc, err := oldconfig.LoadConfig("../test/data/extcpconf.json", oldconfig.ConfigOptions{ConnType: "tcp", GenHosts: true}) if err != nil { t.Error(err) } defer func() { for _, n := range hc.SNodes { n.Close() } time.Sleep(1 * time.Second) }() err = hc.Run(false, sign.Voter) if err != nil { t.Fatal(err) } // give it some time to set up time.Sleep(2 * time.Second) hc.SNodes[0].LogTest = []byte("Hello Voting") vote := &sign.Vote{Type: sign.RemoveVT, Rv: &sign.RemoveVote{Name: "host5", Parent: "host4"}} err = hc.SNodes[0].StartVotingRound(vote) if err != nil { t.Error(err) } }
func TestTCPStaticConfig(t *testing.T) { // not mixing view changes in RoundsPerView := 100 time.Sleep(5 * time.Second) hc, err := oldconfig.LoadConfig("../test/data/extcpconf.json", oldconfig.ConfigOptions{ConnType: "tcp", GenHosts: true}) if err != nil { t.Error(err) } for _, n := range hc.SNodes { n.RoundsPerView = RoundsPerView } defer func() { for _, n := range hc.SNodes { n.Close() } time.Sleep(1 * time.Second) }() err = hc.Run(false, sign.MerkleTree) if err != nil { t.Fatal(err) } // give it some time to set up time.Sleep(2 * time.Second) hc.SNodes[0].LogTest = []byte("hello world") hc.SNodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: 1}) log.Println("Test Done") }
func TestTreeFromBigConfig(t *testing.T) { // this test configuration HostList is incorrect -- duplicates are present return // not mixing view changes in RoundsPerView := 100 hc, err := oldconfig.LoadConfig("../test/data/exwax.json") if err != nil { t.Fatal(err) } for _, sn := range hc.SNodes { sn.RoundsPerView = RoundsPerView } err = hc.Run(false, sign.MerkleTree) if err != nil { t.Fatal(err) } defer func() { for _, n := range hc.SNodes { n.Close() } time.Sleep(1 * time.Second) }() // give it some time to set up time.Sleep(2 * time.Second) hc.SNodes[0].LogTest = []byte("hello world") err = hc.SNodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: 1}) if err != nil { t.Error(err) } }
// one after the other by the root (one signature per message created) func SimpleRoundsThroughput(N int, b *testing.B) { hc, _ := oldconfig.LoadConfig("../test/data/extcpconf.json", oldconfig.ConfigOptions{ConnType: "tcp", GenHosts: true}) hc.Run(false, sign.PubKey) for n := 0; n < b.N; n++ { for i := 0; i < N; i++ { hc.SNodes[0].LogTest = []byte("hello world" + strconv.Itoa(i)) hc.SNodes[0].Announce(DefaultView, &sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: 0}) } for _, sn := range hc.SNodes { sn.Close() } } }
func runTreeSmallConfig(signType sign.Type, RoundsPerView int, suite abstract.Suite, failureRate int, faultyNodes ...int) error { var hc *oldconfig.HostConfig var err error opts := oldconfig.ConfigOptions{Suite: suite} if len(faultyNodes) > 0 { opts.Faulty = true } hc, err = oldconfig.LoadConfig("../test/data/exconf.json", opts) if err != nil { return err } for _, fh := range faultyNodes { fmt.Println("Setting", hc.SNodes[fh].Name(), "as faulty") if failureRate == 100 { hc.SNodes[fh].Host.(*coconet.FaultyHost).SetDeadFor("commit", true) } // hc.SNodes[fh].Host.(*coconet.FaultyHost).Die() } if len(faultyNodes) > 0 { for i := range hc.SNodes { hc.SNodes[i].FailureRate = failureRate } } for _, sn := range hc.SNodes { sn.RoundsPerView = RoundsPerView } err = hc.Run(false, signType) if err != nil { return err } for _, sn := range hc.SNodes { defer sn.Close() } // Have root node initiate the signing protocol via a simple annoucement hc.SNodes[0].LogTest = []byte("Hello World") hc.SNodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: 1}) return nil }
// Configuration file data/exconf.json // 0 // / \ // 1 4 // / \ \ // 2 3 5 func TestTreeSmallConfigVote(t *testing.T) { hc, err := oldconfig.LoadConfig("../test/data/exconf.json") if err != nil { t.Fatal(err) } err = hc.Run(false, sign.Voter) if err != nil { t.Fatal(err) } // Achieve consensus on removing a node vote := &sign.Vote{Type: sign.AddVT, Av: &sign.AddVote{Name: "host5", Parent: "host4"}} err = hc.SNodes[0].StartVotingRound(vote) if err != nil { t.Error(err) } }
// Tests the integration of View Change with Signer (ability to reach consensus on a view change) // After achieving consensus, View is not actually changed, because of Signer test framework limitations // See tests in stamp/ for the actual view change ocurring // Go channels, static configuration, multiple rounds func TestViewChangeChan(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } hc, err := oldconfig.LoadConfig("../test/data/exconf.json") if err != nil { t.Fatal(err) } defer func() { for _, n := range hc.SNodes { n.Close() } time.Sleep(1 * time.Second) }() err = hc.Run(false, sign.MerkleTree) if err != nil { t.Fatal(err) } // give it some time to set up time.Sleep(2 * time.Second) // Have root node initiate the signing protocol // via a simple annoucement N := 6 for i := 1; i <= N; i++ { hc.SNodes[0].LogTest = []byte("Hello World" + strconv.Itoa(i)) err = hc.SNodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: i}) if err == sign.ChangingViewError { log.Println("Attempted round", i, "but received view change. waiting then retrying") time.Sleep(3 * time.Second) i-- continue } if err != nil { t.Error(err) } } }
// tree from configuration file data/exconf.json func TestMultipleRounds(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } // not mixing view changes in RoundsPerView := 100 hc, err := oldconfig.LoadConfig("../test/data/exconf.json") if err != nil { t.Fatal(err) } N := 5 for _, sn := range hc.SNodes { sn.RoundsPerView = RoundsPerView } err = hc.Run(false, sign.MerkleTree) if err != nil { t.Fatal(err) } defer func() { for _, n := range hc.SNodes { n.Close() } time.Sleep(1 * time.Second) }() // give it some time to set up time.Sleep(1 * time.Second) // Have root node initiate the signing protocol // via a simple annoucement for i := 1; i <= N; i++ { hc.SNodes[0].LogTest = []byte("Hello World" + strconv.Itoa(i)) err = hc.SNodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: i}) if err != nil { t.Error(err) } } }
// runTCPTimestampFromConfig runs a full timestamping session over TCP:
// it loads extcpconf.json (optionally with faulty hosts), starts the
// stamper on every node, and drives nRounds rounds of nMessages
// timestamp requests per client, retrying individual requests until they
// succeed or time out. Returns the first setup error, nil otherwise.
// failureRate is a percentage [0..100] applied to every node when any
// faultyNodes are given. signType is converted to sign.Type below.
func runTCPTimestampFromConfig(RoundsPerView int, signType, nMessages, nClients, nRounds, failureRate int, faultyNodes ...int) error {
	var hc *oldconfig.HostConfig
	var err error
	// Bump the port base so consecutive tests don't collide on ports.
	oldconfig.StartConfigPort += 2010
	// load config with faulty or healthy hosts
	if len(faultyNodes) > 0 {
		hc, err = oldconfig.LoadConfig("../test/data/extcpconf.json", oldconfig.ConfigOptions{ConnType: "tcp", GenHosts: true, Faulty: true})
	} else {
		hc, err = oldconfig.LoadConfig("../test/data/extcpconf.json", oldconfig.ConfigOptions{ConnType: "tcp", GenHosts: true})
	}
	if err != nil {
		return err
	}
	// set FailureRates
	if len(faultyNodes) > 0 {
		for i := range hc.SNodes {
			hc.SNodes[i].FailureRate = failureRate
		}
	}
	for _, n := range hc.SNodes {
		n.RoundsPerView = RoundsPerView
	}
	err = hc.Run(true, sign.Type(signType))
	if err != nil {
		return err
	}
	stampers, clients, err := hc.RunTimestamper(nClients)
	if err != nil {
		return err
	}
	// Non-root stampers run as regulars; node 0 runs as root.
	for _, s := range stampers[1:] {
		go s.Run("regular", nRounds)
	}
	go stampers[0].Run("root", nRounds)
	log.Println("About to start sending client messages")
	for r := 1; r <= nRounds; r++ {
		var wg sync.WaitGroup
		for _, c := range clients {
			for i := 0; i < nMessages; i++ {
				messg := []byte("messg:" + strconv.Itoa(r) + "." + strconv.Itoa(i))
				wg.Add(1)
				// CLIENT SENDING
				// NOTE(review): the call site passes r as the closure's i
				// parameter (not the loop's i); i is unused inside the
				// closure, so behavior is unaffected — confirm intent.
				go func(c *stamp.Client, messg []byte, i int) {
					defer wg.Done()
					server := "NO VALID SERVER"
				retry:
					// Pick any currently-known server (map iteration order
					// is random; break after the first key).
					c.Mux.Lock()
					for k := range c.Servers {
						server = k
						break
					}
					c.Mux.Unlock()
					log.Infoln("timestamping")
					err := c.TimeStamp(messg, server)
					if err == stamp.ErrClientToTSTimeout {
						// Timeout is terminal for this request.
						log.Errorln(err)
						return
					}
					if err != nil {
						// Any other error: back off and retry indefinitely.
						time.Sleep(1 * time.Second)
						fmt.Println("retyring because err:", err)
						goto retry
					}
					log.Infoln("timestamped")
				}(c, messg, r)
			}
		}
		// wait between rounds
		wg.Wait()
		log.Println("done with round:", r, " of ", nRounds)
	}
	// give it some time before closing the connections
	// so that no essential messages are denied passing through the network
	time.Sleep(1 * time.Second)
	for _, h := range hc.SNodes {
		h.Close()
	}
	for _, c := range clients {
		c.Close()
	}
	return nil
}
func TestGoConnTimestampFromConfig(t *testing.T) { oldconfig.StartConfigPort += 2010 nMessages := 1 nClients := 1 nRounds := 1 hc, err := oldconfig.LoadConfig("../test/data/exconf.json") if err != nil { t.Fatal(err) } for _, n := range hc.SNodes { n.RoundsPerView = 1000 } err = hc.Run(true, sign.MerkleTree) if err != nil { t.Fatal(err) } stampers, clients, err := hc.RunTimestamper(nClients) if err != nil { log.Fatal(err) } for _, s := range stampers[1:] { go s.Run("regular", nRounds) go s.ListenToClients() } go stampers[0].Run("root", nRounds) go stampers[0].ListenToClients() log.Println("About to start sending client messages") time.Sleep(1 * time.Second) for r := 0; r < nRounds; r++ { var wg sync.WaitGroup for _, c := range clients { for i := 0; i < nMessages; i++ { messg := []byte("messg:" + strconv.Itoa(r) + "." + strconv.Itoa(i)) wg.Add(1) go func(c *stamp.Client, messg []byte, i int) { defer wg.Done() server := "NO VALID SERVER" c.Mux.Lock() for k := range c.Servers { server = k break } c.Mux.Unlock() c.TimeStamp(messg, server) }(c, messg, r) } } // wait between rounds wg.Wait() fmt.Println("done with round:", r, nRounds) } // give it some time before closing the connections // so that no essential messages are denied passing through the network time.Sleep(5 * time.Second) for _, h := range hc.SNodes { h.Close() } for _, c := range clients { c.Close() } }
// # Messages per round, # rounds, failure rate[0..100], list of faulty nodes func runTSSIntegration(RoundsPerView, nMessages, nRounds, failureRate, failAsRootEvery, failAsFollowerEvery int, faultyNodes ...int) error { //stamp.ROUND_TIME = 1 * time.Second var hostConfig *oldconfig.HostConfig var err error // load config with faulty or healthy hosts opts := oldconfig.ConfigOptions{} if len(faultyNodes) > 0 { opts.Faulty = true } hostConfig, err = oldconfig.LoadConfig("../test/data/exconf.json", opts) if err != nil { return err } log.Printf("load config returned dir: %p", hostConfig.Dir) // set FailureRates as pure percentages if len(faultyNodes) > 0 { for i := range hostConfig.SNodes { hostConfig.SNodes[i].FailureRate = failureRate } } // set root failures if failAsRootEvery > 0 { for i := range hostConfig.SNodes { hostConfig.SNodes[i].FailAsRootEvery = failAsRootEvery } } // set followerfailures for _, f := range faultyNodes { hostConfig.SNodes[f].FailAsFollowerEvery = failAsFollowerEvery } for _, n := range hostConfig.SNodes { n.RoundsPerView = RoundsPerView } err = hostConfig.Run(true, sign.MerkleTree) if err != nil { return err } // Connect all TSServers to their clients, except for root TSServer ncps := 3 // # clients per TSServer stampers := make([]*stamp.Server, len(hostConfig.SNodes)) for i := range stampers { stampers[i] = stamp.NewServer(hostConfig.SNodes[i]) defer func() { hostConfig.SNodes[i].Close() time.Sleep(1 * time.Second) }() } clientsLists := make([][]*stamp.Client, len(hostConfig.SNodes[1:])) for i, s := range stampers[1:] { clientsLists[i] = createClientsForTSServer(ncps, s, hostConfig.Dir, 0+i+ncps) } for i, s := range stampers[1:] { go s.Run("regular", nRounds) go s.ListenToClients() go func(clients []*stamp.Client, nRounds int, nMessages int, s *stamp.Server) { log.Println("clients Talk") time.Sleep(1 * time.Second) clientsTalk(clients, nRounds, nMessages, s) log.Println("Clients done Talking") }(clientsLists[i], nRounds, nMessages, s) } 
log.Println("RUNNING ROOT") stampers[0].ListenToClients() stampers[0].Run("root", nRounds) log.Println("Done running root") // After clients receive messages back we need a better way // of waiting to make sure servers check ElGamal sigs // time.Sleep(1 * time.Second) log.Println("DONE with test") return nil }
// Run is the per-host entry point: it loads the configuration for
// hostname from cfg, optionally injects failures, and then runs the host
// as a plain signer (app == "sign") or as a timestamper/voter
// (app == "stamp" or "vote").
//
// rounds: rounds for the stamper; rootwait: seconds the root waits before
// starting; failureRate/rFail/fFail: failure-injection knobs; logger and
// suite select the remote logger and crypto suite.
func Run(hostname, cfg, app string, rounds int, rootwait int, debug, testConnect bool, failureRate, rFail, fFail int, logger, suite string) {
	if debug {
		coco.DEBUG = true
	}
	// fmt.Println("EXEC TIMESTAMPER: " + hostname)
	if hostname == "" {
		fmt.Println("hostname is empty")
		log.Fatal("no hostname given")
	}
	// load the configuration
	//log.Println("loading configuration")
	var hc *oldconfig.HostConfig
	var err error
	s := GetSuite(suite)
	opts := oldconfig.ConfigOptions{ConnType: "tcp", Host: hostname, Suite: s}
	if failureRate > 0 || fFail > 0 {
		opts.Faulty = true
	}
	hc, err = oldconfig.LoadConfig(cfg, opts)
	if err != nil {
		fmt.Println(err)
		log.Fatal(err)
	}
	// set FailureRates
	if failureRate > 0 {
		for i := range hc.SNodes {
			hc.SNodes[i].FailureRate = failureRate
		}
	}
	// set root failures
	if rFail > 0 {
		for i := range hc.SNodes {
			hc.SNodes[i].FailAsRootEvery = rFail
		}
	}
	// set follower failures
	// a follower fails on %ffail round with failureRate probability
	for i := range hc.SNodes {
		hc.SNodes[i].FailAsFollowerEvery = fFail
	}
	// run this specific host
	// log.Println("RUNNING HOST CONFIG")
	err = hc.Run(app != "sign", sign.MerkleTree, hostname)
	if err != nil {
		log.Fatal(err)
	}
	// NOTE(review): log.Panicln panics immediately, so sn.Close() below it
	// can never execute when this defer fires — confirm whether the close
	// was ever intended to run, or whether the panic-on-exit is deliberate.
	defer func(sn *sign.Node) {
		log.Panicln("program has terminated:", hostname)
		sn.Close()
	}(hc.SNodes[0])
	if app == "sign" {
		//log.Println("RUNNING Node")
		// if I am root do the announcement message
		if hc.SNodes[0].IsRoot(0) {
			time.Sleep(3 * time.Second)
			// NOTE(review): this initial assignment is immediately
			// overwritten at the top of the loop below.
			start := time.Now()
			iters := 10
			for i := 0; i < iters; i++ {
				start = time.Now()
				//fmt.Println("ANNOUNCING")
				hc.SNodes[0].LogTest = []byte("Hello World")
				err = hc.SNodes[0].Announce(0, &sign.AnnouncementMessage{
					LogTest: hc.SNodes[0].LogTest, Round: i})
				if err != nil {
					log.Println(err)
				}
				// Log per-round latency for the measurement pipeline.
				elapsed := time.Since(start)
				log.WithFields(log.Fields{
					"file":  logutils.File(),
					"type":  "root_announce",
					"round": i,
					"time":  elapsed,
				}).Info("")
			}
		} else {
			// otherwise wait a little bit (hopefully it finishes by the end of this)
			time.Sleep(30 * time.Second)
		}
	} else if app == "stamp" || app == "vote" {
		// log.Println("RUNNING TIMESTAMPER")
		stampers, _, err := hc.RunTimestamper(0, hostname)
		// get rid of the hc information so it can be GC'ed
		hc = nil
		if err != nil {
			log.Fatal(err)
		}
		for _, s := range stampers {
			// only listen if this is the hostname specified
			if s.Name() == hostname {
				s.Logger = logger
				s.Hostname = hostname
				s.App = app
				if s.IsRoot(0) {
					log.Println("RUNNING ROOT SERVER AT:", hostname, rounds)
					log.Printf("Waiting: %d s\n", rootwait)
					// wait for the other nodes to get set up
					time.Sleep(time.Duration(rootwait) * time.Second)
					log.Println("STARTING ROOT ROUND")
					s.Run("root", rounds)
					// log.Println("\n\nROOT DONE\n\n")
				} else if !testConnect {
					log.Println("RUNNING REGULAR AT:", hostname)
					s.Run("regular", rounds)
					// log.Println("\n\nREGULAR DONE\n\n")
				} else {
					// testing connection
					log.Println("RUNNING TEST_CONNNECT AT:", hostname)
					s.Run("test_connect", rounds)
				}
			}
		}
	}
}