// Tests the integration of View Change with Signer (ability to reach consensus on a view change).
// After achieving consensus, the View is not actually changed, because of Signer test framework limitations.
// See the tests in coll_stamp/ for an actual view change occurring.
func TestViewChangeTCP(t *testing.T) {
	time.Sleep(5 * time.Second)
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	hc, err := config.LoadConfig("testdata/extcpconf.json", config.ConfigOptions{ConnType: "tcp", GenHosts: true})
	if err != nil {
		t.Fatal("error loading configuration: ", err)
	}
	defer func() {
		for _, n := range hc.SNodes {
			n.Close()
		}
		time.Sleep(1 * time.Second)
	}()

	err = hc.Run(false, sign.MerkleTree)
	if err != nil {
		t.Fatal("error running:", err)
	}
	// give it some time to set up
	time.Sleep(2 * time.Second)

	N := 6
	for i := 1; i <= N; i++ {
		hc.SNodes[0].LogTest = []byte("hello world")
		hc.SNodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: i})
	}
}

func TestTCPStaticConfig(t *testing.T) {
	// not mixing view changes in
	RoundsPerView := 100
	time.Sleep(5 * time.Second)
	hc, err := config.LoadConfig("testdata/extcpconf.json", config.ConfigOptions{ConnType: "tcp", GenHosts: true})
	if err != nil {
		// Fatal rather than Error: hc is unusable if loading failed
		t.Fatal(err)
	}
	for _, n := range hc.SNodes {
		n.RoundsPerView = RoundsPerView
	}
	defer func() {
		for _, n := range hc.SNodes {
			n.Close()
		}
		time.Sleep(1 * time.Second)
	}()

	err = hc.Run(false, sign.MerkleTree)
	if err != nil {
		t.Fatal(err)
	}
	// give it some time to set up
	time.Sleep(2 * time.Second)

	hc.SNodes[0].LogTest = []byte("hello world")
	hc.SNodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: 1})
	log.Println("Test Done")
}

func TestTreeFromBigConfig(t *testing.T) {
	// this test configuration's HostList is incorrect -- duplicates are present
	t.Skip("testdata/exwax.json HostList contains duplicates")

	// not mixing view changes in
	RoundsPerView := 100
	hc, err := config.LoadConfig("testdata/exwax.json")
	if err != nil {
		t.Fatal(err)
	}
	for _, sn := range hc.SNodes {
		sn.RoundsPerView = RoundsPerView
	}
	err = hc.Run(false, sign.MerkleTree)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		for _, n := range hc.SNodes {
			n.Close()
		}
		time.Sleep(1 * time.Second)
	}()

	// give it some time to set up
	time.Sleep(2 * time.Second)

	hc.SNodes[0].LogTest = []byte("hello world")
	err = hc.SNodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: 1})
	if err != nil {
		t.Error(err)
	}
}

func TestTCPStaticConfigVote(t *testing.T) {
	hc, err := config.LoadConfig("testdata/extcpconf.json", config.ConfigOptions{ConnType: "tcp", GenHosts: true})
	if err != nil {
		// Fatal rather than Error: hc is unusable if loading failed
		t.Fatal(err)
	}
	defer func() {
		for _, n := range hc.SNodes {
			n.Close()
		}
		time.Sleep(1 * time.Second)
	}()

	err = hc.Run(false, sign.Voter)
	if err != nil {
		t.Fatal(err)
	}
	// give it some time to set up
	time.Sleep(2 * time.Second)

	hc.SNodes[0].LogTest = []byte("Hello Voting")
	vote := &sign.Vote{Type: sign.RemoveVT, Rv: &sign.RemoveVote{Name: "host5", Parent: "host4"}}
	err = hc.SNodes[0].StartVotingRound(vote)
	if err != nil {
		t.Error(err)
	}
}

// SimpleRoundsThroughput runs N announcement rounds per benchmark iteration,
// one after the other by the root (one signature per message created).
func SimpleRoundsThroughput(N int, b *testing.B) {
	hc, _ := config.LoadConfig("testdata/extcpconf.json", config.ConfigOptions{ConnType: "tcp", GenHosts: true})
	hc.Run(false, sign.PubKey)

	for n := 0; n < b.N; n++ {
		for i := 0; i < N; i++ {
			hc.SNodes[0].LogTest = []byte("hello world" + strconv.Itoa(i))
			hc.SNodes[0].Announce(DefaultView, &sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: 0})
		}
	}
	// close the nodes only once, after all benchmark iterations;
	// closing them inside the b.N loop would leave them unusable
	// for subsequent iterations
	for _, sn := range hc.SNodes {
		sn.Close()
	}
}

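// The helper above is designed to be driven from the standard Go benchmark
// harness. A minimal sketch of such drivers follows; the benchmark names and
// round counts are illustrative assumptions, not part of the original file.
func BenchmarkSimpleRounds1(b *testing.B) {
	SimpleRoundsThroughput(1, b)
}

func BenchmarkSimpleRounds10(b *testing.B) {
	SimpleRoundsThroughput(10, b)
}
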
func runTreeSmallConfig(signType sign.Type, RoundsPerView int, suite abstract.Suite, failureRate int, faultyNodes ...int) error {
	var hc *config.HostConfig
	var err error
	opts := config.ConfigOptions{Suite: suite}
	if len(faultyNodes) > 0 {
		opts.Faulty = true
	}
	hc, err = config.LoadConfig("testdata/exconf.json", opts)
	if err != nil {
		return err
	}

	for _, fh := range faultyNodes {
		fmt.Println("Setting", hc.SNodes[fh].Name(), "as faulty")
		if failureRate == 100 {
			hc.SNodes[fh].Host.(*coconet.FaultyHost).SetDeadFor("commit", true)
		}
		// hc.SNodes[fh].Host.(*coconet.FaultyHost).Die()
	}

	if len(faultyNodes) > 0 {
		for i := range hc.SNodes {
			hc.SNodes[i].FailureRate = failureRate
		}
	}

	for _, sn := range hc.SNodes {
		sn.RoundsPerView = RoundsPerView
	}

	err = hc.Run(false, signType)
	if err != nil {
		return err
	}

	for _, sn := range hc.SNodes {
		defer sn.Close()
	}

	// Have the root node initiate the signing protocol via a simple announcement
	hc.SNodes[0].LogTest = []byte("Hello World")
	hc.SNodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: 1})

	return nil
}

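// A hedged example of driving runTreeSmallConfig: a healthy run with no
// faulty nodes and a long view epoch, so no view change is mixed in. The
// test name is an assumption, and the suite constructor assumes the dedis
// crypto library's nist package is imported.
func TestTreeSmallConfigHealthy(t *testing.T) {
	suite := nist.NewAES128SHA256P256() // assumed suite; any abstract.Suite works
	if err := runTreeSmallConfig(sign.MerkleTree, 100, suite, 0); err != nil {
		t.Fatal(err)
	}
}
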
// Configuration file data/exconf.json
//       0
//      / \
//     1   4
//    / \   \
//   2   3   5
func TestTreeSmallConfigVote(t *testing.T) {
	hc, err := config.LoadConfig("testdata/exconf.json")
	if err != nil {
		t.Fatal(err)
	}
	err = hc.Run(false, sign.Voter)
	if err != nil {
		t.Fatal(err)
	}

	// Achieve consensus on adding a node
	vote := &sign.Vote{Type: sign.AddVT, Av: &sign.AddVote{Name: "host5", Parent: "host4"}}
	err = hc.SNodes[0].StartVotingRound(vote)
	if err != nil {
		t.Error(err)
	}
}

// Tests the integration of View Change with Signer (ability to reach consensus on a view change).
// After achieving consensus, the View is not actually changed, because of Signer test framework limitations.
// See the tests in coll_stamp/ for an actual view change occurring.
// Go channels, static configuration, multiple rounds.
func TestViewChangeChan(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	hc, err := config.LoadConfig("testdata/exconf.json")
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		for _, n := range hc.SNodes {
			n.Close()
		}
		time.Sleep(1 * time.Second)
	}()

	err = hc.Run(false, sign.MerkleTree)
	if err != nil {
		t.Fatal(err)
	}
	// give it some time to set up
	time.Sleep(2 * time.Second)

	// Have the root node initiate the signing protocol
	// via a simple announcement
	N := 6
	for i := 1; i <= N; i++ {
		hc.SNodes[0].LogTest = []byte("Hello World" + strconv.Itoa(i))
		err = hc.SNodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: i})
		if err == sign.ChangingViewError {
			log.Println("Attempted round", i, "but received view change; waiting, then retrying")
			time.Sleep(3 * time.Second)
			i--
			continue
		}
		if err != nil {
			t.Error(err)
		}
	}
}

// tree from configuration file data/exconf.json
func TestMultipleRounds(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	// not mixing view changes in
	RoundsPerView := 100
	hc, err := config.LoadConfig("testdata/exconf.json")
	if err != nil {
		t.Fatal(err)
	}
	N := 5
	for _, sn := range hc.SNodes {
		sn.RoundsPerView = RoundsPerView
	}
	err = hc.Run(false, sign.MerkleTree)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		for _, n := range hc.SNodes {
			n.Close()
		}
		time.Sleep(1 * time.Second)
	}()

	// give it some time to set up
	time.Sleep(1 * time.Second)

	// Have the root node initiate the signing protocol
	// via a simple announcement
	for i := 1; i <= N; i++ {
		hc.SNodes[0].LogTest = []byte("Hello World" + strconv.Itoa(i))
		err = hc.SNodes[0].StartAnnouncement(&sign.AnnouncementMessage{LogTest: hc.SNodes[0].LogTest, Round: i})
		if err != nil {
			t.Error(err)
		}
	}
}

func runTCPTimestampFromConfig(RoundsPerView, signType, nMessages, nClients, nRounds, failureRate int, faultyNodes ...int) error {
	var hc *config.HostConfig
	var err error
	config.StartConfigPort += 2010

	// load config with faulty or healthy hosts
	if len(faultyNodes) > 0 {
		hc, err = config.LoadConfig("testdata/extcpconf.json", config.ConfigOptions{ConnType: "tcp", GenHosts: true, Faulty: true})
	} else {
		hc, err = config.LoadConfig("testdata/extcpconf.json", config.ConfigOptions{ConnType: "tcp", GenHosts: true})
	}
	if err != nil {
		return err
	}

	// set FailureRates
	if len(faultyNodes) > 0 {
		for i := range hc.SNodes {
			hc.SNodes[i].FailureRate = failureRate
		}
	}

	for _, n := range hc.SNodes {
		n.RoundsPerView = RoundsPerView
	}

	err = hc.Run(true, sign.Type(signType))
	if err != nil {
		return err
	}

	stampers, clients, err := hc.RunTimestamper(nClients)
	if err != nil {
		return err
	}
	for _, s := range stampers[1:] {
		go s.Run("regular", nRounds)
	}
	go stampers[0].Run("root", nRounds)
	log.Println("About to start sending client messages")

	for r := 1; r <= nRounds; r++ {
		var wg sync.WaitGroup
		for _, c := range clients {
			for i := 0; i < nMessages; i++ {
				messg := []byte("messg:" + strconv.Itoa(r) + "." + strconv.Itoa(i))
				wg.Add(1)

				// CLIENT SENDING
				go func(c *stamp.Client, messg []byte, i int) {
					defer wg.Done()
					server := "NO VALID SERVER"

				retry:
					c.Mux.Lock()
					for k := range c.Servers {
						server = k
						break
					}
					c.Mux.Unlock()
					log.Infoln("timestamping")
					err := c.TimeStamp(messg, server)
					if err == stamp.ErrClientToTSTimeout {
						log.Errorln(err)
						return
					}
					if err != nil {
						time.Sleep(1 * time.Second)
						fmt.Println("retrying because err:", err)
						goto retry
					}
					log.Infoln("timestamped")
				}(c, messg, r)
			}
		}

		// wait between rounds
		wg.Wait()
		log.Println("done with round:", r, "of", nRounds)
	}

	// give it some time before closing the connections
	// so that no essential messages are denied passing through the network
	time.Sleep(1 * time.Second)
	for _, h := range hc.SNodes {
		h.Close()
	}
	for _, c := range clients {
		c.Close()
	}
	return nil
}

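// A sketch of a driver for runTCPTimestampFromConfig: one client sending one
// message per round over two rounds, with no faulty nodes. The test name and
// argument values are illustrative assumptions.
func TestTCPTimestampFromConfigHealthy(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	// RoundsPerView=100 keeps view changes out of the run
	if err := runTCPTimestampFromConfig(100, int(sign.MerkleTree), 1, 1, 2, 0); err != nil {
		t.Fatal(err)
	}
}
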
func TestGoConnTimestampFromConfig(t *testing.T) {
	config.StartConfigPort += 2010
	nMessages := 1
	nClients := 1
	nRounds := 1

	hc, err := config.LoadConfig("testdata/exconf.json")
	if err != nil {
		t.Fatal(err)
	}
	for _, n := range hc.SNodes {
		n.RoundsPerView = 1000
	}
	err = hc.Run(true, sign.MerkleTree)
	if err != nil {
		t.Fatal(err)
	}

	stampers, clients, err := hc.RunTimestamper(nClients)
	if err != nil {
		// t.Fatal rather than log.Fatal, so the test framework handles the failure
		t.Fatal(err)
	}
	for _, s := range stampers[1:] {
		go s.Run("regular", nRounds)
		go s.ListenToClients()
	}
	go stampers[0].Run("root", nRounds)
	go stampers[0].ListenToClients()
	log.Println("About to start sending client messages")

	time.Sleep(1 * time.Second)
	for r := 0; r < nRounds; r++ {
		var wg sync.WaitGroup
		for _, c := range clients {
			for i := 0; i < nMessages; i++ {
				messg := []byte("messg:" + strconv.Itoa(r) + "." + strconv.Itoa(i))
				wg.Add(1)
				go func(c *stamp.Client, messg []byte, i int) {
					defer wg.Done()
					server := "NO VALID SERVER"
					c.Mux.Lock()
					for k := range c.Servers {
						server = k
						break
					}
					c.Mux.Unlock()
					c.TimeStamp(messg, server)
				}(c, messg, r)
			}
		}

		// wait between rounds
		wg.Wait()
		fmt.Println("done with round:", r, "of", nRounds)
	}

	// give it some time before closing the connections
	// so that no essential messages are denied passing through the network
	time.Sleep(5 * time.Second)
	for _, h := range hc.SNodes {
		h.Close()
	}
	for _, c := range clients {
		c.Close()
	}
}

// # Messages per round, # rounds, failure rate [0..100], list of faulty nodes
func runTSSIntegration(RoundsPerView, nMessages, nRounds, failureRate, failAsRootEvery, failAsFollowerEvery int, faultyNodes ...int) error {
	// coll_stamp.ROUND_TIME = 1 * time.Second
	var hostConfig *config.HostConfig
	var err error

	// load config with faulty or healthy hosts
	opts := config.ConfigOptions{}
	if len(faultyNodes) > 0 {
		opts.Faulty = true
	}
	hostConfig, err = config.LoadConfig("testdata/exconf.json", opts)
	if err != nil {
		return err
	}
	log.Printf("load config returned dir: %p", hostConfig.Dir)

	// set FailureRates as pure percentages
	if len(faultyNodes) > 0 {
		for i := range hostConfig.SNodes {
			hostConfig.SNodes[i].FailureRate = failureRate
		}
	}

	// set root failures
	if failAsRootEvery > 0 {
		for i := range hostConfig.SNodes {
			hostConfig.SNodes[i].FailAsRootEvery = failAsRootEvery
		}
	}

	// set follower failures
	for _, f := range faultyNodes {
		hostConfig.SNodes[f].FailAsFollowerEvery = failAsFollowerEvery
	}

	for _, n := range hostConfig.SNodes {
		n.RoundsPerView = RoundsPerView
	}

	err = hostConfig.Run(true, sign.MerkleTree)
	if err != nil {
		return err
	}

	// Connect all TSServers to their clients, except for the root TSServer
	ncps := 3 // # clients per TSServer
	stampers := make([]*stamp.Server, len(hostConfig.SNodes))
	for i := range stampers {
		stampers[i] = stamp.NewServer(hostConfig.SNodes[i])
		// pass i as an argument so each deferred close gets its own index;
		// deferring a closure over the loop variable would close only the last node
		defer func(i int) {
			hostConfig.SNodes[i].Close()
			time.Sleep(1 * time.Second)
		}(i)
	}

	clientsLists := make([][]*stamp.Client, len(hostConfig.SNodes[1:]))
	for i, s := range stampers[1:] {
		clientsLists[i] = createClientsForTSServer(ncps, s, hostConfig.Dir, 0+i+ncps)
	}

	for i, s := range stampers[1:] {
		go s.Run("regular", nRounds)
		go s.ListenToClients()
		go func(clients []*stamp.Client, nRounds int, nMessages int, s *stamp.Server) {
			log.Println("clients Talk")
			time.Sleep(1 * time.Second)
			clientsTalk(clients, nRounds, nMessages, s)
			log.Println("Clients done Talking")
		}(clientsLists[i], nRounds, nMessages, s)
	}

	log.Println("RUNNING ROOT")
	stampers[0].ListenToClients()
	stampers[0].Run("root", nRounds)
	log.Println("Done running root")

	// After clients receive messages back we need a better way
	// of waiting to make sure servers check ElGamal sigs
	// time.Sleep(1 * time.Second)
	log.Println("DONE with test")
	return nil
}

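// A hedged driver for runTSSIntegration: one message per round over two
// rounds, with every failure knob turned off. The test name and parameter
// values are illustrative assumptions.
func TestTSSIntegrationHealthy(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	if err := runTSSIntegration(100, 1, 2, 0, 0, 0); err != nil {
		t.Fatal(err)
	}
}
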
// Dispatch-function for running either client or server (mode-parameter)
func Run(app *config.AppConfig, conf *deploy.Config) {
	// Do some common setup
	if app.Mode == "client" {
		app.Hostname = app.Name
	}
	dbg.Lvl3(app.Hostname, "Starting to run")
	if conf.Debug > 1 {
		sign.DEBUG = true
	}

	if app.Hostname == "" {
		log.Fatal("no hostname given", app.Hostname)
	}

	// load the configuration
	dbg.Lvl3("loading configuration for", app.Hostname)
	var hc *config.HostConfig
	var err error
	s := GetSuite(conf.Suite)
	opts := config.ConfigOptions{ConnType: "tcp", Host: app.Hostname, Suite: s}
	if conf.Failures > 0 || conf.FFail > 0 {
		opts.Faulty = true
	}
	hc, err = config.LoadConfig("tree.json", opts)
	if err != nil {
		fmt.Println(err)
		log.Fatal(err)
	}

	// Wait for everybody to be ready before going on
	ioutil.WriteFile("coll_stamp_up/up"+app.Hostname, []byte("started"), 0666)
	for {
		_, err := os.Stat("coll_stamp_up")
		if err == nil {
			files, _ := ioutil.ReadDir("coll_stamp_up")
			dbg.Lvl4(app.Hostname, "waiting for others to finish", len(files))
			time.Sleep(time.Second)
		} else {
			break
		}
	}
	dbg.Lvl2(app.Hostname, "thinks everybody's here")

	// set FailureRates
	if conf.Failures > 0 {
		for i := range hc.SNodes {
			hc.SNodes[i].FailureRate = conf.Failures
		}
	}

	// set root failures
	if conf.RFail > 0 {
		for i := range hc.SNodes {
			hc.SNodes[i].FailAsRootEvery = conf.RFail
		}
	}

	// set follower failures:
	// a follower fails on every FFail-th round with FailureRate probability
	for i := range hc.SNodes {
		hc.SNodes[i].FailAsFollowerEvery = conf.FFail
	}

	defer func() {
		dbg.Lvl1("Collective Signing", app.Hostname, "has terminated in mode", app.Mode)
	}()

	switch app.Mode {
	case "client":
		log.Panic("No client mode")
	case "server":
		RunServer(app, conf, hc)
	}
}

func RunServer(hostname, app string, rounds int, rootwait int, debug int, testConnect bool, failureRate, rFail, fFail int, logger, suite string) {
	dbg.Lvl3(hostname, "Starting to run")
	if debug > 1 {
		sign.DEBUG = true
	}

	// fmt.Println("EXEC TIMESTAMPER: " + hostname)
	if hostname == "" {
		log.Fatal("no hostname given")
	}

	// load the configuration
	// dbg.Lvl3("loading configuration")
	var hc *config.HostConfig
	var err error
	s := GetSuite(suite)
	opts := config.ConfigOptions{ConnType: "tcp", Host: hostname, Suite: s}
	if failureRate > 0 || fFail > 0 {
		opts.Faulty = true
	}

	configTime := time.Now()
	hc, err = config.LoadConfig("tree.json", opts)
	if err != nil {
		fmt.Println(err)
		log.Fatal(err)
	}
	dbg.Lvl3(hostname, "finished loading config after", time.Since(configTime))

	for i := range hc.SNodes {
		// set FailureRates
		if failureRate > 0 {
			hc.SNodes[i].FailureRate = failureRate
		}

		// set root failures
		if rFail > 0 {
			hc.SNodes[i].FailAsRootEvery = rFail
		}

		// set follower failures:
		// a follower fails on every fFail-th round with failureRate probability
		hc.SNodes[i].FailAsFollowerEvery = fFail
	}

	// Wait for everybody to be ready before going on
	ioutil.WriteFile("coll_stamp_up/up"+hostname, []byte("started"), 0666)
	for {
		_, err := os.Stat("coll_stamp_up")
		if err == nil {
			dbg.Lvl4(hostname, "waiting for others to finish")
			time.Sleep(time.Second)
		} else {
			break
		}
	}
	dbg.Lvl3(hostname, "thinks everybody's here")

	err = hc.Run(app != "coll_sign", sign.MerkleTree, hostname)
	if err != nil {
		log.Fatal(err)
	}

	defer func(sn *sign.Node) {
		// log.Panicln("program has terminated:", hostname)
		dbg.Lvl1("Program timestamper has terminated:", hostname)
		sn.Close()
	}(hc.SNodes[0])

	stampers, _, err := RunTimestamper(hc, 0, hostname)
	// get rid of the hc information so it can be GC'ed
	hc = nil
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range stampers {
		// only listen if this is the hostname specified
		if s.Name() == hostname {
			s.Logger = logger
			s.Hostname = hostname
			s.App = app
			if s.IsRoot(0) {
				dbg.Lvl1("Root timestamper at:", hostname, rounds, "Waiting:", rootwait)
				// wait for the other nodes to get set up
				time.Sleep(time.Duration(rootwait) * time.Second)

				dbg.Lvl1("Starting root-round")
				s.Run("root", rounds)
				// dbg.Lvl3("\n\nROOT DONE\n\n")
			} else if !testConnect {
				dbg.Lvl2("Running regular timestamper on:", hostname)
				s.Run("regular", rounds)
				// dbg.Lvl1("\n\nREGULAR DONE\n\n")
			} else {
				// testing connection
				dbg.Lvl1("Running connection-test on:", hostname)
				s.Run("test_connect", rounds)
			}
		}
	}
}