// Wait for the end of the alo so we can close connection nicely func (p *Peer) WaitFins() { p.wgFin.Add(len(p.remote)) fn := func(rp RemotePeer) { f := Finish{p.Id} err := poly.SUITE.Write(rp.Conn, &f) if err != nil { dbg.Fatal(p.String(), "could not send FIN to ", rp.String()) } p.wgFin.Done() } p.ForRemotePeers(fn) dbg.Lvl2(p.String(), "waiting to send all FIN's packets") p.wgFin.Wait() // close all connections for _, rp := range p.remote { rp.Conn.Close() } dbg.Lvl2(p.String(), "close every connections") //for { // f := <-p.finChan // rp, ok := p.remote[f.Id] // if !ok { // dbg.Lvl2(p.Name, "received invalid FIN : wrong ID ", rp.Id, " ... ") // } else { // rp.Conn.Close() // dbg.Lvl2(p.Name, "received FIN from ", rp.String(), " => closed connection") // } //} }
// CloseAll propagates a CloseAll message down the tree for the given
// view; interior nodes forward the message to all children and close
// their own connections.
// NOTE(review): log.Fatal at the end terminates the whole process, so
// the trailing "return nil" is unreachable — confirm whether the Fatal
// is a deliberate shutdown or a leftover debug statement.
func (sn *Node) CloseAll(view int) error {
	dbg.Lvl2(sn.Name(), "received CloseAll on", view)

	// At the leaves
	if len(sn.Children(view)) == 0 {
		dbg.Lvl2(sn.Name(), "in CloseAll is root leaf")
	} else {
		dbg.Lvl2(sn.Name(), "in CloseAll is calling", len(sn.Children(view)), "children")

		// Inform all children of announcement
		messgs := make([]coconet.BinaryMarshaler, sn.NChildren(view))
		for i := range messgs {
			sm := SigningMessage{
				Type:         CloseAll,
				View:         view,
				LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)),
			}
			messgs[i] = &sm
		}
		ctx := context.TODO()
		if err := sn.PutDown(ctx, view, messgs); err != nil {
			return err
		}
		sn.Close()
	}

	log.Fatal("Closing down shop")
	return nil
}
// ComputeSharedSecret will make the exchange of dealers between // the peers and will compute the sharedsecret at the end func (p *Peer) ComputeSharedSecret() *poly.SharedSecret { // Construct the dealer dealerKey := cliutils.KeyPair(poly.SUITE) dealer := poly.NewDealer(p.info, &p.key, &dealerKey, p.pubKeys) // Construct the receiver receiver := poly.NewReceiver(p.info, &p.key) // add already its own dealer _, err := receiver.AddDealer(p.Id, dealer) if err != nil { dbg.Fatal(p.String(), "could not add its own dealer >< ABORT") } // Send the dealer struct TO every one err = p.SendToAll(dealer) dbg.Lvl2(p.Name, "sent its dealer to every peers. (err = ", err, ")") // Receive the dealer struct FROM every one // wait with a chan to get ALL dealers dealChan := make(chan *poly.Dealer) for _, rp := range p.remote { go func(rp RemotePeer) { d := new(poly.Dealer).UnmarshalInit(p.info) err := poly.SUITE.Read(rp.Conn, d) if err != nil { dbg.Fatal(p.Name, " received a strange dealer from ", rp.String()) } dealChan <- d }(rp) } // wait to get all dealers dbg.Lvl3(p.Name, "wait to receive every other peer's dealer...") n := 0 for { // get the dealer and add it d := <-dealChan dbg.Lvl3(p.Name, "collected one more dealer (count = ", n, ")") // TODO: get the response back to the dealer _, err := receiver.AddDealer(p.Id, d) if err != nil { dbg.Fatal(p.Name, "has error when adding the dealer : ", err) } n += 1 // we get enough dealers to compute the shared secret if n == p.info.T-1 { dbg.Lvl2(p.Name, "received every Dealers") break } } sh, err := receiver.ProduceSharedSecret() if err != nil { dbg.Fatal(p.Name, "could not produce shared secret. Abort. (err ", err, ")") } dbg.Lvl2(p.Name, "produced shared secret !") return sh }
// WaitAcks will make a peer waits for all others peers to send an ACK to it func (p *Peer) WaitACKs() { var wg sync.WaitGroup fn := func(rp RemotePeer) { a := Ack{} err := poly.SUITE.Read(rp.Conn, &a) if err != nil { dbg.Fatal(p.Name, "could not receive an ACK from ", rp.String(), " (err ", err, ")") } //p.ackChan <- a wg.Done() } wg.Add(len(p.remote)) p.ForRemotePeers(fn) dbg.Lvl3(p.Name, "is waiting for acks ...") wg.Wait() dbg.Lvl2(p.String(), "received ALL ACKs") //n := 0 //for { // a := <-p.ackChan // if a.Valid { // n += 1 // } // if n == p.info.N-1 { // dbg.Lvl2(p.Name, "received all acks. Continue") // break // } //} }
// Get gets data from the connection. // Returns io.EOF on an irrecoveralbe error. // Returns given error if it is Temporary. func (tc *TCPConn) Get(bum BinaryUnmarshaler) error { if tc.Closed() { dbg.Lvl3("tcpconn: get: connection closed") return ErrClosed } tc.encLock.Lock() for tc.dec == nil { tc.encLock.Unlock() return ErrNotEstablished } dec := tc.dec tc.encLock.Unlock() if Latency != 0 { time.Sleep(time.Duration(rand.Intn(Latency)) * time.Millisecond) } err := dec.Decode(bum) if err != nil { if IsTemporary(err) { return err } // if it is an irrecoverable error // close the channel and return that it has been closed if err != io.EOF && err.Error() != "read tcp4" { dbg.Lvl2("Couldn't decode packet at", tc.name, "error:", err) } else { dbg.Lvl3("Closing connection by EOF") } tc.Close() return ErrClosed } return err }
// ConnectTo will connect to the given host and start the SYN exchange (public key + id) func (p *Peer) ConnectTo(host string) error { tick := time.NewTicker(ConnWaitRetry) count := 0 for range tick.C { // connect conn, err := net.Dial("tcp", host) if err != nil { // we have tried too many times => abort if count == ConnRetry { tick.Stop() dbg.Fatal(p.Name, "could not connect to", host, " ", ConnRetry, "times. Abort.") // let's try again one more time } else { dbg.Lvl2(p.Name, "could not connect to", host, ". Retry in ", ConnWaitRetry.String()) count += 1 } } // handle successful connection dbg.Lvl3(p.Name, "has connected with peer ", host) tick.Stop() // start to syn with the respective peer go p.synWithPeer(conn) break } return nil }
// BroadcastSIgnature will broadcast the given signature to every other peer // AND will retrieve the signature of every other peer also ! func (p *Peer) BroadcastSignature(s *poly.SchnorrSig) []*poly.SchnorrSig { arr := make([]*poly.SchnorrSig, 0, p.info.N) arr = append(arr, s) err := p.SendToAll(s) if err != nil { dbg.Fatal(p.String(), "could not sent to everyone its schnorr sig") } sigChan := make(chan *poly.SchnorrSig) fn := func(rp RemotePeer) { sch := new(poly.SchnorrSig).Init(p.info) err := poly.SUITE.Read(rp.Conn, sch) if err != nil { dbg.Fatal(p.String(), "could not decode schnorr sig from ", rp.String()) } sigChan <- sch } // wait for every peers's schnorr sig p.ForRemotePeers(fn) n := 0 for { sig := <-sigChan arr = append(arr, sig) n += 1 if n == p.info.N-1 { dbg.Lvl2(p.String(), "received every other schnorr sig.") break } } return arr }
// Calculates a tree that is used for the timestampers func (d *Deter) calculateGraph() { d.virt = d.virt[d.Config.Nloggers:] d.phys = d.phys[d.Config.Nloggers:] t, hostnames, depth, err := graphs.TreeFromList(d.virt, d.Config.Hpn, d.Config.Bf) dbg.Lvl2("Depth:", depth) dbg.Lvl2("Total hosts:", len(hostnames)) total := d.Config.Nmachs * d.Config.Hpn if len(hostnames) != total { dbg.Lvl1("Only calculated", len(hostnames), "out of", total, "hosts - try changing number of", "machines or hosts per node") log.Fatal("Didn't calculate enough hosts") } // generate the configuration file from the tree cf := config.ConfigFromTree(t, hostnames) cfb, err := json.Marshal(cf) err = ioutil.WriteFile(d.DeployDir+"/tree.json", cfb, 0666) if err != nil { log.Fatal(err) } }
// WaitSYNs will wait until every peers has syn'd with this one func (p *Peer) WaitSYNs() { for { s := <-p.synChan dbg.Lvl3(p.Name, " synChan received Syn id ", s.Id) _, ok := p.remote[s.Id] if !ok { dbg.Fatal(p.Name, "received syn'd notification of an unknown peer... ABORT") } if len(p.remote) == p.info.N-1 { dbg.Lvl2(p.Name, "is SYN'd with every one") break } } }
// SchnorrSigRoot will first generate a
// random shared secret, then start a new round.
// It will wait for the partial sig of the peers
// to finally render a SchnorrSig struct.
func (p *Peer) SchnorrSigRoot(msg []byte) *poly.SchnorrSig {
	// First, gen. a random secret
	random := p.ComputeSharedSecret()
	// launch the new round
	err := p.schnorr.NewRound(random, msg)
	if err != nil {
		dbg.Fatal(p.String(), "could not make a new round : ", err)
	}

	// compute its own share of the signature
	ps := p.schnorr.RevealPartialSig()
	// add its own
	// NOTE(review): the error from AddPartialSig on our own share is
	// ignored here (unlike for remote shares below) — confirm intentional.
	p.schnorr.AddPartialSig(ps)
	// no need to send to all if you are the root
	//	p.SendToAll(ps)
	// then receive every partial sig
	sigChan := make(chan *poly.PartialSchnorrSig)
	fn := func(rp RemotePeer) {
		psig := new(poly.PartialSchnorrSig)
		err := poly.SUITE.Read(rp.Conn, psig)
		if err != nil {
			dbg.Fatal(p.String(), "could not decode PartialSig of ", rp.String())
		}
		sigChan <- psig
	}
	p.ForRemotePeers(fn)

	// wait for all partial sig to be received; exactly N-1 remote
	// partial signatures are expected.
	n := 0
	for {
		psig := <-sigChan
		err := p.schnorr.AddPartialSig(psig)
		if err != nil {
			dbg.Fatal(p.String(), "could not add the partial signature received : ", err)
		}
		n += 1
		if n == p.info.N-1 {
			dbg.Lvl2(p.String(), "received every other partial sig.")
			break
		}
	}
	// aggregate all partial signatures into the final Schnorr signature
	sign, err := p.schnorr.SchnorrSig()
	if err != nil {
		dbg.Fatal(p.String(), "could not generate the global SchnorrSig", err)
	}
	return sign
}
// hpn, bf, nmsgsG func RunTest(t T) (RunStats, error) { // add timeout for 10 minutes? done := make(chan struct{}) var rs RunStats cfg := &Config{ t.nmachs, deploy_config.Nloggers, t.hpn, t.bf, -1, t.rate, t.rounds, t.failures, t.rFail, t.fFail, deploy_config.Debug, deploy_config.RootWait, t.app, deploy_config.Suite} dbg.Lvl1("Running test with parameters", cfg) dbg.Lvl1("Failures percent is", t.failures) deployP.Configure(cfg) deployP.Deploy() err := deployP.Start() if err != nil { log.Fatal(err) return rs, nil } // give it a while to start up time.Sleep(10 * time.Second) go func() { rs = Monitor(t.bf) deployP.Stop() dbg.Lvl2("Test complete:", rs) done <- struct{}{} }() // timeout the command if it takes too long select { case <-done: if isZero(rs.MinTime) || isZero(rs.MaxTime) || isZero(rs.AvgTime) || math.IsNaN(rs.Rate) || math.IsInf(rs.Rate, 0) { return rs, errors.New(fmt.Sprintf("unable to get good data: %+v", rs)) } return rs, nil /* No time out for the moment case <-time.After(5 * time.Minute): return rs, errors.New("timed out") */ } }
// Dispatch-function for running either client or server (mode-parameter) func Run(app *config.AppConfig, depl *deploy.Config) { // we must know who we are if app.Hostname == "" { log.Fatal("Hostname empty : Abort") } dbg.Lvl2(app.Hostname, "Starting to run as ", app.Mode) var err error hosts, err := ReadHostsJson("tree.json") if err != nil { log.Fatal("Error while reading JSON hosts file on", app.Hostname, ". Abort") } switch app.Mode { case "client": RunClient(depl) case "server": RunServer(hosts, app, depl) } }
func (sn *Node) StatusConnections(view int, am *AnnouncementMessage) error { dbg.Lvl2(sn.Name(), "StatusConnected", view) // Ask connection-count on all connected children messgs := make([]coconet.BinaryMarshaler, sn.NChildren(view)) for i := range messgs { sm := SigningMessage{ Type: StatusConnections, View: view, LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)), Am: am} messgs[i] = &sm } ctx := context.TODO() if err := sn.PutDown(ctx, view, messgs); err != nil { return err } if len(sn.Children(view)) == 0 { sn.Commit(view, am.Round, nil) } return nil }
// Monitor monitors log aggregates results into RunStats.
//
// It dials the local logging websocket (retrying until reachable),
// scrapes the host count and tree depth from the status web page, and
// then consumes log entries until both the root ("forkexec") and the
// clients ("client_msg_stats") have reported. Round timings are folded
// into a running min/max/average and a Welford-style running variance
// (via M and S below).
func Monitor(bf int) RunStats {
	dbg.Lvl1("Starting monitoring")
	defer dbg.Lvl1("Done monitoring")
retry_dial:
	// keep retrying until the logging server's websocket accepts us
	ws, err := websocket.Dial(fmt.Sprintf("ws://localhost:%d/log", port), "", "http://localhost/")
	if err != nil {
		time.Sleep(1 * time.Second)
		goto retry_dial
	}
retry:
	// Get HTML of webpage for data (NHosts, Depth, ...)
	doc, err := goquery.NewDocument(fmt.Sprintf("http://localhost:%d/", port))
	if err != nil {
		dbg.Lvl4("unable to get log data: retrying:", err)
		time.Sleep(10 * time.Second)
		goto retry
	}
	nhosts := doc.Find("#numhosts").First().Text()
	dbg.Lvl4("hosts:", nhosts)
	depth := doc.Find("#depth").First().Text()
	dbg.Lvl4("depth:", depth)
	nh, err := strconv.Atoi(nhosts)
	if err != nil {
		log.Fatal("unable to convert hosts to be a number:", nhosts)
	}
	d, err := strconv.Atoi(depth)
	if err != nil {
		log.Fatal("unable to convert depth to be a number:", depth)
	}
	clientDone := false
	rootDone := false
	var rs RunStats
	rs.NHosts = nh
	rs.Depth = d
	rs.BF = bf

	// M is the running mean, S the running sum of squared deviations
	// (Welford's recurrence); k counts samples seen so far.
	var M, S float64
	k := float64(1)
	first := true
	for {
		var data []byte
		err := websocket.Message.Receive(ws, &data)
		if err != nil {
			// if it is an eof error than stop reading
			if err == io.EOF {
				dbg.Lvl4("websocket terminated before emitting EOF or terminating string")
				break
			}
			continue
		}
		if bytes.Contains(data, []byte("EOF")) || bytes.Contains(data, []byte("terminating")) {
			dbg.Lvl2(
				"EOF/terminating Detected: need forkexec to report and clients: rootDone", rootDone, "clientDone", clientDone)
		}
		if bytes.Contains(data, []byte("root_round")) {
			dbg.Lvl4("root_round msg received (clientDone = ", clientDone, ", rootDone = ", rootDone, ")")

			if clientDone || rootDone {
				dbg.Lvl4("Continuing searching data")
				// ignore after we have received our first EOF
				continue
			}
			var entry StatsEntry
			err := json.Unmarshal(data, &entry)
			if err != nil {
				log.Fatal("json unmarshalled improperly:", err)
			}
			if entry.Type != "root_round" {
				dbg.Lvl1("Wrong debugging message - ignoring")
				continue
			}
			dbg.Lvl4("root_round:", entry)
			// track min/max round time; first sample initializes both
			if first {
				first = false
				dbg.Lvl4("Setting min-time to", entry.Time)
				rs.MinTime = entry.Time
				rs.MaxTime = entry.Time
			}
			if entry.Time < rs.MinTime {
				dbg.Lvl4("Setting min-time to", entry.Time)
				rs.MinTime = entry.Time
			} else if entry.Time > rs.MaxTime {
				rs.MaxTime = entry.Time
			}

			// running average and Welford variance update
			rs.AvgTime = ((rs.AvgTime * (k - 1)) + entry.Time) / k
			var tM = M
			M += (entry.Time - tM) / k
			S += (entry.Time - tM) * (entry.Time - M)
			k++
			rs.StdDev = math.Sqrt(S / (k - 1))
		} else if bytes.Contains(data, []byte("schnorr_round")) {
			// same statistics pipeline as root_round, for schnorr rounds
			var entry StatsEntry
			err := json.Unmarshal(data, &entry)
			if err != nil {
				log.Fatal("json unmarshalled improperly:", err)
			}
			if entry.Type != "schnorr_round" {
				dbg.Lvl1("Wrong debugging message - ignoring")
				continue
			}
			dbg.Lvl4("schnorr_round:", entry)
			if first {
				first = false
				dbg.Lvl4("Setting min-time to", entry.Time)
				rs.MinTime = entry.Time
				rs.MaxTime = entry.Time
			}
			if entry.Time < rs.MinTime {
				dbg.Lvl4("Setting min-time to", entry.Time)
				rs.MinTime = entry.Time
			} else if entry.Time > rs.MaxTime {
				rs.MaxTime = entry.Time
			}

			rs.AvgTime = ((rs.AvgTime * (k - 1)) + entry.Time) / k
			var tM = M
			M += (entry.Time - tM) / k
			S += (entry.Time - tM) * (entry.Time - M)
			k++
			rs.StdDev = math.Sqrt(S / (k - 1))
		} else if bytes.Contains(data, []byte("schnorr_end")) {
			break
		} else if bytes.Contains(data, []byte("forkexec")) {
			// root's CPU usage report; only the first one counts
			if rootDone {
				continue
			}
			var ss SysStats
			err := json.Unmarshal(data, &ss)
			if err != nil {
				log.Fatal("unable to unmarshal forkexec:", ss)
			}
			rs.SysTime = ss.SysTime
			rs.UserTime = ss.UserTime
			dbg.Lvl4("forkexec:", ss)
			rootDone = true
			dbg.Lvl2("Monitor() Forkexec msg received (clientDone = ", clientDone, ", rootDone = ", rootDone, ")")
			if clientDone {
				break
			}
		} else if bytes.Contains(data, []byte("client_msg_stats")) {
			// aggregated client-side message statistics; first one counts
			if clientDone {
				continue
			}
			var cms ClientMsgStats
			err := json.Unmarshal(data, &cms)
			if err != nil {
				log.Fatal("unable to unmarshal client_msg_stats:", string(data))
			}
			// what do I want to keep out of the Client Message States
			// cms.Buckets stores how many were processed at time T
			// cms.RoundsAfter stores how many rounds delayed it was
			//
			// get the average delay (roundsAfter), max and min
			// get the total number of messages timestamped
			// get the average number of messages timestamped per second?
			avg, _, _, _ := ArrStats(cms.Buckets)
			// get the observed rate of processed messages
			// avg is how many messages per second, we want how many milliseconds between messages
			observed := avg / 1000 // set avg to messages per milliseconds
			observed = 1 / observed
			rs.Rate = observed
			rs.Times = cms.Times
			dbg.Lvl2("Monitor() Client Msg stats received (clientDone = ", clientDone, ",rootDone = ", rootDone, ")")
			clientDone = true
			if rootDone {
				break
			}
		}
	}
	return rs
}
func main() { deter, err := deploy.ReadConfig() if err != nil { log.Fatal("Couldn't load config-file in forkexec:", err) } conf = deter.Config dbg.DebugVisible = conf.Debug flag.Parse() // connect with the logging server if logger != "" { // blocks until we can connect to the logger lh, err := logutils.NewLoggerHook(logger, physaddr, conf.App) if err != nil { log.WithFields(log.Fields{ "file": logutils.File(), }).Fatalln("Error setting up logging server:", err) } log.AddHook(lh) } setup_deter() i := 0 var wg sync.WaitGroup virts := physToServer[physaddr] if len(virts) > 0 { dbg.Lvl3("starting timestampers for", len(virts), "client(s)", virts) i = (i + 1) % len(loggerports) for _, name := range virts { dbg.Lvl4("Starting", name, "on", physaddr) wg.Add(1) go func(nameport string) { dbg.Lvl3("Running on", physaddr, "starting", nameport) defer wg.Done() args := []string{ "-hostname=" + nameport, "-logger=" + logger, "-physaddr=" + physaddr, "-amroot=" + strconv.FormatBool(nameport == rootname), "-test_connect=" + strconv.FormatBool(testConnect), "-mode=server", "-app=" + conf.App, } dbg.Lvl3("Starting on", physaddr, "with args", args) cmdApp := exec.Command("./app", args...) //cmd.Stdout = log.StandardLogger().Writer() //cmd.Stderr = log.StandardLogger().Writer() cmdApp.Stdout = os.Stdout cmdApp.Stderr = os.Stderr dbg.Lvl3("fork-exec is running command:", args) err = cmdApp.Run() if err != nil { dbg.Lvl2("cmd run:", err) } // get CPU usage stats st := cmdApp.ProcessState.SystemTime() ut := cmdApp.ProcessState.UserTime() log.WithFields(log.Fields{ "file": logutils.File(), "type": "forkexec", "systime": st, "usertime": ut, }).Info("") dbg.Lvl2("Finished with Timestamper", physaddr) }(name) } dbg.Lvl3(physaddr, "Finished starting timestampers") wg.Wait() } else { dbg.Lvl2("No timestampers for", physaddr) } dbg.Lvl2(physaddr, "timestampers exited") }
// Run dispatches execution for the collective-signing app (mode-parameter):
// it loads the tree configuration, waits until every host has signalled
// readiness through the coll_stamp_up directory, configures the failure
// rates, and then runs either client or server mode.
func Run(app *config.AppConfig, conf *deploy.Config) {
	// Do some common setup
	if app.Mode == "client" {
		app.Hostname = app.Name
	}
	dbg.Lvl3(app.Hostname, "Starting to run")
	if conf.Debug > 1 {
		sign.DEBUG = true
	}

	if app.Hostname == "" {
		log.Fatal("no hostname given", app.Hostname)
	}

	// load the configuration
	dbg.Lvl3("loading configuration for", app.Hostname)
	var hc *config.HostConfig
	var err error
	s := GetSuite(conf.Suite)
	opts := config.ConfigOptions{ConnType: "tcp", Host: app.Hostname, Suite: s}
	// faulty-node support is only needed when failures are configured
	if conf.Failures > 0 || conf.FFail > 0 {
		opts.Faulty = true
	}
	hc, err = config.LoadConfig("tree.json", opts)
	if err != nil {
		fmt.Println(err)
		log.Fatal(err)
	}

	// Wait for everybody to be ready before going on:
	// each host drops a marker file; we poll until the directory is gone.
	// NOTE(review): the WriteFile error is ignored — presumably another
	// process creates/removes coll_stamp_up; confirm.
	ioutil.WriteFile("coll_stamp_up/up"+app.Hostname, []byte("started"), 0666)
	for {
		_, err := os.Stat("coll_stamp_up")
		if err == nil {
			files, _ := ioutil.ReadDir("coll_stamp_up")
			dbg.Lvl4(app.Hostname, "waiting for others to finish", len(files))
			time.Sleep(time.Second)
		} else {
			break
		}
	}
	dbg.Lvl2(app.Hostname, "thinks everybody's here")

	// set FailureRates
	if conf.Failures > 0 {
		for i := range hc.SNodes {
			hc.SNodes[i].FailureRate = conf.Failures
		}
	}
	// set root failures
	if conf.RFail > 0 {
		for i := range hc.SNodes {
			hc.SNodes[i].FailAsRootEvery = conf.RFail
		}
	}
	// set follower failures
	// a follower fails on %ffail round with failureRate probability
	for i := range hc.SNodes {
		hc.SNodes[i].FailAsFollowerEvery = conf.FFail
	}

	defer func() {
		dbg.Lvl1("Collective Signing", app.Hostname, "has terminated in mode", app.Mode)
	}()

	switch app.Mode {
	case "client":
		log.Panic("No client mode")
	case "server":
		RunServer(app, conf, hc)
	}
}
// RunServer creates the local peer for the distributed-schnorr app,
// connects it to every other host, performs the distributed Schnorr
// setup, and then runs depl.Rounds signature rounds. The root peer
// (index 0) additionally verifies each signature and reports connect /
// setup / round timings to the logging server.
func RunServer(hosts *config.HostsConfig, app *config.AppConfig, depl *deploy.Config) {
	s := config.GetSuite(depl.Suite)
	poly.SUITE = s
	poly.SECURITY = poly.MODERATE
	n := len(hosts.Hosts)

	// every peer is both dealer and receiver: N = R = T = n
	info := poly.PolyInfo{
		N: n,
		R: n,
		T: n,
	}
	// find our own index in the host list
	indexPeer := -1
	for i, h := range hosts.Hosts {
		if h == app.Hostname {
			indexPeer = i
			break
		}
	}
	if indexPeer == -1 {
		log.Fatal("Peer ", app.Hostname, "(", app.PhysAddr, ") did not find any match for its name.Abort")
	}

	start := time.Now()
	dbg.Lvl1("Creating new peer ", app.Hostname, "(", app.PhysAddr, ") ...")
	// indexPeer == 0 <==> peer is root
	p := NewPeer(indexPeer, app.Hostname, info, indexPeer == 0)

	// make it listen
	dbg.Lvl2("Peer", app.Hostname, "is now listening for incoming connections")
	go p.Listen()

	// then connect it to its successor in the list
	// (each peer dials only the peers after itself; earlier peers dial us)
	for _, h := range hosts.Hosts[indexPeer+1:] {
		dbg.Lvl2("Peer ", app.Hostname, " will connect to ", h)
		// will connect and SYN with the remote peer
		p.ConnectTo(h)
	}
	// Wait until this peer is connected / SYN'd with each other peer
	p.WaitSYNs()

	if p.IsRoot() {
		delta := time.Since(start)
		dbg.Lvl2(p.String(), "Connections accomplished in", delta)
		log.WithFields(log.Fields{
			"file":  logutils.File(),
			"type":  "schnorr_connect",
			"round": 0,
			"time":  delta,
		}).Info("")
	}

	// start to record
	start = time.Now()

	// Setup the schnorr system amongst peers
	p.SetupDistributedSchnorr()
	p.SendACKs()
	p.WaitACKs()
	dbg.Lvl1(p.String(), "completed Schnorr setup")

	// send setup time if we're root
	if p.IsRoot() {
		delta := time.Since(start)
		dbg.Lvl2(p.String(), "setup accomplished in ", delta)
		log.WithFields(log.Fields{
			"file":  logutils.File(),
			"type":  "schnorr_setup",
			"round": 0,
			"time":  delta,
		}).Info("")
	}

	for round := 0; round < depl.Rounds; round++ {
		if p.IsRoot() {
			dbg.Lvl2("Starting round", round)
		}

		// Then issue a signature !
		start = time.Now()
		msg := "hello world"

		// Only root calculates if it's OK and sends a log-message
		if p.IsRoot() {
			sig := p.SchnorrSigRoot([]byte(msg))
			err := p.VerifySchnorrSig(sig, []byte(msg))
			if err != nil {
				dbg.Fatal(p.String(), "could not verify schnorr signature :/ ", err)
			}
			dbg.Lvl2(p.String(), "verified the schnorr sig !")
			// record time
			delta := time.Since(start)
			dbg.Lvl2(p.String(), "signature done in ", delta)
			log.WithFields(log.Fields{
				"file":  logutils.File(),
				"type":  "schnorr_round",
				"round": round,
				"time":  delta,
			}).Info("")
		} else {
			// Compute the partial sig and send it to the root
			p.SchnorrSigPeer([]byte(msg))
		}
	}

	// exchange FINs and close connections
	p.WaitFins()
	dbg.Lvl1(p.String(), "is leaving ...")

	if p.IsRoot() {
		log.WithFields(log.Fields{
			"file": logutils.File(),
			"type": "schnorr_end",
		}).Info("")
	}
}
// RunServer loads the tree configuration for a timestamper host,
// applies the configured failure rates, waits for all hosts to come up,
// and then runs the local timestamper in root / regular / test_connect
// mode depending on whether this host is the root and whether
// testConnect is set.
func RunServer(hostname, app string, rounds int, rootwait int, debug int, testConnect bool,
	failureRate, rFail, fFail int, logger, suite string) {
	dbg.Lvl3(hostname, "Starting to run")
	if debug > 1 {
		sign.DEBUG = true
	}

	// fmt.Println("EXEC TIMESTAMPER: " + hostname)
	if hostname == "" {
		log.Fatal("no hostname given")
	}

	// load the configuration
	//dbg.Lvl3("loading configuration")
	var hc *config.HostConfig
	var err error
	s := GetSuite(suite)
	opts := config.ConfigOptions{ConnType: "tcp", Host: hostname, Suite: s}
	// faulty-node support is only needed when failures are configured
	if failureRate > 0 || fFail > 0 {
		opts.Faulty = true
	}

	configTime := time.Now()
	hc, err = config.LoadConfig("tree.json", opts)
	if err != nil {
		fmt.Println(err)
		log.Fatal(err)
	}
	dbg.Lvl3(hostname, "finished loading config after", time.Since(configTime))

	for i := range hc.SNodes {
		// set FailureRates
		if failureRate > 0 {
			hc.SNodes[i].FailureRate = failureRate
		}
		// set root failures
		if rFail > 0 {
			hc.SNodes[i].FailAsRootEvery = rFail
		}
		// set follower failures
		// a follower fails on %ffail round with failureRate probability
		hc.SNodes[i].FailAsFollowerEvery = fFail
	}

	// Wait for everybody to be ready before going on:
	// drop a marker file and poll until the coll_stamp_up directory is gone.
	// NOTE(review): the WriteFile error is ignored — presumably another
	// process manages coll_stamp_up; confirm.
	ioutil.WriteFile("coll_stamp_up/up"+hostname, []byte("started"), 0666)
	for {
		_, err := os.Stat("coll_stamp_up")
		if err == nil {
			dbg.Lvl4(hostname, "waiting for others to finish")
			time.Sleep(time.Second)
		} else {
			break
		}
	}
	dbg.Lvl3(hostname, "thinks everybody's here")

	err = hc.Run(app != "coll_sign", sign.MerkleTree, hostname)
	if err != nil {
		log.Fatal(err)
	}

	defer func(sn *sign.Node) {
		//log.Panicln("program has terminated:", hostname)
		dbg.Lvl1("Program timestamper has terminated:", hostname)
		sn.Close()
	}(hc.SNodes[0])

	stampers, _, err := RunTimestamper(hc, 0, hostname)
	// get rid of the hc information so it can be GC'ed
	hc = nil
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range stampers {
		// only listen if this is the hostname specified
		if s.Name() == hostname {
			s.Logger = logger
			s.Hostname = hostname
			s.App = app
			if s.IsRoot(0) {
				dbg.Lvl1("Root timestamper at:", hostname, rounds, "Waiting: ", rootwait)
				// wait for the other nodes to get set up
				time.Sleep(time.Duration(rootwait) * time.Second)

				dbg.Lvl1("Starting root-round")
				s.Run("root", rounds)
				// dbg.Lvl3("\n\nROOT DONE\n\n")
			} else if !testConnect {
				dbg.Lvl2("Running regular timestamper on:", hostname)
				s.Run("regular", rounds)
				// dbg.Lvl1("\n\nREGULAR DONE\n\n")
			} else {
				// testing connection
				dbg.Lvl1("Running connection-test on:", hostname)
				s.Run("test_connect", rounds)
			}
		}
	}
}