func (round *RoundStats) SignatureBroadcast(in *sign.SigningMessage, out []*sign.SigningMessage) error { err := round.RoundStamperListener.SignatureBroadcast(in, out) if err == nil && round.IsRoot { dbg.Lvlf1("This is round %d with %d messages - %d since start.", round.RoundNbr, in.SBm.Messages, round.Node.Messages) } return err }
// ExampleLLvl documents the output format of the four basic logging calls:
// Lvl*/Lvlf* print with their numeric debug-level prefix, while the LLvl*
// variants use the "-1" prefix. The Output block below is checked by
// "go test" and must match the actual log lines exactly.
func ExampleLLvl() {
	dbg.Lvl1("Lvl output")
	dbg.LLvl1("LLvl output")
	dbg.Lvlf1("Lvlf output")
	dbg.LLvlf1("LLvlf output")
	// Output:
	// 1: ( dbg_test.ExampleLLvl: 0) - Lvl output
	// -1: ( dbg_test.ExampleLLvl: 0) - LLvl output
	// 1: ( dbg_test.ExampleLLvl: 0) - Lvlf output
	// -1: ( dbg_test.ExampleLLvl: 0) - LLvlf output
}
func wait_for_blocks() { server := "localhost:2011" suite = app.GetSuite("25519") dbg.Lvl2("Connecting to", server) conn := coconet.NewTCPConn(server) err := conn.Connect() if err != nil { dbg.Fatal("Error when getting the connection to the host:", err) } dbg.Lvl1("Connected to ", server) for i := 0; i < 1000; i++ { time.Sleep(1 * time.Second) msg := &BitCoSi.BitCoSiMessage{ Type: BitCoSi.BlockRequestType, ReqNo: 0, } err = conn.PutData(msg) if err != nil { dbg.Fatal("Couldn't send hash-message to server: ", err) } dbg.Lvl1("Sent signature request") // Wait for the signed message tsm := new(BitCoSi.BitCoSiMessage) tsm.Brep = &BitCoSi.BlockReply{} tsm.Brep.SuiteStr = suite.String() err = conn.GetData(tsm) if err != nil { dbg.Fatal("Error while receiving signature:", err) } //dbg.Lvlf1("Got signature response %+v", tsm.Brep) T := new(BitCoSi.TrBlock) T.Block = tsm.Brep.Block T.Print() dbg.Lvlf1("Response %v ", tsm.Brep.Response) } // Asking to close the connection err = conn.PutData(&BitCoSi.BitCoSiMessage{ ReqNo: 1, Type: BitCoSi.BitCoSiClose, }) conn.Close() }
// Get gets data from the connection. // Returns io.EOF on an irrecoverable error. // Returns given error if it is Temporary. func (tc *TCPConn) GetData(bum BinaryUnmarshaler) error { if tc.Closed() { dbg.Lvl3("tcpconn: get: connection closed") return ErrClosed } tc.encLock.Lock() for tc.dec == nil { tc.encLock.Unlock() return ErrNotEstablished } dec := tc.dec tc.encLock.Unlock() //if Latency != 0 { // time.Sleep(time.Duration(rand.Intn(Latency)) * time.Millisecond) //} err := dec.Decode(bum) if err != nil { if IsTemporary(err) { dbg.Lvl2("Temporary error") return err } // if it is an irrecoverable error // close the channel and return that it has been closed if err == io.EOF || err.Error() == "read tcp4" { dbg.Lvl3("Closing connection by EOF:", err) } else { if !strings.Contains(err.Error(), "use of closed") { dbg.Lvl1("Couldn't decode packet at", tc.name, "error:", err) dbg.Lvlf1("Packet was: %+v", bum) } } tc.Close() return ErrClosed } return err }
// Creates the appropriate configuration-files and copies everything to the // deterlab-installation. func (d *Deterlab) Deploy(rc RunConfig) error { dbg.Lvlf1("Next run is %+v", rc) os.RemoveAll(d.DeployDir) os.Mkdir(d.DeployDir, 0777) dbg.Lvl3("Writing config-files") // Initialize the deter-struct with our current structure (for debug-levels // and such), then read in the app-configuration to overwrite eventual // 'Machines', 'ppm', '' or other fields deter := *d appConfig := d.DeployDir + "/app.toml" deterConfig := d.DeployDir + "/deter.toml" ioutil.WriteFile(appConfig, rc.Toml(), 0666) deter.ReadConfig(appConfig) deter.createHosts() d.MasterLogger = deter.MasterLogger app.WriteTomlConfig(deter, deterConfig) // Prepare special configuration preparation for each application - the // reading in twice of the configuration file, once for the deterConfig, // then for the appConfig, sets the deterConfig as defaults and overwrites // everything else with the actual appConfig (which comes from the // runconfig-file) switch d.App { case "sign", "stamp": conf := app.ConfigColl{} conf.StampsPerRound = -1 conf.StampRatio = 1.0 app.ReadTomlConfig(&conf, deterConfig) app.ReadTomlConfig(&conf, appConfig) // Calculates a tree that is used for the timestampers var depth int conf.Tree, conf.Hosts, depth, _ = graphs.TreeFromList(deter.Virt[:], conf.Ppm, conf.Bf) dbg.Lvl2("Depth:", depth) dbg.Lvl2("Total peers:", len(conf.Hosts)) total := deter.Machines * conf.Ppm if len(conf.Hosts) != total { dbg.Fatal("Only calculated", len(conf.Hosts), "out of", total, "hosts - try changing number of", "machines or hosts per node") } deter.Hostnames = conf.Hosts // re-write the new configuration-file app.WriteTomlConfig(conf, appConfig) case "skeleton": conf := app.ConfigSkeleton{} app.ReadTomlConfig(&conf, deterConfig) app.ReadTomlConfig(&conf, appConfig) // Calculates a tree that is used for the timestampers var depth int conf.Tree, conf.Hosts, depth, _ = graphs.TreeFromList(deter.Virt[:], 
conf.Ppm, conf.Bf) dbg.Lvl2("Depth:", depth) dbg.Lvl2("Total peers:", len(conf.Hosts)) total := deter.Machines * conf.Ppm if len(conf.Hosts) != total { dbg.Fatal("Only calculated", len(conf.Hosts), "out of", total, "hosts - try changing number of", "machines or hosts per node") } deter.Hostnames = conf.Hosts // re-write the new configuration-file app.WriteTomlConfig(conf, appConfig) case "shamir": conf := app.ConfigShamir{} app.ReadTomlConfig(&conf, deterConfig) app.ReadTomlConfig(&conf, appConfig) _, conf.Hosts, _, _ = graphs.TreeFromList(deter.Virt[:], conf.Ppm, conf.Ppm) deter.Hostnames = conf.Hosts // re-write the new configuration-file app.WriteTomlConfig(conf, appConfig) case "naive": conf := app.NaiveConfig{} app.ReadTomlConfig(&conf, deterConfig) app.ReadTomlConfig(&conf, appConfig) _, conf.Hosts, _, _ = graphs.TreeFromList(deter.Virt[:], conf.Ppm, 2) deter.Hostnames = conf.Hosts dbg.Lvl3("Deterlab: naive applications:", conf.Hosts) dbg.Lvl3("Deterlab: naive app config:", conf) dbg.Lvl3("Deterlab: naive app virt:", deter.Virt[:]) deter.Hostnames = conf.Hosts app.WriteTomlConfig(conf, appConfig) case "ntree": conf := app.NTreeConfig{} app.ReadTomlConfig(&conf, deterConfig) app.ReadTomlConfig(&conf, appConfig) var depth int conf.Tree, conf.Hosts, depth, _ = graphs.TreeFromList(deter.Virt[:], conf.Ppm, conf.Bf) dbg.Lvl2("Depth:", depth) deter.Hostnames = conf.Hosts app.WriteTomlConfig(conf, appConfig) case "randhound": } app.WriteTomlConfig(deter, "deter.toml", d.DeployDir) // copy the webfile-directory of the logserver to the remote directory err := exec.Command("cp", "-a", d.DeterDir+"/cothority.conf", d.DeployDir).Run() if err != nil { dbg.Fatal("error copying webfiles:", err) } build, err := ioutil.ReadDir(d.BuildDir) for _, file := range build { err = exec.Command("cp", d.BuildDir+"/"+file.Name(), d.DeployDir).Run() if err != nil { dbg.Fatal("error copying build-file:", err) } } dbg.Lvl1("Copying over to", d.Login, "@", d.Host) // Copy everything over to 
Deterlabs err = cliutils.Rsync(d.Login, d.Host, d.DeployDir+"/", "remote/") if err != nil { dbg.Fatal(err) } dbg.Lvl2("Done copying") return nil }
func main() { conf := &app.ConfigColl{} app.ReadConfig(conf) // we must know who we are if app.RunFlags.Hostname == "" { dbg.Fatal("Hostname empty: Abort") } // Do some common setup if app.RunFlags.Mode == "client" { app.RunFlags.Hostname = app.RunFlags.Name } hostname := app.RunFlags.Hostname if hostname == conf.Hosts[0] { dbg.Lvlf3("Tree is %+v", conf.Tree) } dbg.Lvl3(hostname, "Starting to run") app.RunFlags.StartedUp(len(conf.Hosts)) peer := conode.NewPeer(hostname, conf.ConfigConode) Releases = make(map[string]CommitEntry) //ReleaseInformation() ReadRelease(PolicyFile, SignaturesFile, CommitIdFile) if app.RunFlags.AmRoot { err := peer.WaitRoundSetup(len(conf.Hosts), 5, 2) if err != nil { dbg.Fatal(err) } dbg.Lvl1("Starting the rounds") } if app.RunFlags.AmRoot { for round := 0; round < conf.Rounds; round++ { dbg.Lvl1("Doing round", round, "of", conf.Rounds) wallTime := monitor.NewMeasure("round") hashToSign, _ := CommitScanner(CommitIdFile) // retrieve commitid/hash that the root is willing to get signed entry := Releases[hashToSign] if entry.policy != "" && entry.signatures != "" { rootpgpTime := monitor.NewMeasure("rootpgp") decision, err := ApprovalCheck(entry.policy, entry.signatures, hashToSign) rootpgpTime.Measure() if decision && err == nil { round := NewRoundSwsign(peer.Node) round.Hash = []byte(hashToSign) // passing hash of the file that we want to produce a signature for peer.StartAnnouncement(round) wallTime.Measure() Signature := <-round.Signature dbg.Lvlf1("Received signature %+v", Signature) } else { dbg.Fatal("Developers related to the root haven't approved the release so the root didn't start signing process") } } else { dbg.Error("There is no input with such commitid", hashToSign) } } peer.SendCloseAll() } else { peer.LoopRounds(RoundSwsignType, conf.Rounds) } dbg.Lvlf3("Done - flags are %+v", app.RunFlags) monitor.End() }
// listen for clients connections func (s *StampListener) ListenRequests() error { dbg.Lvl3("Setup StampListener on", s.NameL) global, _ := cliutils.GlobalBind(s.NameL) var err error s.Port, err = net.Listen("tcp4", global) if err != nil { panic(err) } go func() { for { dbg.Lvlf2("Listening to sign-requests: %p", s) conn, err := s.Port.Accept() if err != nil { // handle error dbg.Lvl3("failed to accept connection") select { case w := <-s.waitClose: dbg.Lvl3("Closing stamplistener:", w) return default: continue } } dbg.Lvl3("Waiting for connection") c := coconet.NewTCPConnFromNet(conn) if _, ok := s.Clients[c.Name()]; !ok { s.Clients[c.Name()] = c go func(co coconet.Conn) { for { tsm := TimeStampMessage{} err := co.GetData(&tsm) dbg.Lvlf2("Got data to sign %+v - %+v", tsm, tsm.Sreq) if err != nil { dbg.Lvlf1("%p Failed to get from child: %s", s.NameL, err) co.Close() return } switch tsm.Type { default: dbg.Lvlf1("Message of unknown type: %v\n", tsm.Type) case StampRequestType: s.Mux.Lock() s.Queue[READING] = append(s.Queue[READING], MustReplyMessage{Tsm: tsm, To: co.Name()}) s.Mux.Unlock() case StampClose: dbg.Lvl2("Closing connection") co.Close() return case StampExit: dbg.Lvl2("Exiting server upon request") os.Exit(-1) } } }(c) } } }() return nil }