// GetAddress gets the localhost's IPv4 address.
func GetAddress() (string, error) {
	name, err := os.Hostname()
	if err != nil {
		log.Error("Error Resolving Hostname:", err)
		return "", err
	}

	if ipv4host == "NONE" {
		as, err := net.LookupHost(name)
		if err != nil {
			return "", err
		}

		addr := ""
		for _, a := range as {
			dbg.Lvl4("a = %+v", a)
			if ipv4Reg.MatchString(a) {
				dbg.Lvl4("matches")
				addr = a
			}
		}
		if addr == "" {
			err = errors.New("No IPv4 Address for Hostname")
		}
		return addr, err
	}
	return ipv4host, nil
}
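// A minimal usage sketch (assuming the package-level ipv4host and ipv4Reg
// variables are initialized elsewhere, as the function above expects):
// resolve the local IPv4 address and fall back to loopback if none is found.
// The helper name is hypothetical.
func localAddressOrLoopback() string {
	addr, err := GetAddress()
	if err != nil {
		dbg.Lvl1("no IPv4 address found, falling back to 127.0.0.1:", err)
		return "127.0.0.1"
	}
	return addr
}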
func RunClient(server string, nmsgs int, name string, rate int) {
	dbg.Lvl4("Starting to run stampclient")
	c := NewClient(name)
	servers := strings.Split(server, ",")

	// connect to all the servers listed
	for _, s := range servers {
		h, p, err := net.SplitHostPort(s)
		if err != nil {
			log.Fatal("improperly formatted host")
		}
		pn, _ := strconv.Atoi(p)
		c.AddServer(s, coconet.NewTCPConn(net.JoinHostPort(h, strconv.Itoa(pn+1))))
	}

	// Check if somebody asks for the old way
	if rate < 0 {
		log.Fatal("Rounds-based limiting is deprecated")
	}

	// Stream time coll_stamp requests:
	// if a rate is specified, send out one message every rate milliseconds
	dbg.Lvl1(name, "starting to stream at rate", rate)
	streamMessgs(c, servers, rate)
	dbg.Lvl4("Finished streaming")
}
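// A minimal usage sketch with hypothetical hosts: stream one timestamp
// request every 100 milliseconds to two stamp servers. Note that the client
// connects to port+1 of each server listed, per the loop above.
func runExampleClient() {
	RunClient("localhost:2000,localhost:2010", 0, "client-0", 100)
}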
/*
 * Write the hosts.txt file automatically
 * from the project name and number of servers
 */
func (d *Deter) generateHostsFile() error {
	hosts_file := d.DeployDir + "/hosts.txt"
	num_servers := d.Config.Nmachs + d.Config.Nloggers

	// open and erase the file if needed
	if _, err1 := os.Stat(hosts_file); err1 == nil {
		dbg.Lvl4("Hosts file", hosts_file, "already exists. Erasing ...")
		os.Remove(hosts_file)
	}

	// create the file
	f, err := os.Create(hosts_file)
	if err != nil {
		log.Fatal("Could not create hosts file description: ", hosts_file, " :: ", err)
		return err
	}
	defer f.Close()

	// write the name of the server + \t + IP address
	ip := "10.255.0."
	name := "SAFER.isi.deterlab.net"
	for i := 1; i <= num_servers; i++ {
		f.WriteString(fmt.Sprintf("server-%d.%s.%s\t%s%d\n", i-1, d.Project, name, ip, i))
	}
	dbg.Lvl4(fmt.Sprintf("Created hosts file description (%d hosts)", num_servers))
	return err
}
func (sn *Node) Announce(view int, am *AnnouncementMessage) error {
	dbg.Lvl4(sn.Name(), "received announcement on", view)
	if err := sn.TryFailure(view, am.Round); err != nil {
		return err
	}

	if err := sn.setUpRound(view, am); err != nil {
		return err
	}

	// Inform all children of announcement
	messgs := make([]coconet.BinaryMarshaler, sn.NChildren(view))
	for i := range messgs {
		sm := SigningMessage{
			Type:         Announcement,
			View:         view,
			LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)),
			Am:           am}
		messgs[i] = &sm
	}

	dbg.Lvl4(sn.Name(), "sending to all children")
	ctx := context.TODO()
	//ctx, _ := context.WithTimeout(context.Background(), 2000*time.Millisecond)
	if err := sn.PutDown(ctx, view, messgs); err != nil {
		return err
	}

	// return sn.Commit(view, am)
	if len(sn.Children(view)) == 0 {
		sn.Commit(view, am.Round, nil)
	}
	return nil
}
func (sn *Node) TryViewChange(view int) error {
	dbg.Lvl4(sn.Name(), "TRY VIEW CHANGE on", view, "with last view", sn.ViewNo)
	// should ideally be compare-and-swap
	sn.viewmu.Lock()
	if view <= sn.ViewNo {
		sn.viewmu.Unlock()
		return errors.New("trying to view change on a previous/current view")
	}
	if sn.ChangingView {
		sn.viewmu.Unlock()
		return ChangingViewError
	}
	sn.ChangingView = true
	sn.viewmu.Unlock()

	// take action if we are the new view root
	if sn.Name() == sn.RootFor(view) {
		dbg.Lvl4(sn.Name(), "INITIATING VIEW CHANGE FOR VIEW:", view)
		go func() {
			err := sn.StartVotingRound(
				&Vote{
					View: view,
					Type: ViewChangeVT,
					Vcv: &ViewChangeVote{
						View: view,
						Root: sn.Name()}})
			if err != nil {
				log.Errorln(sn.Name(), "TRY VIEW CHANGE FAILED: ", err)
			}
		}()
	}
	return nil
}
// PutDown sends a message (an interface{} value) down to all children through
// whatever 'network' interface each child Peer implements.
func (h *TCPHost) PutDown(ctx context.Context, view int, data []BinaryMarshaler) error {
	// Try to send the message to all children.
	// If at least one of the attempts fails, return a non-nil error.
	var err error
	var errLock sync.Mutex
	children := h.views.Children(view)
	if len(data) != len(children) {
		panic("number of messages passed down != number of children")
	}
	var canceled int64
	var wg sync.WaitGroup
	dbg.Lvl4(h.Name(), "sending to", len(children), "children")
	for i, c := range children {
		dbg.Lvl4("Sending to child", c)
		wg.Add(1)
		go func(i int, c string) {
			defer wg.Done()
			// try until it is canceled, successful, or timed-out
			for {
				// check to see if it has been canceled
				if atomic.LoadInt64(&canceled) == 1 {
					return
				}

				// if the connection is not Ready, try again later
				h.PeerLock.Lock()
				Ready := h.Ready[c]
				conn := h.peers[c]
				h.PeerLock.Unlock()

				if Ready {
					if e := conn.Put(data[i]); e != nil {
						errLock.Lock()
						err = e
						errLock.Unlock()
					}
					dbg.Lvl4("Informed child", c)
					return
				}
				dbg.Lvl4("Re-trying, waiting to put down msg from", h.Name(), "to", c)
				time.Sleep(250 * time.Millisecond)
			}
		}(i, c)
	}
	done := make(chan struct{})
	go func() {
		wg.Wait()
		done <- struct{}{}
	}()
	select {
	case <-done:
	case <-ctx.Done():
		err = ctx.Err()
		atomic.StoreInt64(&canceled, 1)
	}
	return err
}
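// A minimal sketch of calling PutDown with a deadline (the helper name is
// hypothetical; it assumes one marshaled message per child, as PutDown
// requires). If any child connection stays unready past the deadline,
// ctx.Err() is returned and the retry goroutines observe the canceled flag.
func putDownWithTimeout(h *TCPHost, view int, msgs []BinaryMarshaler) error {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	return h.PutDown(ctx, view, msgs)
}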
func (d *Deter) Build(build string) error {
	dbg.Lvl1("Building for", d.Login, d.Host, d.Project, build)
	start := time.Now()

	var wg sync.WaitGroup

	// Start with a clean build directory
	current, _ := os.Getwd()
	dbg.Lvl4("Current dir is:", current)
	defer os.Chdir(current)

	// Go into the deterlab dir and create the build dir
	os.Chdir(d.DeterDir)
	os.RemoveAll(d.BuildDir)
	os.Mkdir(d.BuildDir, 0777)

	// start building the necessary packages
	packages := []string{"logserver", "forkexec", "../../app", "deter"}
	if build != "" {
		packages = strings.Split(build, ",")
	}
	dbg.Lvl3("Starting to build all executables", packages)
	for _, p := range packages {
		basename := path.Base(p)
		dbg.Lvl4("Building", p, "into", basename)

		wg.Add(1)
		src := p + "/" + basename + ".go"
		dst := d.BuildDir + "/" + basename
		if p == "deter" {
			go func(s, d string) {
				defer wg.Done()
				// the users node has a 386 FreeBSD architecture
				out, err := cliutils.Build(s, d, "386", "freebsd")
				if err != nil {
					cliutils.KillGo()
					fmt.Println(out)
					log.Fatal(err)
				}
			}(src, dst)
			continue
		}
		go func(s, d string) {
			defer wg.Done()
			// deter has an amd64, linux architecture
			out, err := cliutils.Build(s, d, "amd64", "linux")
			if err != nil {
				cliutils.KillGo()
				fmt.Println(out)
				log.Fatal(err)
			}
		}(src, dst)
	}
	// wait for the build to finish
	wg.Wait()
	dbg.Lvl1("Build is finished after", time.Since(start))
	return nil
}
func (sn *Node) setUpRound(view int, am *AnnouncementMessage) error {
	// TODO: accept announcements on old views?? linearizability?
	sn.viewmu.Lock()
	// if (sn.ChangingView && am.Vote == nil) || (sn.ChangingView && am.Vote != nil && am.Vote.Vcv == nil) {
	// 	dbg.Lvl4(sn.Name(), "currently changing view")
	// 	sn.viewmu.Unlock()
	// 	return ChangingViewError
	// }
	if sn.ChangingView && am.Vote != nil && am.Vote.Vcv == nil {
		dbg.Lvl4(sn.Name(), "currently changing view")
		sn.viewmu.Unlock()
		return ChangingViewError
	}
	sn.viewmu.Unlock()

	sn.roundmu.Lock()
	Round := am.Round
	if Round <= sn.LastSeenRound {
		sn.roundmu.Unlock()
		return ErrPastRound
	}

	// make space for the round type
	if len(sn.RoundTypes) <= Round {
		sn.RoundTypes = append(sn.RoundTypes, make([]RoundType, max(len(sn.RoundTypes), Round+1))...)
	}
	if am.Vote == nil {
		dbg.Lvl4(Round, len(sn.RoundTypes))
		sn.RoundTypes[Round] = SigningRT
	} else {
		sn.RoundTypes[Round] = RoundType(am.Vote.Type)
	}
	sn.roundmu.Unlock()

	// set up commit and response channels for the new round
	sn.Rounds[Round] = NewRound(sn.suite)
	sn.initCommitCrypto(Round)
	sn.Rounds[Round].Vote = am.Vote

	// update max seen round
	sn.roundmu.Lock()
	sn.LastSeenRound = max(sn.LastSeenRound, Round)
	sn.roundmu.Unlock()

	// the root is the only node that keeps track of the round # internally
	if sn.IsRoot(view) {
		sn.RoundsAsRoot += 1
		// TODO: is sn.Round needed if we have LastSeenRound?
		sn.Round = Round

		// Create my back link to the previous round
		sn.SetBackLink(Round)
		// sn.SetAccountableRound(Round)
	}
	return nil
}
func (s *Server) ConnectToLogger() {
	if s.Logger == "" || s.Hostname == "" || s.App == "" {
		dbg.Lvl4("skipping connect to logger")
		return
	}
	dbg.Lvl4("Connecting to Logger")
	lh, _ := logutils.NewLoggerHook(s.Logger, s.Hostname, s.App)
	dbg.Lvl4("Connected to Logger")
	log.AddHook(lh)
}
func (sn *Node) actOnResponses(view, Round int, exceptionV_hat abstract.Point, exceptionX_hat abstract.Point) error {
	dbg.Lvl4(sn.Name(), "got all responses for view, round", view, Round)
	round := sn.Rounds[Round]
	err := sn.VerifyResponses(view, Round)

	isroot := sn.IsRoot(view)
	// if there was an error, pass it up if a parent exists
	if err != nil && !isroot {
		sn.PutUpError(view, err)
		return err
	}

	// if there was no error, send up own response
	if err == nil && !isroot {
		if round.Log.v == nil && sn.ShouldIFail("response") {
			dbg.Lvl4(sn.Name(), "failing on response")
			return nil
		}

		// create and put up own response message
		rm := &ResponseMessage{
			R_hat:          round.r_hat,
			ExceptionList:  round.ExceptionList,
			ExceptionV_hat: exceptionV_hat,
			ExceptionX_hat: exceptionX_hat,
			Round:          Round}

		// ctx, _ := context.WithTimeout(context.Background(), 2000*time.Millisecond)
		ctx := context.TODO()
		dbg.Lvl4(sn.Name(), "put up response to", sn.Parent(view))
		err = sn.PutUp(ctx, view, &SigningMessage{
			Type:         Response,
			View:         view,
			LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)),
			Rm:           rm})
	} else {
		dbg.Lvl4("Root received response")
	}

	if sn.TimeForViewChange() {
		dbg.Lvl4("acting on responses: trying viewchanges")
		err := sn.TryViewChange(view + 1)
		if err != nil {
			log.Errorln(err)
		}
	}

	// the root reports that the round is done
	if isroot {
		sn.done <- Round
	}
	return err
}
func ExampleLevel2() {
	dbg.Lvl1("Level1")
	dbg.Lvl2("Level2")
	dbg.Lvl3("Level3")
	dbg.Lvl4("Level4")
	dbg.Lvl5("Level5")

	// Output:
	// 1: ( debug_lvl_test.ExampleLevel2: 0) - Level1
	// 2: ( debug_lvl_test.ExampleLevel2: 0) - Level2
}
func Build(path, out, goarch, goos string) (string, error) {
	var b bytes.Buffer
	cmd := exec.Command("go", "build", "-v", "-o", out, path)
	dbg.Lvl4("Building", path)
	// capture build output directly in the buffer (an intermediate,
	// unflushed bufio.Writer would lose output)
	cmd.Stdout = &b
	cmd.Stderr = &b
	cmd.Env = append([]string{"GOOS=" + goos, "GOARCH=" + goarch}, os.Environ()...)
	wd, err := os.Getwd()
	dbg.Lvl4(wd)
	dbg.Lvl4("Command:", cmd.Args)
	err = cmd.Run()
	dbg.Lvl4(b.String())
	return b.String(), err
}
func NewReverseProxy(target *url.URL) *httputil.ReverseProxy {
	director := func(r *http.Request) {
		r.URL.Scheme = target.Scheme
		r.URL.Host = target.Host
		// get rid of the (/d/short_name)/debug prefix of the requested path
		// --> long_name/debug
		pathComp := strings.Split(r.URL.Path, "/")
		// remove the first two components, /d/short_name
		pathComp = pathComp[3:]
		r.URL.Path = target.Path + "/" + strings.Join(pathComp, "/")
		dbg.Lvl4("redirected to:", r.URL.String())
	}
	dbg.Lvl4("setup reverse proxy for destination url:", target.Host, target.Path)
	return &httputil.ReverseProxy{Director: director}
}
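// A minimal sketch of mounting the proxy (the backend URL and short name are
// hypothetical): requests for /d/short1/debug/... are rewritten by the
// director above and forwarded to the target host.
func mountDebugProxy() {
	target, err := url.Parse("http://10.255.0.1:8081")
	if err != nil {
		log.Fatal(err)
	}
	http.Handle("/d/short1/", NewReverseProxy(target))
}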
// Finalize the commits by initiating the challenge phase if root.
// Otherwise send own commitment message up to the parent.
func (sn *Node) actOnCommits(view, Round int) error {
	round := sn.Rounds[Round]
	var err error

	if sn.IsRoot(view) {
		sn.commitsDone <- Round
		err = sn.FinalizeCommits(view, Round)
	} else {
		// create and put up own commit message
		com := &CommitmentMessage{
			V:             round.Log.V,
			V_hat:         round.Log.V_hat,
			X_hat:         round.X_hat,
			MTRoot:        round.MTRoot,
			ExceptionList: round.ExceptionList,
			Vote:          round.Vote,
			Round:         Round}

		// ctx, _ := context.WithTimeout(context.Background(), 2000*time.Millisecond)
		dbg.Lvl4(sn.Name(), "puts up commit")
		ctx := context.TODO()
		err = sn.PutUp(ctx, view, &SigningMessage{
			View:         view,
			Type:         Commitment,
			LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)),
			Com:          com})
	}
	return err
}
func (s *Server) LogReRun(nextRole string, curRole string) {
	if nextRole == "root" {
		var messg = s.Name() + " became root"
		if curRole == "root" {
			messg = s.Name() + " remained root"
		}

		go s.ConnectToLogger()

		log.WithFields(log.Fields{
			"file": logutils.File(),
			"type": "role_change",
		}).Infoln(messg)
		// dbg.Lvl4("role change: %p", s)
	} else {
		var messg = s.Name() + " remained regular"
		if curRole == "root" {
			messg = s.Name() + " became regular"
		}

		if curRole == "root" {
			log.WithFields(log.Fields{
				"file": logutils.File(),
				"type": "role_change",
			}).Infoln(messg)
			dbg.Lvl4("role change: %p", s)
		}
	}
}
func (sn *Node) Close() {
	// sn.printRoundTypes()
	sn.hbLock.Lock()
	if sn.heartbeat != nil {
		sn.heartbeat.Stop()
		sn.heartbeat = nil
		dbg.Lvl4("after close", sn.Name(), "has heartbeat=", sn.heartbeat)
	}
	if !sn.isclosed {
		close(sn.closed)
		dbg.Lvl4("signing node: closing:", sn.Name())
		sn.Host.Close()
	}
	sn.isclosed = true
	sn.hbLock.Unlock()
}
func (sn *Node) StartGossip() {
	go func() {
		t := time.Tick(GOSSIP_TIME)
		for {
			select {
			case <-t:
				sn.viewmu.Lock()
				c := sn.HostListOn(sn.ViewNo)
				sn.viewmu.Unlock()
				if len(c) == 0 {
					log.Errorln(sn.Name(), "StartGossip: none in hostlist for view:", sn.ViewNo, len(c))
					continue
				}
				sn.randmu.Lock()
				from := c[sn.Rand.Int()%len(c)]
				sn.randmu.Unlock()
				dbg.Lvl4("Gossiping with:", from)
				sn.CatchUp(int(atomic.LoadInt64(&sn.LastAppliedVote)+1), from)
			case <-sn.closed:
				dbg.Lvl3("stopping gossip: closed")
				return
			}
		}
	}()
}
func homeHandler(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path != "/" {
		dbg.Lvl4("home handler is handling non-home request")
		http.NotFound(w, r)
		return
	}
	dbg.Lvl4(master, "log server serving", r.URL)
	host := r.Host
	// fmt.Println(host)
	ws := "ws://" + host + "/log"

	err := homePage.Execute(w, Home{ws, strconv.Itoa(cfg.Nmachs * cfg.Hpn),
		strconv.Itoa(cfg.Hpn), strconv.Itoa(cfg.Bf), strconv.Itoa(cfg.Hpn),
		strconv.Itoa(cfg.Nmsgs), strconv.Itoa(cfg.Rate)})
	if err != nil {
		log.Fatal(err)
	}
}
func (sn *Node) CatchUp(vi int, from string) {
	dbg.Lvl4(sn.Name(), "attempting to catch up vote", vi)

	ctx := context.TODO()
	sn.PutTo(ctx, from, &SigningMessage{
		From:  sn.Name(),
		Type:  CatchUpReq,
		Cureq: &CatchUpRequest{Index: vi}})
}
// initiated by the root, propagated by all others
func (sn *Node) Challenge(view int, chm *ChallengeMessage) error {
	// update max seen round
	sn.roundmu.Lock()
	sn.LastSeenRound = max(sn.LastSeenRound, chm.Round)
	sn.roundmu.Unlock()

	round := sn.Rounds[chm.Round]
	if round == nil {
		return nil
	}

	// register the challenge
	round.c = chm.C

	if sn.Type == PubKey {
		dbg.Lvl4(sn.Name(), "challenge: using pubkey", sn.Type, chm.Vote)
		if err := sn.SendChildrenChallenges(view, chm); err != nil {
			return err
		}
	} else {
		dbg.Lvl4(sn.Name(), "challenge: using merkle proofs")
		// messages from clients, proofs computed
		if sn.CommitedFor(round) {
			if err := sn.SendLocalMerkleProof(view, chm); err != nil {
				return err
			}
		}
		if err := sn.SendChildrenChallengesProofs(view, chm); err != nil {
			return err
		}
	}

	// dbg.Lvl4(sn.Name(), "In challenge before response")
	sn.initResponseCrypto(chm.Round)

	// if we are a leaf, send the response up
	if len(sn.Children(view)) == 0 {
		sn.Respond(view, chm.Round, nil)
	}
	// dbg.Lvl4(sn.Name(), "Done handling challenge message")
	return nil
}
// AddParent adds a parent node to the TCPHost, for the given view.
func (h *TCPHost) AddParent(view int, c string) {
	h.PeerLock.Lock()
	if _, ok := h.peers[c]; !ok {
		h.peers[c] = NewTCPConn(c)
	}
	// remove from the pending peers list
	delete(h.PendingPeers, c)
	h.PeerLock.Unlock()

	dbg.Lvl4("Adding parent to views on", h.Name(), "for", c)
	h.views.AddParent(view, c)
}
func (sn *Node) VerifyAllProofs(view int, chm *ChallengeMessage, proofForClient proof.Proof) {
	sn.roundLock.RLock()
	round := sn.Rounds[chm.Round]
	sn.roundLock.RUnlock()

	// proof from the client to my root
	proof.CheckProof(sn.Suite().Hash, round.MTRoot, round.LocalMTRoot, round.Proofs["local"])
	// proof from my root to the big root
	dbg.Lvl4(sn.Name(), "verifying for view", view)
	proof.CheckProof(sn.Suite().Hash, chm.MTRoot, round.MTRoot, chm.Proof)
	// proof from the client to the big root
	proof.CheckProof(sn.Suite().Hash, chm.MTRoot, round.LocalMTRoot, proofForClient)
}
func (sn *Node) AddSelf(parent string) error {
	dbg.Lvl4("AddSelf: connecting to:", parent)
	err := sn.ConnectTo(parent)
	if err != nil {
		return err
	}

	dbg.Lvl4("AddSelf: putting group change message to:", parent)
	return sn.PutTo(
		context.TODO(),
		parent,
		&SigningMessage{
			Type: GroupChange,
			View: -1,
			Vrm: &VoteRequestMessage{
				Vote: &Vote{
					Type: AddVT,
					Av: &AddVote{
						Name:   sn.Name(),
						Parent: parent}}}})
}
func SshRunStdout(username, host, command string) error {
	addr := host
	if username != "" {
		addr = username + "@" + addr
	}

	dbg.Lvl4("Going to ssh to", addr, command)
	cmd := exec.Command("ssh", "-o", "StrictHostKeyChecking=no", addr, "eval '"+command+"'")
	cmd.Stderr = os.Stderr
	cmd.Stdout = os.Stdout
	return cmd.Run()
}
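// A minimal usage sketch with hypothetical credentials and command: run a
// cleanup command on a remote node, streaming its output to the local
// terminal via the Stdout/Stderr wiring above.
func killRemoteProcesses() error {
	return SshRunStdout("myuser", "users.deterlab.net", "killall forkexec")
}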
func (s *Server) AggregateCommits(view int) []byte {
	//dbg.Lvl4(s.Name(), "calling AggregateCommits")
	s.mux.Lock()
	// get data from s once to avoid refetching from the structure
	Queue := s.Queue
	READING := s.READING
	PROCESSING := s.PROCESSING
	// messages read will now be processed
	READING, PROCESSING = PROCESSING, READING
	s.READING, s.PROCESSING = s.PROCESSING, s.READING
	s.Queue[READING] = s.Queue[READING][:0]

	// give up if there is nothing to process
	if len(Queue[PROCESSING]) == 0 {
		s.mux.Unlock()
		s.Root = make([]byte, hashid.Size)
		s.Proofs = make([]proof.Proof, 1)
		return s.Root
	}

	// pull out the messages to be the Merkle tree leaves
	s.Leaves = make([]hashid.HashId, 0)
	for _, msg := range Queue[PROCESSING] {
		s.Leaves = append(s.Leaves, hashid.HashId(msg.Tsm.Sreq.Val))
	}
	s.mux.Unlock()

	// non-root servers keep track of rounds here
	if !s.IsRoot(view) {
		s.rLock.Lock()
		lsr := s.LastRound()
		mr := s.maxRounds
		s.rLock.Unlock()
		// if this is our last round then close the connections
		if lsr >= mr && mr >= 0 {
			s.closeChan <- true
		}
	}

	// create the Merkle tree for this round's messages and check correctness
	s.Root, s.Proofs = proof.ProofTree(s.Suite().Hash, s.Leaves)
	if sign.DEBUG {
		if proof.CheckLocalProofs(s.Suite().Hash, s.Root, s.Leaves, s.Proofs) {
			dbg.Lvl4("Local Proofs of", s.Name(), "successful for round "+strconv.Itoa(int(s.LastRound())))
		} else {
			panic("Local Proofs" + s.Name() + " unsuccessful for round " + strconv.Itoa(int(s.LastRound())))
		}
	}
	return s.Root
}
func logHandler(ws *websocket.Conn) {
	dbg.Lvl4(master, "log server serving /log (websocket)")
	i := 0
	for {
		Log.Mlock.RLock()
		end := Log.End
		Log.Mlock.RUnlock()
		if i >= end {
			time.Sleep(100 * time.Millisecond)
			continue
		}
		Log.Mlock.RLock()
		msg := Log.Msgs[i]
		Log.Mlock.RUnlock()
		_, err := ws.Write(msg)
		if err != nil {
			dbg.Lvl4("unable to write to log websocket")
			return
		}
		i++
	}
}
func logHandlerHtml(w http.ResponseWriter, r *http.Request) {
	dbg.Lvl4("Log handler:", r.URL, "-", len(Log.Msgs))
	//host := r.Host
	// fmt.Println(host)
	for i := range Log.Msgs {
		var jsonlog map[string]*json.RawMessage
		err := json.Unmarshal(Log.Msgs[i], &jsonlog)
		if err != nil {
			log.Error("Couldn't unmarshal string")
		}
		w.Write([]byte(fmt.Sprintf("%s - %s - %s - %s",
			*jsonlog["etime"], *jsonlog["eapp"], *jsonlog["ehost"], *jsonlog["emsg"])))
		w.Write([]byte("\n"))
	}
}
func (d *Deter) WriteConfig(dirOpt ...string) {
	buf := new(bytes.Buffer)
	if err := toml.NewEncoder(buf).Encode(d); err != nil {
		log.Fatal(err)
	}
	dir := d.DeployDir
	if len(dirOpt) > 0 {
		dir = dirOpt[0]
	}
	err := ioutil.WriteFile(dir+"/config.toml", buf.Bytes(), 0660)
	if err != nil {
		log.Fatal(err)
	}
	dbg.Lvl4("Wrote login", d.Login, "to", dir)
}
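// A minimal usage sketch (the alternate directory is hypothetical): the
// variadic dirOpt lets callers override the default deploy directory.
func writeConfigs(d *Deter) {
	d.WriteConfig()             // writes to d.DeployDir by default
	d.WriteConfig("/tmp/deter") // writes to an explicit target directory
}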
func (sn *Node) ReceivedHeartbeat(view int) {
	// XXX heartbeat should be associated with a specific view:
	// if we get a heartbeat for an old view then nothing should change.
	// There is a problem here where we could, if we receive a heartbeat
	// from an old view, try viewchanging into a view that we have already been to.
	sn.hbLock.Lock()
	// heartbeat is nil if we have just closed the signing node
	if sn.heartbeat != nil {
		sn.heartbeat.Stop()
		sn.heartbeat = time.AfterFunc(HEARTBEAT, func() {
			dbg.Lvl4(sn.Name(), "NO HEARTBEAT - try view change:", view)
			sn.TryViewChange(view + 1)
		})
	}
	sn.hbLock.Unlock()
}
func (sn *Node) ApplyAction(view int, v *Vote) {
	dbg.Lvl4(sn.Name(), "APPLYING ACTION")
	switch v.Type {
	case AddVT:
		sn.AddPeerToHostlist(view, v.Av.Name)
		if sn.Name() == v.Av.Parent {
			sn.AddChildren(view, v.Av.Name)
		}
	case RemoveVT:
		// removes the node from the Hostlist and from the children list
		sn.RemovePeer(view, v.Rv.Name)
		// not closing the TCP connection on remove: if the new view does not
		// go through, the connection is still essential to the old/current view
	default:
		log.Errorln("applyvote: unknown action type")
	}
}