func (round *RoundStamper) SignatureBroadcast(in *sign.SigningMessage, out []*sign.SigningMessage) error {
    // Propagate a failure of the underlying broadcast instead of ignoring it
    if err := round.RoundCosi.SignatureBroadcast(in, out); err != nil {
        return err
    }
    round.Proof = round.RoundCosi.Cosi.Proof
    round.MTRoot = round.RoundCosi.Cosi.MTRoot
    round.CombProofs = make([]proof.Proof, len(round.StampQueue))
    // Send back the signature to the clients
    for i, msg := range round.StampQueue {
        // Proof to get from s.Root to the big root
        combProof := make(proof.Proof, len(round.Proof))
        copy(combProof, round.Proof)
        // Add my proof to get from a leaf message to my root s.Root
        combProof = append(combProof, round.StampProofs[i]...)
        // Proof that I can get from a leaf message to the big root
        if proof.CheckProof(round.Suite.Hash, round.MTRoot, round.StampLeaves[i], combProof) {
            dbg.Lvl2("Proof is OK for msg", msg)
        } else {
            dbg.Lvl2("Inclusion-proof failed")
        }
        round.CombProofs[i] = combProof
    }
    return nil
}
// Runs two conodes and tests if the value returned is OK
func TestStamp(t *testing.T) {
    dbg.TestOutput(testing.Verbose(), 4)
    peer1, peer2 := createPeers()
    go peer1.LoopRounds(conode.RoundStamperListenerType, 4)
    go peer2.LoopRounds(conode.RoundStamperListenerType, 4)
    time.Sleep(2 * time.Second)

    s, err := conode.NewStamp("testdata/config.toml")
    if err != nil {
        t.Fatal("Couldn't open config-file:", err)
    }

    for _, port := range []int{7000, 7010} {
        stamper := "localhost:" + strconv.Itoa(port)
        dbg.Lvl2("Contacting stamper", stamper)
        tsm, err := s.GetStamp([]byte("test"), stamper)
        dbg.Lvl3("Evaluating results of", stamper)
        if err != nil {
            t.Fatal("Couldn't get stamp from server:", err)
        }
        if !tsm.Srep.AggPublic.Equal(s.X0) {
            t.Fatal("Not correct aggregate public key")
        }
    }

    dbg.Lvl2("Closing peer1")
    peer1.Close()
    dbg.Lvl2("Closing peer2")
    peer2.Close()
    dbg.Lvl3("Done with test")
}
// VerifySignature verifies that 'message' is included in the signature and
// that the signature itself is correct.
// 'message' is your own hash; 'reply' contains the inclusion proof plus the
// signature on the aggregated message.
func VerifySignature(suite abstract.Suite, reply *StampSignature, public abstract.Point, message []byte) bool {
    // Check if the aggregate public key is correct
    if !public.Equal(reply.AggPublic) {
        dbg.Lvl1("Aggregate-public-key check: FAILED (maybe you have an outdated config file of the tree)")
        return false
    }
    // First check if the challenge is OK
    if err := VerifyChallenge(suite, reply); err != nil {
        dbg.Lvl1("Challenge-check: FAILED (", err, ")")
        return false
    }
    dbg.Lvl2("Challenge-check: OK")

    // Incorporate the timestamp in the message, since the verification is
    // done by reconstructing the challenge
    var b bytes.Buffer
    if err := binary.Write(&b, binary.LittleEndian, reply.Timestamp); err != nil {
        dbg.Lvl1("Error marshaling the timestamp for signature verification")
        return false
    }
    msg := append(b.Bytes(), []byte(reply.MerkleRoot)...)
    if err := VerifySchnorr(suite, msg, public, reply.Challenge, reply.Response); err != nil {
        dbg.Lvl1("Signature-check: FAILED (", err, ")")
        return false
    }
    dbg.Lvl2("Signature-check: OK")

    // Finally check the inclusion proof
    if !proof.CheckProof(suite.Hash, reply.MerkleRoot, hashid.HashId(message), reply.Prf) {
        dbg.Lvl2("Inclusion-check: FAILED")
        return false
    }
    dbg.Lvl2("Inclusion-check: OK")
    return true
}
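// A minimal sketch of client-side verification, assuming a Stamp client and
// config file as in the test above; it mirrors the check that GetStamp
// already performs internally, and the function name is illustrative only.
func verifyStampExample() {
    s, err := conode.NewStamp("testdata/config.toml")
    if err != nil {
        dbg.Fatal("Couldn't open config-file:", err)
    }
    msg := []byte("test")
    // An empty server string lets GetStamp pick a configured host at random
    tsm, err := s.GetStamp(msg, "")
    if err != nil {
        dbg.Fatal("Couldn't get stamp:", err)
    }
    // Re-checks the aggregate key, the challenge, the Schnorr signature over
    // timestamp||MerkleRoot, and the Merkle inclusion proof
    if !conode.VerifySignature(s.Suite, tsm.Srep, s.X0, msg) {
        dbg.Fatal("Signature did not verify")
    }
}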
// Listen starts listening for incoming connections on the monitor's address.
// It updates the stats when measures come in, and returns an error if
// something goes wrong during the connection setup.
func (m *Monitor) Listen() error {
    ln, err := net.Listen("tcp", Sink+":"+SinkPort)
    if err != nil {
        return fmt.Errorf("Error while monitor is binding address: %v", err)
    }
    m.listener = ln
    dbg.Lvl2("Monitor listening for stats on", Sink, ":", SinkPort)
    finished := false
    go func() {
        for {
            if finished {
                break
            }
            conn, err := ln.Accept()
            if err != nil {
                operr, ok := err.(*net.OpError)
                // We can't accept anymore: we closed the listener
                if ok && operr.Op == "accept" {
                    break
                }
                dbg.Lvl2("Error while monitor accepts connection:", err)
                continue
            }
            dbg.Lvl3("Monitor: new connection from", conn.RemoteAddr().String())
            m.mutexConn.Lock()
            m.conns[conn.RemoteAddr().String()] = conn
            go m.handleConnection(conn)
            m.mutexConn.Unlock()
        }
    }()
    for !finished {
        select {
        // New stats
        case measure := <-m.measures:
            m.update(measure)
        // End of a peer connection
        case peer := <-m.done:
            dbg.Lvl3("Connections left:", len(m.conns))
            m.mutexConn.Lock()
            delete(m.conns, peer)
            m.mutexConn.Unlock()
            // End of monitoring
            if len(m.conns) == 0 {
                m.listener.Close()
                finished = true
                break
            }
        }
    }
    dbg.Lvl2("Monitor finished waiting!")
    m.conns = make(map[string]net.Conn)
    return nil
}
func main() {
    Current := new(Node)
    Magic := [4]byte{0xF9, 0xBE, 0xB4, 0xD9}
    Current.IP = net.IPv4(0, 1, 2, 3)
    Current.PublicKey = "my_cool_key"
    Current.Last_Block = "0"
    Parser, _ := BitCoSi.NewParser("/home/lefteris/hi/blocks", Magic)
    server := "localhost:2011"
    // suite = app.GetSuite("25519")

    dbg.Lvl2("Connecting to", server)
    conn := coconet.NewTCPConn(server)
    err := conn.Connect()
    if err != nil {
        dbg.Fatal("Error when getting the connection to the host:", err)
    }
    dbg.Lvl1("Connected to", server)

    go wait_for_blocks()

    for i := 0; i < 10; i++ {
        Current.transaction_pool = Parser.Parse(i, 10+i)
        for len(Current.transaction_pool) > 0 {
            msg := &BitCoSi.BitCoSiMessage{
                Type:  BitCoSi.TransactionAnnouncmentType,
                ReqNo: 0,
                Treq:  &BitCoSi.TransactionAnnouncment{Val: Current.transaction_pool[0]}}
            err = conn.PutData(msg)
            Current.transaction_pool = Current.transaction_pool[1:]
            if err != nil {
                dbg.Fatal("Couldn't send hash-message to server:", err)
            }
            time.Sleep(10 * time.Millisecond)
        }
    }

    wait_for_Key_blocks()
    time.Sleep(900000 * time.Millisecond)

    // Ask to close the connection
    err = conn.PutData(&BitCoSi.BitCoSiMessage{
        ReqNo: 1,
        Type:  BitCoSi.BitCoSiClose,
    })
    conn.Close()
    dbg.Lvl2("Connection closed with server")
}
// The core of the file: reads any input from the client connection and
// writes it out to the server connection
func proxyConnection(conn net.Conn, done chan bool) {
    dec := json.NewDecoder(conn)
    nerr := 0
    // Label the loop: a plain 'break' inside the switch below would only
    // leave the switch, not close the connection
proxy:
    for {
        m := Measure{}
        // Receive data
        if err := dec.Decode(&m); err != nil {
            if err == io.EOF {
                break
            }
            dbg.Lvl1("Error receiving data from", conn.RemoteAddr().String(), ":", err)
            nerr += 1
            if nerr > 1 {
                dbg.Lvl1("Too many errors from", conn.RemoteAddr().String(), ": Abort connection")
                break
            }
        }
        dbg.Lvl3("Proxy received", m)

        // Implement our own ready-count, so it doesn't have to go through
        // the main monitor, which might be far away.
        switch m.Name {
        case "ready":
            atomic.AddInt64(&readyCount, 1)
        case "ready_count":
            m.Ready = int(atomic.LoadInt64(&readyCount))
            err := json.NewEncoder(conn).Encode(m)
            if err != nil {
                dbg.Lvl2("Couldn't send ready-result back to client")
                break proxy
            }
        default:
            // Proxy the data - add who is sending, as we only have one
            // channel to the server
            m.Sender = conn.RemoteAddr().String()
            if err := serverEnc.Encode(m); err != nil {
                dbg.Lvl2("Error proxying data:", err)
                break proxy
            }
            if m.Name == "end" {
                dbg.Lvl2("Proxy detected end of measurement. Closing connection.")
                break proxy
            }
        }
    }
    conn.Close()
    done <- true
}
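// A hedged sketch of a client talking to the proxy above: it announces
// readiness, then polls for the global ready-count. Only the Measure fields
// visible in this file (Name, Ready) are used; the proxy address and the
// function name are illustrative only.
func readyExample() {
    conn, err := net.Dial("tcp", "localhost:4000")
    if err != nil {
        dbg.Fatal("Couldn't contact proxy:", err)
    }
    defer conn.Close()
    enc := json.NewEncoder(conn)
    // Announce that this client is ready
    if err := enc.Encode(Measure{Name: "ready"}); err != nil {
        dbg.Fatal("Couldn't send ready:", err)
    }
    // Ask how many clients are ready in total; the proxy answers directly,
    // without a round-trip to the main monitor
    if err := enc.Encode(Measure{Name: "ready_count"}); err != nil {
        dbg.Fatal("Couldn't request ready-count:", err)
    }
    var m Measure
    if err := json.NewDecoder(conn).Decode(&m); err == nil {
        dbg.Lvl1(m.Ready, "clients are ready")
    }
}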
func main() {
    deter.ReadConfig()
    // The flags are defined in lib/app
    app.FlagInit()
    flag.Parse()
    setup_deter()

    var wg sync.WaitGroup
    virts := physToServer[app.RunFlags.PhysAddr]
    if len(virts) > 0 {
        dbg.Lvl3("Starting", len(virts), "servers of", deter.App, "on", virts)
        for _, name := range virts {
            dbg.Lvl3("Starting", name, "on", app.RunFlags.PhysAddr)
            wg.Add(1)
            go func(nameport string) {
                dbg.Lvl3("Running on", app.RunFlags.PhysAddr, "starting", nameport, rootname)
                defer wg.Done()

                amroot := nameport == rootname
                args := []string{
                    "-hostname=" + nameport,
                    "-physaddr=" + app.RunFlags.PhysAddr,
                    "-amroot=" + strconv.FormatBool(amroot),
                    "-test_connect=" + strconv.FormatBool(testConnect),
                    "-logger=" + app.RunFlags.Logger,
                    "-mode=server",
                }

                dbg.Lvl3("Starting on", app.RunFlags.PhysAddr, "with args", args)
                cmdApp := exec.Command("./"+deter.App, args...)
                cmdApp.Stdout = os.Stdout
                cmdApp.Stderr = os.Stderr
                err := cmdApp.Run()
                if err != nil {
                    dbg.Lvl1("cmd run:", err)
                }

                dbg.Lvl3("Finished with app", app.RunFlags.PhysAddr)
            }(name)
        }
        dbg.Lvl3(app.RunFlags.PhysAddr, "Finished starting apps")
        wg.Wait()
    } else {
        dbg.Lvl2("No apps for", app.RunFlags.PhysAddr)
    }
    dbg.Lvl2(app.RunFlags.PhysAddr, "forkexec exited")
}
// LoopRounds starts the system by sending a round of type 'roundType'
// every second, for 'rounds' rounds in total.
// If 'rounds' < 0, it loops forever, or until you call peer.Close().
func (peer *Peer) LoopRounds(roundType string, rounds int) {
    dbg.Lvl3("Stamp-server", peer.Node.Name(), "starting with IsRoot=", peer.IsRoot(peer.ViewNo))
    ticker := time.NewTicker(sign.ROUND_TIME)
    firstRound := peer.Node.LastRound()
    if !peer.IsRoot(peer.ViewNo) {
        // Children don't need to tick, only the root.
        ticker.Stop()
    }

    for {
        select {
        case nextRole := <-peer.ViewChangeCh():
            dbg.Lvl2(peer.Name(), "assuming next role is", nextRole)
        case <-peer.CloseChan:
            dbg.Lvl3("Server-peer", peer.Name(), "has closed the connection")
            return
        case <-ticker.C:
            dbg.Lvl3("Ticker is firing in", peer.Hostname)
            roundNbr := peer.LastRound() - firstRound
            if roundNbr >= rounds && rounds >= 0 {
                dbg.Lvl3(peer.Name(), "reached max round: closing", roundNbr, ">=", rounds)
                ticker.Stop()
                if peer.IsRoot(peer.ViewNo) {
                    dbg.Lvl3("As I'm root, asking everybody to terminate")
                    peer.SendCloseAll()
                }
            } else {
                if peer.IsRoot(peer.ViewNo) {
                    dbg.Lvl2(peer.Name(), "Stamp server in round", roundNbr+1, "of", rounds)
                    round, err := sign.NewRoundFromType(roundType, peer.Node)
                    if err != nil {
                        dbg.Fatal("Couldn't create", roundType, err)
                    }
                    err = peer.StartAnnouncement(round)
                    if err != nil {
                        dbg.Lvl3(err)
                        time.Sleep(1 * time.Second)
                        // Only leaves the select; the loop continues with
                        // the next tick
                        break
                    }
                } else {
                    dbg.Lvl3(peer.Name(), "running as regular")
                }
            }
        }
    }
}
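// A minimal sketch of the loop-forever mode, assuming the createPeers test
// helper used elsewhere in this package; the function name and the sleep
// duration are illustrative only.
func loopForeverExample() {
    peer1, peer2 := createPeers()
    // rounds < 0 keeps stamping until Close() is called
    go peer1.LoopRounds(conode.RoundStamperListenerType, -1)
    go peer2.LoopRounds(conode.RoundStamperListenerType, -1)
    time.Sleep(10 * time.Second)
    peer1.Close()
    peer2.Close()
}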
// For testing the different round-types.
// Every round-type is in its own Test*-method, so one can easily run just
// a given round-test.
func testRound(t *testing.T, roundType string) {
    dbg.TestOutput(testing.Verbose(), 4)
    dbg.Lvl2("Testing", roundType)
    peer1, peer2 := createPeers()

    round, err := sign.NewRoundFromType(roundType, peer1.Node)
    if err != nil {
        t.Fatal("Couldn't create", roundType, "round:", err)
    }
    peer1.StartAnnouncement(round)
    time.Sleep(time.Second)

    var cosi *sign.CosiStruct
    switch roundType {
    case sign.RoundCosiType:
        cosi = round.(*sign.RoundCosi).Cosi
    case sign.RoundExceptionType:
        cosi = round.(*sign.RoundException).Cosi
    case conode.RoundStamperType:
        cosi = round.(*conode.RoundStamper).Cosi
    case conode.RoundStamperListenerType:
        cosi = round.(*conode.RoundStamperListener).Cosi
    }
    if cosi.R_hat == nil {
        t.Fatal("Didn't finish round - R_hat empty")
    }
    err = cosi.VerifyResponses()
    if err != nil {
        t.Fatal("Couldn't verify responses")
    }
    peer1.Close()
    peer2.Close()
}
// GetStamp contacts the "server" and waits for the "msg" to // be signed // If server is empty, it will contact one randomly func (s *Stamp) GetStamp(msg []byte, server string) (*TimeStampMessage, error) { if server == "" { server = s.Config.Hosts[rand.Intn(len(s.Config.Hosts))] } dbg.Lvl2("StampClient will stamp on server", server) portstr := strconv.Itoa(cliutils.GetPort(server, DefaultPort) + 1) err := s.connect(cliutils.GetAddress(server) + ":" + portstr) if err != nil { return nil, err } tsm, err := s.stamp(msg) if err != nil { return nil, err } err = s.disconnect() if err != nil { return nil, err } // Verify if what we received is correct if !VerifySignature(s.Suite, tsm.Srep, s.X0, msg) { return nil, fmt.Errorf("Verification of signature failed") } return tsm, nil }
// ConnectTo connects to the given host and starts the SYN exchange
// (public key + id)
func (p *Peer) ConnectTo(host string) error {
    tick := time.NewTicker(ConnWaitRetry)
    count := 0
    for range tick.C {
        // Connect
        conn, err := net.Dial("tcp", host)
        if err != nil {
            // We have tried too many times => abort
            if count == ConnRetry {
                tick.Stop()
                dbg.Fatal(p.Name, "could not connect to", host, ConnRetry, "times. Abort.")
            } else {
                // Let's try again one more time
                dbg.Lvl2(p.Name, "could not connect to", host, ". Retry in", ConnWaitRetry.String())
                count += 1
                // Without this 'continue' a failed dial would fall through
                // to the success path with a nil connection
                continue
            }
        }
        // Handle the successful connection
        dbg.Lvl3(p.Name, "has connected with peer", host)
        tick.Stop()
        // Start the SYN exchange with the respective peer
        go p.synWithPeer(conn)
        break
    }
    return nil
}
func (round *RoundStamperListener) SignatureBroadcast(in *sign.SigningMessage, out []*sign.SigningMessage) error {
    // Propagate a failure of the underlying broadcast instead of ignoring it
    if err := round.RoundStamper.SignatureBroadcast(in, out); err != nil {
        return err
    }
    if round.IsRoot {
        in.SBm.Messages = round.roundMessages
    }
    for _, o := range out {
        o.SBm.Messages = in.SBm.Messages
    }
    for i, msg := range round.ClientQueue {
        respMessg := &TimeStampMessage{
            Type:  StampSignatureType,
            ReqNo: SeqNo(msg.ReqNo),
            Srep: &StampSignature{
                SuiteStr:   round.Suite.String(),
                Timestamp:  round.Timestamp,
                MerkleRoot: round.MTRoot,
                Prf:        round.RoundStamper.CombProofs[i],
                Response:   in.SBm.R0_hat,
                Challenge:  in.SBm.C,
                AggCommit:  in.SBm.V0_hat,
                AggPublic:  in.SBm.X0_hat,
            }}
        round.PutToClient(msg.To, respMessg)
        dbg.Lvl2("Sent signature response back to client", msg.To)
    }
    return nil
}
// Filter filters out a series of values
func (df *DataFilter) Filter(measure string, values []float64) []float64 {
    // Do we have a filter for this measure?
    if _, ok := df.percentiles[measure]; !ok {
        return values
    }
    // Compute the percentile value
    max, err := stats.PercentileNearestRank(values, df.percentiles[measure])
    if err != nil {
        dbg.Lvl2("Monitor: Error filtering data:", err)
        return values
    }

    // Find the index of the last value above the percentile; this assumes
    // 'values' is sorted in increasing order
    maxIndex := -1
    for i, v := range values {
        if v > max {
            maxIndex = i
        }
    }
    // Check if we found something to filter out
    if maxIndex == -1 {
        dbg.Lvl3("Filtering: nothing to filter for", measure)
        return values
    }
    // Return the values below the percentile
    dbg.Lvl3("Filtering: filters out", measure, ":", maxIndex, "/", len(values))
    return values[:maxIndex]
}
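// A minimal sketch of the filter's behavior, assuming a DataFilter that was
// configured with a 95th-percentile entry for "round_wall"; the measure name,
// the values, and the function name are illustrative only.
func filterExample(df *DataFilter) {
    // Sorted measurements with one outlier at the end
    values := []float64{1.0, 1.1, 1.2, 1.3, 42.0}
    filtered := df.Filter("round_wall", values)
    // With a 95th-percentile cutoff around 1.3, the outlier 42.0 is dropped
    dbg.Lvl1("Kept", len(filtered), "of", len(values), "values")
}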
func (sn *Node) CloseAll(view int) error {
    dbg.Lvl2(sn.Name(), "received CloseAll on", view)

    // At the leaves
    if len(sn.Children(view)) == 0 {
        dbg.Lvl3(sn.Name(), "in CloseAll is root leaf")
    } else {
        dbg.Lvl3(sn.Name(), "in CloseAll is calling", len(sn.Children(view)), "children")

        // Inform all children of the announcement
        messgs := make([]coconet.BinaryMarshaler, sn.NChildren(view))
        for i := range messgs {
            sm := SigningMessage{
                Suite:   sn.Suite().String(),
                Type:    CloseAll,
                ViewNbr: view,
                //LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)),
            }
            messgs[i] = &sm
        }
        ctx := context.TODO()
        if err := sn.PutDown(ctx, view, messgs); err != nil {
            return err
        }
    }

    dbg.Lvl3("Closing down shop", sn.Isclosed)
    sn.Close()
    return nil
}
// Checks the correct setup of the signature
func TestsetupTestSig(t *testing.T) {
    setupTestSig()

    if !reply.AggPublic.Equal(X0) {
        t.Error("X0 is not equal")
    } else {
        dbg.Lvl2("X0 is OK")
    }
}
func readConfig() *app.ConfigConode {
    conf := &app.ConfigConode{}
    if err := app.ReadTomlConfig(conf, "testdata/config.toml"); err != nil {
        dbg.Fatal("Could not read toml config:", err)
    }
    dbg.Lvl2("Configuration file read")
    suite = app.GetSuite(conf.Suite)
    return conf
}
// Verifies whether the X0 and the hash are correct
func TestVerifySignature(t *testing.T) {
    setupTestSig()

    if !conode.VerifySignature(suite, &reply, X0, hash) {
        t.Error("Verification failed")
    } else {
        dbg.Lvl2("Verification passed")
    }
}
func (sn *Node) ShouldIFail(phase string) bool {
    if sn.FailureRate > 0 {
        // If we were manually set to always fail
        if sn.Host.(*coconet.FaultyHost).IsDead() ||
            sn.Host.(*coconet.FaultyHost).IsDeadFor(phase) {
            dbg.Lvl2(sn.Name(), "dead for "+phase)
            return true
        }

        // If we were only given a probability of failing
        if p := sn.Rand.Int() % 100; p < sn.FailureRate {
            dbg.Lvl2(sn.Name(), "died for "+phase, "p", p, "with prob", sn.FailureRate)
            return true
        }
    }
    return false
}
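// A minimal sketch of how a failure-injection check like ShouldIFail is
// typically used inside a protocol phase; the phase name, the method name,
// and the surrounding round logic are illustrative only, not part of this
// package.
func (sn *Node) commitPhaseExample() error {
    if sn.ShouldIFail("commit") {
        // Simulate a crashed node: simply don't participate in this phase
        return nil
    }
    // ... normal commit processing would go here ...
    return nil
}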
// Stop closes every connection the monitor has and stops updating the stats
func (m *Monitor) Stop() {
    dbg.Lvl2("Monitor Stop")
    m.listener.Close()
    m.mutexConn.Lock()
    for _, c := range m.conns {
        c.Close()
    }
    m.mutexConn.Unlock()
}
// Verifies whether the Challenge is correct
func TestVerifyChallenge(t *testing.T) {
    setupTestSig()

    err := conode.VerifyChallenge(suite, &reply)
    if err != nil {
        t.Error("Verification failed")
    } else {
        dbg.Lvl2("Verification passed")
    }
}
// Simply adds up all the public keys we give to it
func aggregateKeys(pubs []string) abstract.Point {
    k0 := suite.Point().Null()
    for i, ki := range pubs {
        // Convert from string to public key
        kip, err := cliutils.ReadPub64(suite, strings.NewReader(ki))
        if err != nil {
            dbg.Fatal("Could not read public key", i, ":", err)
        }
        k0 = k0.Add(k0, kip)
        dbg.Lvl2("Public key", i, ":", kip)
    }
    dbg.Lvl1("Aggregated public key:", k0)
    return k0
}
// RootFor returns the name of the node that should be the root for the given
// view. Round-robin on the array of host names is used to determine the
// next root.
func (sn *Node) RootFor(view int) string {
    dbg.Lvl2(sn.Name(), "Root for view", view)
    var hl []string
    if view == 0 {
        hl = sn.HostListOn(view)
    } else {
        // We might not have the host list for the current view, so it is
        // safer to always use the previous view's host list
        hl = sn.HostListOn(view - 1)
    }
    return hl[view%len(hl)]
}
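// A self-contained sketch of the round-robin rule above: with a fixed host
// list, view v maps to index v % len(hosts). The host names and the function
// name are illustrative only.
func rootForExample() {
    hosts := []string{"node0", "node1", "node2"}
    for view := 0; view < 5; view++ {
        // Prints node0, node1, node2, node0, node1
        dbg.Lvl1("View", view, "root is", hosts[view%len(hosts)])
    }
}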
func ExampleLevel2() {
    dbg.DebugVisible = 2
    dbg.Lvl1("Level1")
    dbg.Lvl2("Level2")
    dbg.Lvl3("Level3")
    dbg.Lvl4("Level4")
    dbg.Lvl5("Level5")
    // Output:
    // 1: ( dbg_test.ExampleLevel2: 0) - Level1
    // 2: ( dbg_test.ExampleLevel2: 0) - Level2
}
func (round *RoundSetup) Commitment(in []*SigningMessage, out *SigningMessage) error {
    out.Com.Messages = 1
    if !round.IsLeaf {
        for _, i := range in {
            out.Com.Messages += i.Com.Messages
        }
    }
    if round.IsRoot {
        dbg.Lvl2("Number of nodes found:", out.Com.Messages)
        round.Counted <- out.Com.Messages
    }
    return nil
}
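// A hedged sketch of consuming the count computed above: each node reports 1
// plus the sum of its children's counts, so the value the root pushes onto
// the Counted channel is the total number of nodes in the tree. The
// NewRoundSetup constructor and the Peer parameter are assumptions, not
// confirmed by this file.
func countNodesExample(peer *Peer) {
    round := NewRoundSetup(peer.Node) // hypothetical constructor
    if err := peer.StartAnnouncement(round); err != nil {
        dbg.Fatal("Couldn't start announcement:", err)
    }
    dbg.Lvl1("Tree contains", <-round.Counted, "nodes")
}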
// handleConnection decodes the data received on the connection and
// aggregates it into the monitor's stats
func (m *Monitor) handleConnection(conn net.Conn) {
    dec := json.NewDecoder(conn)
    enc := json.NewEncoder(conn)
    nerr := 0
    for {
        measure := Measure{}
        if err := dec.Decode(&measure); err != nil {
            // If end of connection
            if err == io.EOF {
                break
            }
            // Otherwise log it
            dbg.Lvl2("Error monitor decoding from", conn.RemoteAddr().String(), ":", err)
            nerr += 1
            if nerr > 1 {
                dbg.Lvl2("Monitor: too many errors from", conn.RemoteAddr().String(), ": Abort.")
                break
            }
        }

        dbg.Lvlf3("Monitor: received a Measure from %s: %+v", conn.RemoteAddr().String(), measure)
        // Special cases where the measurement indicates a finished step
        switch strings.ToLower(measure.Name) {
        case "end":
            dbg.Lvl3("Finishing monitor")
            m.done <- conn.RemoteAddr().String()
        case "ready":
            m.stats.Ready++
            dbg.Lvl3("Increasing counter to", m.stats.Ready)
        case "ready_count":
            dbg.Lvl3("Sending stats")
            m_send := measure
            m_send.Ready = m.stats.Ready
            if err := enc.Encode(m_send); err != nil {
                dbg.Lvl2("Monitor: couldn't send ready-count back:", err)
            }
        default:
            m.measures <- measure
        }
    }
}
// Will build the application
func (d *Localhost) Build(build string) error {
    src, _ := filepath.Rel(d.LocalDir, d.AppDir+"/"+d.App)
    dst := d.RunDir + "/" + d.App
    start := time.Now()

    // Build for the local machine
    res, err := cliutils.Build(src, dst, runtime.GOARCH, runtime.GOOS)
    if err != nil {
        dbg.Fatal("Error while building for localhost (src:", src, ", dst:", dst, "):", res)
    }
    dbg.Lvl3("Localhost: Build src", src, ", dst", dst)
    dbg.Lvl3("Localhost: Results of localhost build:", res)
    dbg.Lvl2("Localhost: build finished in", time.Since(start))
    return err
}
// Verifies whether the Schnorr signature is correct
func TestVerifySchnorr(t *testing.T) {
    setupTestSig()
    var b bytes.Buffer
    if err := binary.Write(&b, binary.LittleEndian, reply.Timestamp); err != nil {
        // Fail the test instead of only logging: a bad marshal would make
        // the verification below fail for the wrong reason
        t.Fatal("Error marshaling the timestamp for signature verification")
    }
    msg := append(b.Bytes(), []byte(reply.MerkleRoot)...)

    err := conode.VerifySchnorr(suite, msg, X0, reply.Challenge, reply.Response)
    if err != nil {
        t.Error("Schnorr verification failed")
    } else {
        dbg.Lvl2("Schnorr OK")
    }
}
func wait_for_blocks() {
    server := "localhost:2011"
    suite = app.GetSuite("25519")

    dbg.Lvl2("Connecting to", server)
    conn := coconet.NewTCPConn(server)
    err := conn.Connect()
    if err != nil {
        dbg.Fatal("Error when getting the connection to the host:", err)
    }
    dbg.Lvl1("Connected to", server)

    for i := 0; i < 1000; i++ {
        time.Sleep(1 * time.Second)
        msg := &BitCoSi.BitCoSiMessage{
            Type:  BitCoSi.BlockRequestType,
            ReqNo: 0,
        }
        err = conn.PutData(msg)
        if err != nil {
            dbg.Fatal("Couldn't send hash-message to server:", err)
        }
        dbg.Lvl1("Sent signature request")

        // Wait for the signed message
        tsm := new(BitCoSi.BitCoSiMessage)
        tsm.Brep = &BitCoSi.BlockReply{}
        tsm.Brep.SuiteStr = suite.String()
        err = conn.GetData(tsm)
        if err != nil {
            dbg.Fatal("Error while receiving signature:", err)
        }
        //dbg.Lvlf1("Got signature response %+v", tsm.Brep)

        T := new(BitCoSi.TrBlock)
        T.Block = tsm.Brep.Block
        T.Print()
        dbg.Lvlf1("Response %v", tsm.Brep.Response)
    }

    // Ask to close the connection
    err = conn.PutData(&BitCoSi.BitCoSiMessage{
        ReqNo: 1,
        Type:  BitCoSi.BitCoSiClose,
    })
    conn.Close()
}
// Update updates the Stats with the given measure
func (s *Stats) Update(m Measure) {
    var meas *Measurement
    meas, ok := s.measures[m.Name]
    if !ok {
        // If we have already written some values, we cannot take new ones
        if s.valuesWritten {
            dbg.Lvl2("Stats Update received unknown type of measure:", m.Name)
            return
        }
        meas = NewMeasurement(m.Name, s.filter)
        s.measures[m.Name] = meas
        s.keys = append(s.keys, m.Name)
    }
    meas.Update(m)
}
// Returns a tuple of the start and stop configurations to run
func getStartStop(rcs int) (int, int) {
    ss_str := strings.Split(simRange, ":")
    start, err := strconv.Atoi(ss_str[0])
    stop := rcs
    if err == nil {
        stop = start
        if len(ss_str) > 1 {
            stop, err = strconv.Atoi(ss_str[1])
            if err != nil {
                stop = rcs
            }
        }
    }
    dbg.Lvl2("Range is", start, "...", stop)
    return start, stop
}
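// A hedged sketch of how the range string is interpreted, assuming simRange
// is the package-level variable read above; values are illustrative only.
//   simRange = ""    -> start=0 (Atoi fails), stop=rcs (run everything)
//   simRange = "2"   -> start=2, stop=2 (run only configuration 2)
//   simRange = "2:5" -> start=2, stop=5 (run configurations 2 through 5)
func rangeExample() {
    simRange = "2:5"
    start, stop := getStartStop(10)
    dbg.Lvl1("Running configurations", start, "to", stop) // 2 to 5
}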