// Instantiate returns a new NTree protocol instance
func (nt *NtreeServer) Instantiate(node *sda.TreeNodeInstance) (sda.ProtocolInstance, error) {
	log.Lvl2("Waiting for enough transactions...")
	currTransactions := nt.WaitEnoughBlocks()
	pi, err := NewNTreeRootProtocol(node, currTransactions)
	log.Lvl2("Instantiated Ntree Root Protocol with", len(currTransactions), "transactions")
	return pi, err
}
// proxyConnection is the core of the file: it reads any input from the
// connection and writes it out to the server connection.
func proxyConnection(conn net.Conn, done chan bool) {
	dec := json.NewDecoder(conn)
	nerr := 0
	for {
		m := SingleMeasure{}
		// Receive data
		if err := dec.Decode(&m); err != nil {
			if err == io.EOF {
				break
			}
			log.Lvl1("Error receiving data from", conn.RemoteAddr().String(), ":", err)
			nerr++
			if nerr > 1 {
				log.Lvl1("Too many errors from", conn.RemoteAddr().String(), ": Abort connection")
				break
			}
		}
		log.Lvl3("Proxy received", m)

		// Proxy data back to the monitor
		if err := serverEnc.Encode(m); err != nil {
			log.Lvl2("Error proxying data:", err)
			break
		}
		if m.Name == "end" {
			log.Lvl2("Proxy detected end of measurement. Closing connection.")
			break
		}
	}
	if err := conn.Close(); err != nil {
		log.Error("Couldn't close connection:", err)
	}
	done <- true
}
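// The function above is essentially a decode/re-encode pump. Below is a
// minimal, self-contained sketch of that pattern using only the standard
// library; "measure", "forward" and the net.Pipe endpoints are hypothetical
// stand-ins for SingleMeasure, proxyConnection and the real TCP connections.
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net"
)

type measure struct {
	Name  string  `json:"name"`
	Value float64 `json:"value"`
}

// forward decodes measures from src and re-encodes them to dst until EOF or
// the "end" sentinel arrives.
func forward(src io.Reader, dst io.Writer) error {
	dec := json.NewDecoder(src)
	enc := json.NewEncoder(dst)
	for {
		var m measure
		if err := dec.Decode(&m); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		if err := enc.Encode(m); err != nil {
			return err
		}
		if m.Name == "end" {
			return nil
		}
	}
}

func main() {
	client, proxyIn := net.Pipe()
	proxyOut, server := net.Pipe()

	// The "client" sends one measure, then the end sentinel.
	go func() {
		enc := json.NewEncoder(client)
		enc.Encode(measure{Name: "round", Value: 1.5})
		enc.Encode(measure{Name: "end"})
		client.Close()
	}()

	// The proxy pumps everything from the client side to the server side.
	go forward(proxyIn, proxyOut)

	// The "server" prints what it receives.
	dec := json.NewDecoder(server)
	for {
		var m measure
		if dec.Decode(&m) != nil {
			break
		}
		fmt.Printf("server got %+v\n", m)
		if m.Name == "end" {
			break
		}
	}
}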
func (jv *JVSS) initSecret(sid SID) error {
	// Initialise the shared secret of the given type if necessary
	if _, ok := jv.secrets[sid]; !ok {
		log.Lvl2(fmt.Sprintf("Node %d: Initialising %s shared secret", jv.Index(), sid))
		sec := &Secret{
			receiver: poly.NewReceiver(jv.keyPair.Suite, jv.info, jv.keyPair),
			deals:    make(map[int]*poly.Deal),
			sigs:     make(map[int]*poly.SchnorrPartialSig),
			numConfs: 0,
		}
		jv.secrets[sid] = sec
	}
	secret := jv.secrets[sid]

	// Initialise and broadcast our deal if necessary
	if len(secret.deals) == 0 {
		kp := config.NewKeyPair(jv.keyPair.Suite)
		deal := new(poly.Deal).ConstructDeal(kp, jv.keyPair, jv.info.T, jv.info.R, jv.pubKeys)
		log.Lvl2(fmt.Sprintf("Node %d: Initialising %v deal", jv.Index(), sid))
		secret.deals[jv.Index()] = deal
		db, err := deal.MarshalBinary()
		if err != nil {
			return err
		}
		msg := &SecInitMsg{
			Src:  jv.Index(),
			SID:  sid,
			Deal: db,
		}
		if err := jv.Broadcast(msg); err != nil {
			return err
		}
	}
	return nil
}
func TestProtocolHandlers(t *testing.T) {
	defer log.AfterTest(t)
	local := sda.NewLocalTest()
	_, _, tree := local.GenTree(3, false, true, true)
	defer local.CloseAll()

	log.Lvl2("Sending to children")
	IncomingHandlers = make(chan *sda.TreeNodeInstance, 2)
	p, err := local.CreateProtocol(tree, "ProtocolHandlers")
	if err != nil {
		t.Fatal(err)
	}
	go p.Start()

	log.Lvl2("Waiting for responses")
	child1 := <-IncomingHandlers
	child2 := <-IncomingHandlers
	if child1.ServerIdentity().ID == child2.ServerIdentity().ID {
		t.Fatal("Both entities should be different")
	}

	log.Lvl2("Sending to parent")
	tni := p.(*ProtocolHandlers).TreeNodeInstance
	child1.SendTo(tni.TreeNode(), &NodeTestAggMsg{})
	if len(IncomingHandlers) > 0 {
		t.Fatal("This should not trigger yet")
	}
	child2.SendTo(tni.TreeNode(), &NodeTestAggMsg{})
	final := <-IncomingHandlers
	if final.ServerIdentity().ID != tni.ServerIdentity().ID {
		t.Fatal("This should be the same ID")
	}
}
// handleConnection decodes the data received and aggregates it into the
// monitor's stats
func (m *Monitor) handleConnection(conn net.Conn) {
	dec := json.NewDecoder(conn)
	nerr := 0
	for {
		measure := &SingleMeasure{}
		if err := dec.Decode(measure); err != nil {
			// if end of connection
			if err == io.EOF || strings.Contains(err.Error(), "closed") {
				break
			}
			// otherwise log it
			log.Lvl2("Error: monitor decoding from", conn.RemoteAddr().String(), ":", err)
			nerr++
			if nerr > 1 {
				log.Lvl2("Monitor: too many errors from", conn.RemoteAddr().String(), ": Abort.")
				break
			}
		}
		log.Lvlf3("Monitor: received a Measure from %s: %+v", conn.RemoteAddr().String(), measure)

		// Special case where the measurement indicates a FINISHED step
		switch strings.ToLower(measure.Name) {
		case "end":
			log.Lvl3("Finishing monitor")
			m.done <- conn.RemoteAddr().String()
		default:
			m.measures <- measure
		}
	}
}
func (bp *BlockingProtocol) Dispatch() error {
	// first wait on stopBlockChan
	<-bp.stopBlockChan
	log.Lvl2("BlockingProtocol: will continue")
	// Then wait on the actual message
	<-bp.Incoming
	log.Lvl2("BlockingProtocol: received message => signal Done")
	// then signal that you are done
	bp.doneChan <- true
	return nil
}
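// A self-contained sketch of the two-channel gating pattern that
// BlockingProtocol uses: Dispatch blocks on a release channel first, then on
// the actual message, and finally signals completion. All names below
// ("gate", "release", "incoming", "done") are hypothetical simplifications.
package main

import "fmt"

// gate mirrors BlockingProtocol's three channels.
type gate struct {
	release  chan bool   // stopBlockChan: opened by the test
	incoming chan string // Incoming: the actual message
	done     chan bool   // doneChan: completion signal
}

// dispatch blocks on release first, then on the message, then signals done.
func (g *gate) dispatch() {
	<-g.release
	msg := <-g.incoming
	fmt.Println("dispatched:", msg)
	g.done <- true
}

func main() {
	g := &gate{
		release:  make(chan bool),
		incoming: make(chan string, 1),
		done:     make(chan bool),
	}
	// The message can arrive before the gate is opened: it just queues up.
	g.incoming <- "hello"
	go g.dispatch()
	g.release <- true // open the gate
	<-g.done          // and wait for completion
}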
func sendrcv(from, to *sda.Host) error {
	err := from.SendRaw(to.ServerIdentity, &SimpleMessage{12})
	if err != nil {
		return errors.New("Couldn't send message: " + err.Error())
	}
	// Receive the message
	log.Lvl2("Waiting to receive")
	msg := to.Receive()
	log.Lvl2("Received")
	if msg.Msg.(SimpleMessage).I != 12 {
		return errors.New("Simple message got distorted")
	}
	return nil
}
// Filter filters out a series of values
func (df *DataFilter) Filter(measure string, values []float64) []float64 {
	// do we have a filter for this measure?
	if _, ok := df.percentiles[measure]; !ok {
		return values
	}
	// Compute the percentile value
	max, err := stats.PercentileNearestRank(values, df.percentiles[measure])
	if err != nil {
		log.Lvl2("Monitor: Error filtering data(", values, "):", err)
		return values
	}

	// Find the first index from where to filter
	maxIndex := -1
	for i, v := range values {
		if v > max {
			maxIndex = i
			break
		}
	}
	// check if we found something to filter out
	if maxIndex == -1 {
		log.Lvl3("Filtering: nothing to filter for", measure)
		return values
	}
	// return the values below the percentile
	log.Lvl3("Filtering: filters out", measure, ":", maxIndex, "/", len(values))
	return values[:maxIndex]
}
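// For illustration, here is a standalone variant of the filtering idea above,
// assuming a hand-rolled nearest-rank percentile instead of the stats
// package. Unlike Filter, which truncates at an index (and thus assumes
// sorted input), this sketch keeps every value <= the percentile regardless
// of order. All names are hypothetical.
package main

import (
	"fmt"
	"math"
	"sort"
)

// percentileNearestRank returns the nearest-rank p-th percentile of values,
// computed on a sorted copy so the caller's slice is left untouched.
func percentileNearestRank(values []float64, p float64) float64 {
	sorted := append([]float64(nil), values...)
	sort.Float64s(sorted)
	if len(sorted) == 0 {
		return 0
	}
	rank := int(math.Ceil(p / 100 * float64(len(sorted))))
	if rank < 1 {
		rank = 1
	}
	return sorted[rank-1]
}

// filterAbove keeps only the values that are <= the p-th percentile.
func filterAbove(values []float64, p float64) []float64 {
	max := percentileNearestRank(values, p)
	var kept []float64
	for _, v := range values {
		if v <= max {
			kept = append(kept, v)
		}
	}
	return kept
}

func main() {
	vals := []float64{1, 2, 2, 3, 100}
	// 80th percentile of 5 values is the 4th-ranked one (3), so 100 is dropped.
	fmt.Println(filterAbove(vals, 80)) // prints [1 2 2 3]
}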
func (jv *JVSS) handleSigResp(m WSigRespMsg) error {
	msg := m.SigRespMsg

	// Collect partial signatures
	secret, ok := jv.secrets[msg.SID]
	if !ok {
		return fmt.Errorf("Error, shared secret does not exist")
	}
	secret.sigs[msg.Src] = msg.Sig
	log.Lvl2(fmt.Sprintf("Node %d: %s signatures %d/%d", jv.Index(), msg.SID, len(secret.sigs), len(jv.List())))

	// Create the Schnorr signature once we have received enough partial signatures
	if jv.info.T == len(secret.sigs) {
		for _, sig := range secret.sigs {
			if err := jv.schnorr.AddPartialSig(sig); err != nil {
				return err
			}
		}
		sig, err := jv.schnorr.Sig()
		if err != nil {
			return err
		}
		jv.sigChan <- sig

		// Cleanup the short-term shared secret
		delete(jv.secrets, msg.SID)
	}
	return nil
}
// Tests a 2-node system
func TestCloseall(t *testing.T) {
	defer log.AfterTest(t)
	log.TestOutput(testing.Verbose(), 4)
	local := sda.NewLocalTest()
	nbrNodes := 2
	_, _, tree := local.GenTree(nbrNodes, false, true, true)
	defer local.CloseAll()

	pi, err := local.CreateProtocol(tree, "ExampleChannels")
	if err != nil {
		t.Fatal("Couldn't start protocol:", err)
	}
	go pi.Start()
	protocol := pi.(*channels.ProtocolExampleChannels)
	timeout := network.WaitRetry * time.Duration(network.MaxRetry*nbrNodes*2) * time.Millisecond
	select {
	case children := <-protocol.ChildCount:
		log.Lvl2("Instance 1 is done")
		if children != nbrNodes {
			t.Fatal("Didn't get a child-count of", nbrNodes)
		}
	case <-time.After(timeout):
		t.Fatal("Didn't finish in time")
	}
}
func (c *Client) triggerTransactions(blocksPath string, nTxs int) error {
	log.Lvl2("ByzCoin Client will trigger up to", nTxs, "transactions")
	parser, err := blockchain.NewParser(blocksPath, magicNum)
	if err != nil {
		log.Error("Error: Couldn't parse blocks in", blocksPath,
			".\nPlease download bitcoin blocks as .dat files first and place them in",
			blocksPath, "- either by running a bitcoin node (recommended) or via a torrent.")
		return err
	}

	transactions, err := parser.Parse(0, ReadFirstNBlocks)
	if err != nil {
		return fmt.Errorf("Error while parsing transactions %v", err)
	}
	if len(transactions) == 0 {
		return errors.New("Couldn't read any transactions.")
	}
	if len(transactions) < nTxs {
		return fmt.Errorf("Read only %v but caller wanted %v", len(transactions), nTxs)
	}

	for _, tr := range transactions[:nTxs] {
		// "send" the transaction to the server (we skip the tcp connection on purpose here)
		c.srv.AddTransaction(tr)
	}
	return nil
}
// Tests an n-node system
func TestPropagate(t *testing.T) {
	for _, nbrNodes := range []int{3, 10, 14} {
		local := sda.NewLocalTest()
		_, el, _ := local.GenTree(nbrNodes, false, true, true)
		o := local.Overlays[el.List[0].ID]
		i := 0
		msg := &PropagateMsg{[]byte("propagate")}

		tree := el.GenerateNaryTreeWithRoot(8, o.ServerIdentity())
		log.Lvl2("Starting to propagate", reflect.TypeOf(msg))
		pi, err := o.CreateProtocolSDA(tree, "Propagate")
		log.ErrFatal(err)
		nodes, err := propagateStartAndWait(pi, msg, 1000,
			func(m network.Body) {
				if bytes.Equal(msg.Data, m.(*PropagateMsg).Data) {
					i++
				} else {
					t.Error("Didn't receive correct data")
				}
			})
		log.ErrFatal(err)
		if i != 1 {
			t.Fatal("Didn't get data-request")
		}
		if nodes != nbrNodes {
			t.Fatal("Not all nodes replied")
		}
		local.CloseAll()
	}
}
func TestProcessor_AddMessage(t *testing.T) {
	defer log.AfterTest(t)
	p := NewServiceProcessor(nil)
	log.ErrFatal(p.RegisterMessage(procMsg))
	if len(p.functions) != 1 {
		t.Fatal("Should have registered one function")
	}
	mt := network.TypeFromData(&testMsg{})
	if mt == network.ErrorType {
		t.Fatal("Didn't register message-type correctly")
	}

	var wrongFunctions = []interface{}{
		procMsgWrong1,
		procMsgWrong2,
		procMsgWrong3,
		procMsgWrong4,
		procMsgWrong5,
		procMsgWrong6,
	}
	for _, f := range wrongFunctions {
		log.Lvl2("Checking function", reflect.TypeOf(f).String())
		err := p.RegisterMessage(f)
		if err == nil {
			t.Fatalf("Shouldn't accept function %s", reflect.TypeOf(f).String())
		}
	}
}
// Tests an n-node system
func TestBroadcast(t *testing.T) {
	defer log.AfterTest(t)
	log.TestOutput(testing.Verbose(), 3)
	for _, nbrNodes := range []int{3, 10, 14} {
		local := sda.NewLocalTest()
		_, _, tree := local.GenTree(nbrNodes, false, true, true)

		pi, err := local.CreateProtocol(tree, "Broadcast")
		if err != nil {
			t.Fatal("Couldn't start protocol:", err)
		}
		protocol := pi.(*manage.Broadcast)
		done := make(chan bool)
		protocol.RegisterOnDone(func() {
			done <- true
		})
		protocol.Start()
		timeout := network.WaitRetry * time.Duration(network.MaxRetry*nbrNodes*2) * time.Millisecond
		select {
		case <-done:
			log.Lvl2("Done with connecting everybody")
		case <-time.After(timeout):
			t.Fatal("Didn't finish in time")
		}
		local.CloseAll()
	}
}
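// The tests above guard against hangs with the same select/time.After idiom:
// wait on a done channel, but fail if nothing arrives within the deadline.
// A minimal, self-contained sketch of that timeout guard; the channel and
// the simulated delay are hypothetical.
package main

import (
	"fmt"
	"time"
)

func main() {
	done := make(chan bool)
	go func() {
		time.Sleep(20 * time.Millisecond) // simulated protocol run
		done <- true
	}()
	select {
	case <-done:
		fmt.Println("done with connecting everybody")
	case <-time.After(time.Second):
		fmt.Println("didn't finish in time")
	}
}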
// Wait for all processes to finish
func (d *Localhost) Wait() error {
	log.Lvl3("Waiting for processes to finish")
	var err error
	go func() {
		d.wgRun.Wait()
		log.Lvl3("WaitGroup is 0")
		// write to the error channel when done:
		d.errChan <- nil
	}()

	// if one of the hosts fails, stop waiting and return the error:
	e := <-d.errChan
	log.Lvl3("Finished waiting for hosts:", e)
	if e != nil {
		if err := d.Cleanup(); err != nil {
			log.Error("Couldn't cleanup running instances", err)
		}
		err = e
	}
	log.Lvl2("Processes finished")
	return err
}
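// Wait's trick is to turn wg.Wait() into a channel send, so the caller can
// receive either "all processes finished" (nil) or the first error a host
// reported. A compact, self-contained sketch of that pattern; the simulated
// workers and the failing "process 2" are hypothetical.
package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

func main() {
	var wg sync.WaitGroup
	errChan := make(chan error, 1)

	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			time.Sleep(10 * time.Millisecond) // simulated work
			if id == 2 {
				errChan <- errors.New("process 2 failed") // a failing host reports here
			}
		}(i)
	}

	// Turn the WaitGroup into a channel event.
	go func() {
		wg.Wait()
		errChan <- nil // all done, no error
	}()

	// Whichever event arrives first wins.
	if err := <-errChan; err != nil {
		fmt.Println("stopped waiting:", err)
	} else {
		fmt.Println("all processes finished")
	}
}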
// SendRaw sends to a ServerIdentity without wrapping the msg into an SDAMessage
func (h *Host) SendRaw(e *network.ServerIdentity, msg network.Body) error {
	if msg == nil {
		return errors.New("Can't send nil-packet")
	}
	h.networkLock.RLock()
	c, ok := h.connections[e.ID]
	h.networkLock.RUnlock()
	if !ok {
		var err error
		c, err = h.Connect(e)
		if err != nil {
			return err
		}
	}

	log.Lvlf4("%s sends to %s msg: %+v", h.ServerIdentity.Addresses, e, msg)
	err := c.Send(context.TODO(), msg)
	if err != nil /*&& err != network.ErrClosed*/ {
		log.Lvl2("Couldn't send to", c.ServerIdentity().First(), ":", err, "trying again")
		c, err = h.Connect(e)
		if err != nil {
			return err
		}
		err = c.Send(context.TODO(), msg)
		if err != nil {
			return err
		}
	}
	log.Lvl5("Message sent")
	return nil
}
// Instantiate takes blockSize transactions and creates the byzcoin instances.
func (s *Server) Instantiate(node *sda.TreeNodeInstance) (sda.ProtocolInstance, error) {
	// wait until we have enough blocks
	currTransactions := s.WaitEnoughBlocks()
	log.Lvl2("Instantiate ByzCoin Round with", len(currTransactions), "transactions")
	pi, err := NewByzCoinRootProtocol(node, currTransactions, s.timeOutMs, s.fail)
	return pi, err
}
func TestBftCoSi(t *testing.T) {
	defer log.AfterTest(t)
	log.TestOutput(testing.Verbose(), 4)

	// Register the test protocol using BFTCoSi
	sda.ProtocolRegisterName(TestProtocolName, func(n *sda.TreeNodeInstance) (sda.ProtocolInstance, error) {
		return NewBFTCoSiProtocol(n, verify)
	})

	for _, nbrHosts := range []int{3, 13} {
		countMut.Lock()
		veriCount = 0
		countMut.Unlock()
		log.Lvl2("Running BFTCoSi with", nbrHosts, "hosts")
		local := sda.NewLocalTest()
		_, _, tree := local.GenBigTree(nbrHosts, nbrHosts, 3, true, true)

		done := make(chan bool)
		// create the message we want to sign for this round
		msg := []byte("Hello BFTCoSi")

		// Start the protocol
		node, err := local.CreateProtocol(tree, TestProtocolName)
		if err != nil {
			t.Fatal("Couldn't create new node:", err)
		}
		root := node.(*ProtocolBFTCoSi)
		root.Msg = msg
		// function that will be called when the protocol is finished by the root
		root.RegisterOnDone(func() {
			done <- true
		})
		go node.Start()

		// are we done yet?
		wait := time.Second * 3
		select {
		case <-done:
			countMut.Lock()
			assert.Equal(t, veriCount, nbrHosts,
				"Each host should have called verification.")
			// if assert fails we don't care for unlocking (t.Fail)
			countMut.Unlock()
			sig := root.Signature()
			if err := cosi.VerifyCosiSignatureWithException(root.Suite(),
				root.AggregatedPublic, msg, sig.Sig, sig.Exceptions); err != nil {
				t.Fatal(fmt.Sprintf("%s Verification of the signature failed: %s", root.Name(), err.Error()))
			}
		case <-time.After(wait):
			t.Fatal("Waited", wait, "for BFTCoSi to finish ...")
		}
		local.CloseAll()
	}
}
// PropagateStartAndWait starts the propagation protocol and blocks until
// all children stored the new value or the timeout has been reached.
// The return value is the number of nodes that acknowledged having
// stored the new value or an error if the protocol couldn't start.
func PropagateStartAndWait(c sda.Context, el *sda.Roster, msg network.Body, msec int, f func(network.Body)) (int, error) {
	tree := el.GenerateNaryTreeWithRoot(8, c.ServerIdentity())
	log.Lvl2("Starting to propagate", reflect.TypeOf(msg))
	pi, err := c.CreateProtocolService(tree, "Propagate")
	if err != nil {
		return -1, err
	}
	return propagateStartAndWait(pi, msg, msec, f)
}
func (jv *JVSS) finaliseSecret(sid SID) error {
	secret, ok := jv.secrets[sid]
	if !ok {
		return fmt.Errorf("Error, shared secret does not exist")
	}
	log.Lvl2(fmt.Sprintf("Node %d: %s deals %d/%d", jv.Index(), sid, len(secret.deals), len(jv.List())))

	if len(secret.deals) == jv.info.T {
		for _, deal := range secret.deals {
			if _, err := secret.receiver.AddDeal(jv.Index(), deal); err != nil {
				return err
			}
		}
		sec, err := secret.receiver.ProduceSharedSecret()
		if err != nil {
			return err
		}
		secret.secret = sec
		secret.mtx.Lock()
		secret.numConfs++
		secret.mtx.Unlock()
		log.Lvl2(fmt.Sprintf("Node %d: %v created", jv.Index(), sid))

		// Initialise the Schnorr struct for the long-term shared secret if not done before
		if sid == LTSS && !jv.ltssInit {
			jv.ltssInit = true
			jv.schnorr.Init(jv.keyPair.Suite, jv.info, secret.secret)
			log.Lvl2(fmt.Sprintf("Node %d: %v Schnorr struct initialised", jv.Index(), sid))
		}

		// Broadcast that we have finished setting up our shared secret
		msg := &SecConfMsg{
			Src: jv.Index(),
			SID: sid,
		}
		if err := jv.Broadcast(msg); err != nil {
			return err
		}
	}
	return nil
}
// Deploy copies all files to the run-directory
func (d *Localhost) Deploy(rc RunConfig) error {
	if runtime.GOOS == "darwin" {
		files, err := exec.Command("ulimit", "-n").Output()
		if err != nil {
			log.Fatal("Couldn't check for file-limit:", err)
		}
		filesNbr, err := strconv.Atoi(strings.TrimSpace(string(files)))
		if err != nil {
			log.Fatal("Couldn't convert", files, "to a number:", err)
		}
		hosts, _ := strconv.Atoi(rc.Get("hosts"))
		if filesNbr < hosts*2 {
			maxfiles := 10000 + hosts*2
			log.Fatalf("Maximum open files is too small. Please run the following command:\n"+
				"sudo sysctl -w kern.maxfiles=%d\n"+
				"sudo sysctl -w kern.maxfilesperproc=%d\n"+
				"ulimit -n %d\n"+
				"sudo sysctl -w kern.ipc.somaxconn=2048\n",
				maxfiles, maxfiles, maxfiles)
		}
	}

	d.servers, _ = strconv.Atoi(rc.Get("servers"))
	log.Lvl2("Localhost: Deploying and writing config-files for", d.servers, "servers")
	sim, err := sda.NewSimulation(d.Simulation, string(rc.Toml()))
	if err != nil {
		return err
	}
	d.addresses = make([]string, d.servers)
	for i := range d.addresses {
		d.addresses[i] = "localhost" + strconv.Itoa(i)
	}
	d.sc, err = sim.Setup(d.runDir, d.addresses)
	if err != nil {
		return err
	}
	d.sc.Config = string(rc.Toml())
	if err := d.sc.Save(d.runDir); err != nil {
		return err
	}
	log.Lvl2("Localhost: Done deploying")
	return nil
}
// Test sending data back and forth using SendRaw
func TestHostSendDuplex(t *testing.T) {
	defer log.AfterTest(t)
	h1, h2 := SetupTwoHosts(t, false)
	msgSimple := &SimpleMessage{5}
	err := h1.SendRaw(h2.ServerIdentity, msgSimple)
	if err != nil {
		t.Fatal("Couldn't send message from h1 to h2", err)
	}
	msg := h2.Receive()
	log.Lvl2("Received msg h1 -> h2", msg)

	err = h2.SendRaw(h1.ServerIdentity, msgSimple)
	if err != nil {
		t.Fatal("Couldn't send message from h2 to h1", err)
	}
	msg = h1.Receive()
	log.Lvl2("Received msg h2 -> h1", msg)

	h1.Close()
	h2.Close()
}
// start launches a new service
func (s *serviceFactory) start(name string, c *Context, path string) (Service, error) {
	var id ServiceID
	var ok bool
	if id, ok = s.translations[name]; !ok {
		return nil, errors.New("No Service for this name: " + name)
	}
	var fn NewServiceFunc
	if fn, ok = s.constructors[id]; !ok {
		return nil, fmt.Errorf("No Service for this id: %+v", id)
	}
	serv := fn(c, path)
	log.Lvl2("Instantiated service", name)
	return serv, nil
}
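// start resolves a service in two steps: name -> ID, then ID -> constructor.
// Below is a self-contained sketch of that two-level registry, with
// hypothetical simplifications standing in for ServiceID, NewServiceFunc and
// the Service interface.
package main

import (
	"errors"
	"fmt"
)

type service interface{ Name() string }

type echo struct{ name string }

func (e *echo) Name() string { return e.name }

// factory maps names to IDs and IDs to constructors, like serviceFactory.
type factory struct {
	translations map[string]int
	constructors map[int]func(name string) service
}

// start looks up the name, then the ID, then invokes the constructor.
func (f *factory) start(name string) (service, error) {
	id, ok := f.translations[name]
	if !ok {
		return nil, errors.New("no service for this name: " + name)
	}
	fn, ok := f.constructors[id]
	if !ok {
		return nil, fmt.Errorf("no service for this id: %d", id)
	}
	return fn(name), nil
}

func main() {
	f := &factory{
		translations: map[string]int{"echo": 1},
		constructors: map[int]func(string) service{
			1: func(name string) service { return &echo{name: name} },
		},
	}
	s, err := f.start("echo")
	if err != nil {
		panic(err)
	}
	fmt.Println("instantiated service", s.Name())
}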
// nodeDelete needs to be separated from nodeDone, as it is also called from
// Close, but due to locking-issues here we don't lock.
func (o *Overlay) nodeDelete(tok *Token) {
	tni, ok := o.instances[tok.ID()]
	if !ok {
		log.Lvl2("Node", tok.ID(), "already gone")
		return
	}
	log.Lvl4("Closing node", tok.ID())
	err := tni.Close()
	if err != nil {
		log.Error("Error while closing node:", err)
	}
	delete(o.instances, tok.ID())
	// mark it done!
	o.instancesInfo[tok.ID()] = true
}
// PrePrepare initializes a full run of the protocol.
func (p *Protocol) PrePrepare() error {
	// pre-prepare: broadcast the block
	var err error
	log.Lvl2(p.Name(), "Broadcast PrePrepare")
	prep := &PrePrepare{p.trBlock}
	p.broadcast(func(tn *sda.TreeNode) {
		if tempErr := p.SendTo(tn, prep); tempErr != nil {
			err = tempErr
		}
		p.state = statePrepare
	})
	log.Lvl3(p.Name(), "Broadcast PrePrepare DONE")
	return err
}
// Build makes sure that the binary is available for our local platform
func (d *Localhost) Build(build string, arg ...string) error {
	src := "./cothority"
	dst := d.runDir + "/" + d.Simulation
	start := time.Now()
	// build for the local machine
	res, err := Build(src, dst, runtime.GOARCH, runtime.GOOS, arg...)
	if err != nil {
		log.Fatal("Error while building for localhost (src", src, ", dst", dst, "):", res, err)
	}
	log.Lvl3("Localhost: Build src", src, ", dst", dst)
	log.Lvl4("Localhost: Results of localhost build:", res)
	log.Lvl2("Localhost: build finished in", time.Since(start))
	return err
}
// returns a tuple of start and stop configurations to run
func getStartStop(rcs int) (int, int) {
	ssStr := strings.Split(simRange, ":")
	start, err := strconv.Atoi(ssStr[0])
	stop := rcs - 1
	if err == nil {
		stop = start
		if len(ssStr) > 1 {
			stop, err = strconv.Atoi(ssStr[1])
			if err != nil {
				stop = rcs
			}
		}
	}
	log.Lvl2("Range is", start, ":", stop)
	return start, stop
}
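// Hypothetical usage of getStartStop, assuming simRange holds the
// user-supplied range string and 10 run-configurations exist:
//
//	simRange = ""    // Atoi fails on "": start=0, stop=9 -> run everything
//	simRange = "3"   // start=3, stop=3 -> run only configuration 3
//	simRange = "3:5" // start=3, stop=5 -> run configurations 3 through 5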
func TestNtree(t *testing.T) {
	defer log.AfterTest(t)
	log.TestOutput(testing.Verbose(), 4)

	for _, nbrHosts := range []int{1, 3, 13} {
		log.Lvl2("Running ntree with", nbrHosts, "hosts")
		local := sda.NewLocalTest()
		_, _, tree := local.GenBigTree(nbrHosts, nbrHosts, 3, true, true)

		done := make(chan bool)
		// create the message we want to sign for this round
		msg := []byte("Ntree rocks slowly")

		// Register the function generating the protocol instance
		var root *ntree.Protocol
		// function that will be called when protocol is finished by the root
		doneFunc := func() bool {
			done <- true
			return true
		}

		// Start the protocol
		pi, err := local.CreateProtocol(tree, "NaiveTree")
		if err != nil {
			t.Fatal("Couldn't create new node:", err)
		}
		root = pi.(*ntree.Protocol)
		root.Message = msg
		root.OnDoneCallback(doneFunc)
		err = pi.Start()
		if nbrHosts == 1 {
			if err == nil {
				t.Fatal("Shouldn't be able to start NTree with 1 node")
			}
		} else if err != nil {
			t.Fatal("Couldn't start protocol:", err)
		} else {
			select {
			case <-done:
			case <-time.After(time.Second * 2):
				t.Fatal("Protocol didn't finish in time")
			}
		}
		local.CloseAll()
	}
}
func TestBlocking(t *testing.T) {
	defer log.AfterTest(t)
	l := sda.NewLocalTest()
	_, _, tree := l.GenTree(2, true, true, true)
	defer l.CloseAll()

	n1, err := l.StartProtocol("ProtocolBlocking", tree)
	if err != nil {
		t.Fatal("Couldn't start protocol")
	}
	n2, err := l.StartProtocol("ProtocolBlocking", tree)
	if err != nil {
		t.Fatal("Couldn't start protocol")
	}

	p1 := n1.(*BlockingProtocol)
	p2 := n2.(*BlockingProtocol)
	tn1 := p1.TreeNodeInstance
	tn2 := p2.TreeNodeInstance
	go func() {
		// Send two messages to n1, which blocks the old interface.
		// Use t.Error instead of t.Fatal, as t.Fatal must not be
		// called from a goroutine other than the test's own.
		err := l.SendTreeNode("", tn2, tn1, &NodeTestMsg{})
		if err != nil {
			t.Error("Couldn't send message:", err)
			return
		}
		err = l.SendTreeNode("", tn2, tn1, &NodeTestMsg{})
		if err != nil {
			t.Error("Couldn't send message:", err)
			return
		}
		// Now send a message to n2, but in the old interface this blocks.
		err = l.SendTreeNode("", tn1, tn2, &NodeTestMsg{})
		if err != nil {
			t.Error("Couldn't send message:", err)
		}
	}()

	// Release p2
	p2.stopBlockChan <- true
	select {
	case <-p2.doneChan:
		log.Lvl2("Node 2 done")
		p1.stopBlockChan <- true
		<-p1.doneChan
	case <-time.After(time.Second):
		t.Fatal("Node 2 didn't receive")
	}
}
// Stop closes every connection it has and stops updating the stats.
func (m *Monitor) Stop() {
	log.Lvl2("Monitor Stop")
	m.listenerLock.Lock()
	if m.listener != nil {
		if err := m.listener.Close(); err != nil {
			log.Error("Couldn't close listener:", err)
		}
	}
	m.listenerLock.Unlock()

	m.mutexConn.Lock()
	for _, c := range m.conns {
		if err := c.Close(); err != nil {
			log.Error("Couldn't close connection:", err)
		}
	}
	m.mutexConn.Unlock()
}