// NewDataFilter returns a new data filter initialized with the right values
// taken out from the run config. If absent, it keeps the default values.
// Keys expected are of the form:
//   filter_measurementname = perc => will take the lower and upper percentile = perc
// NOTE(review): the original comment documented "discard_measurementname"
// keys, but the regexp below only matches keys prefixed with "filter_" —
// confirm which prefix the run configs actually use.
func NewDataFilter(config map[string]string) DataFilter {
	df := DataFilter{
		percentiles: make(map[string]float64),
	}
	// Matches keys such as "filter_round"; the measurement name follows the prefix.
	reg, err := regexp.Compile("filter_(\\w+)")
	if err != nil {
		log.Lvl1("DataFilter: Error compiling regexp:", err)
		return df
	}
	// analyse the each entry
	for k, v := range config {
		if measure := reg.FindString(k); measure == "" {
			continue
		} else {
			// this value must be filtered by how many ?
			perc, err := strconv.ParseFloat(v, 64)
			if err != nil {
				log.Lvl1("DataFilter: Cannot parse value for filter measure:", measure)
				continue
			}
			// Strip the "filter_" prefix to keep only the measurement name.
			measure = strings.Replace(measure, "filter_", "", -1)
			df.percentiles[measure] = perc
		}
	}
	log.Lvl3("Filtering:", df.percentiles)
	return df
}
// proxyConnection is the core of the proxy: it reads JSON-encoded
// SingleMeasure values from conn and forwards each one to the server
// connection via the package-level serverEnc encoder. It stops on EOF,
// after two receive errors, on a forwarding error, or when an "end"
// measure is seen; it then closes conn and signals completion on done.
func proxyConnection(conn net.Conn, done chan bool) {
	dec := json.NewDecoder(conn)
	nerr := 0
	for {
		m := SingleMeasure{}
		// Receive data
		if err := dec.Decode(&m); err != nil {
			if err == io.EOF {
				break
			}
			log.Lvl1("Error receiving data from", conn.RemoteAddr().String(), ":", err)
			nerr++
			// Tolerate a single transient decode error; abort on the second.
			if nerr > 1 {
				log.Lvl1("Too many errors from", conn.RemoteAddr().String(), ": Abort connection")
				break
			}
		}
		log.Lvl3("Proxy received", m)

		// Proxy data back to monitor
		if err := serverEnc.Encode(m); err != nil {
			log.Lvl2("Error proxying data :", err)
			break
		}
		if m.Name == "end" {
			// the end
			log.Lvl2("Proxy detected end of measurement. Closing connection.")
			break
		}
	}
	if err := conn.Close(); err != nil {
		log.Error("Couldn't close connection:", err)
	}
	done <- true
}
func verify(m []byte) bool { countMut.Lock() veriCount++ log.Lvl1("Verification called", veriCount, "times") countMut.Unlock() log.Lvl1("Ignoring message:", string(m)) // everything is OK, always: return true }
// TestServiceProtocolProcessMessage checks that a client request makes the
// service start a protocol, and that the protocol then sends a message to
// itself — hence the two expected `true` signals on ds.link.
func TestServiceProtocolProcessMessage(t *testing.T) {
	ds := &DummyService{
		link: make(chan bool),
	}
	var count int
	sda.RegisterNewService("DummyService", func(c *sda.Context, path string) sda.Service {
		if count == 0 {
			count++
			// the client does not need a Service
			return &DummyService{link: make(chan bool)}
		}
		ds.c = c
		ds.path = path
		ds.Config = DummyConfig{
			Send: true,
		}
		return ds
	})
	// fake a client
	h2 := sda.NewLocalHost(2010)
	defer h2.Close()
	host := sda.NewLocalHost(2000)
	host.ListenAndBind()
	host.StartProcessMessages()
	log.Lvl1("Host created and listening")
	defer host.Close()
	// create the entityList and tree
	el := sda.NewRoster([]*network.ServerIdentity{host.ServerIdentity})
	tree := el.GenerateBinaryTree()
	// give it to the service
	ds.fakeTree = tree

	// Send a request to the service
	b, err := network.MarshalRegisteredType(&DummyMsg{10})
	log.ErrFatal(err)
	re := &sda.ClientRequest{
		Service: sda.ServiceFactory.ServiceID("DummyService"),
		Data:    b,
	}
	log.Lvl1("Client connecting to host")
	if _, err := h2.Connect(host.ServerIdentity); err != nil {
		t.Fatal(err)
	}
	log.Lvl1("Sending request to service...")
	if err := h2.SendRaw(host.ServerIdentity, re); err != nil {
		t.Fatal(err)
	}
	// wait for the link from the protocol
	waitOrFatalValue(ds.link, true, t)
	// now wait for the same link as the protocol should have sent a message to
	// himself !
	waitOrFatalValue(ds.link, true, t)
}
// Test if a request that makes the service create a new protocol works.
// Sending the same request twice re-uses the same TreeNode, which must be
// reported as a failure (false) on ds.link.
func TestServiceRequestNewProtocol(t *testing.T) {
	ds := &DummyService{
		link: make(chan bool),
	}
	sda.RegisterNewService("DummyService", func(c *sda.Context, path string) sda.Service {
		ds.c = c
		ds.path = path
		return ds
	})
	host := sda.NewLocalHost(2000)
	host.Listen()
	host.StartProcessMessages()
	log.Lvl1("Host created and listening")
	defer host.Close()
	// create the entityList and tree
	el := sda.NewRoster([]*network.ServerIdentity{host.ServerIdentity})
	tree := el.GenerateBinaryTree()
	// give it to the service
	ds.fakeTree = tree

	// Send a request to the service
	b, err := network.MarshalRegisteredType(&DummyMsg{10})
	log.ErrFatal(err)
	re := &sda.ClientRequest{
		Service: sda.ServiceFactory.ServiceID("DummyService"),
		Data:    b,
	}
	// fake a client
	h2 := sda.NewLocalHost(2010)
	defer h2.Close()
	log.Lvl1("Client connecting to host")
	if _, err := h2.Connect(host.ServerIdentity); err != nil {
		t.Fatal(err)
	}
	log.Lvl1("Sending request to service...")
	if err := h2.SendRaw(host.ServerIdentity, re); err != nil {
		t.Fatal(err)
	}
	// wait for the link from the service
	waitOrFatalValue(ds.link, true, t)

	// Now RESEND the value so we instantiate using the SAME TREENODE
	log.Lvl1("Sending request AGAIN to service...")
	if err := h2.SendRaw(host.ServerIdentity, re); err != nil {
		t.Fatal(err)
	}
	// wait for the link from the service —
	// NOW expect false
	waitOrFatalValue(ds.link, false, t)
}
// Send transmits the given struct over the network. func send(v interface{}) error { if encoder == nil { return fmt.Errorf("Monitor's sink connection not initalized. Can not send any measures") } if !enabled { return nil } // For a large number of clients (˜10'000), the connection phase // can take some time. This is a linear backoff to enable connection // even when there are a lot of request: var ok bool var err error for wait := 500; wait < 1000; wait += 100 { if err = encoder.Encode(v); err == nil { ok = true break } log.Lvl1("Couldn't send to monitor-sink:", err) time.Sleep(time.Duration(wait) * time.Millisecond) continue } if !ok { return errors.New("Could not send any measures") } return nil }
// Start will execute one cothority-binary for each server
// configured. Each binary runs in its own goroutine; run errors are sent
// on d.errChan and d.wgRun tracks how many processes are still running.
func (d *Localhost) Start(args ...string) error {
	if err := os.Chdir(d.runDir); err != nil {
		return err
	}
	log.Lvl4("Localhost: chdir into", d.runDir)
	ex := d.runDir + "/" + d.Simulation
	d.running = true
	log.Lvl1("Starting", d.servers, "applications of", ex)
	for index := 0; index < d.servers; index++ {
		d.wgRun.Add(1)
		log.Lvl3("Starting", index)
		// Each simulated server gets a distinct address "localhost<n>".
		host := "localhost" + strconv.Itoa(index)
		cmdArgs := []string{"-address", host, "-monitor",
			"localhost:" + strconv.Itoa(d.monitorPort),
			"-simul", d.Simulation,
			"-debug", strconv.Itoa(log.DebugVisible()),
		}
		// Caller-supplied args come first, followed by the standard flags.
		cmdArgs = append(args, cmdArgs...)
		log.Lvl3("CmdArgs are", cmdArgs)
		cmd := exec.Command(ex, cmdArgs...)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		// cmd is declared per-iteration, so each goroutine runs its own process;
		// index and host are passed explicitly to avoid loop-variable capture.
		go func(i int, h string) {
			log.Lvl3("Localhost: will start host", h)
			err := cmd.Run()
			if err != nil {
				log.Error("Error running localhost", h, ":", err)
				d.errChan <- err
			}
			d.wgRun.Done()
			log.Lvl3("host (index", i, ")", h, "done")
		}(index, host)
	}
	return nil
}
// Register takes a name, creates a ServiceID out of it and stores the
// mapping and the creation function.
// NOTE(review): the previous comment said "better panic than to continue"
// for a duplicate registration, but the code only logs the duplicate —
// confirm which behavior is intended.
func (s *serviceFactory) Register(name string, fn NewServiceFunc) {
	// Derive a deterministic ServiceID from the service name.
	id := ServiceID(uuid.NewV5(uuid.NamespaceURL, name))
	if _, ok := s.constructors[id]; ok {
		// called at init time; a duplicate registration is only logged here.
		log.Lvl1("RegisterService():", name)
	}
	s.constructors[id] = fn
	s.translations[name] = id
	s.inverseTr[id] = name
}
// TestServiceProcessServiceMessage checks that a CreateServiceMessage sent
// from h2 reaches the DummyService registered on h1.
// NOTE(review): count is declared but never incremented, so the factory
// always returns ds1 — confirm whether the second instantiation was meant
// to receive ds2.
func TestServiceProcessServiceMessage(t *testing.T) {
	ds1 := &DummyService{
		link: make(chan bool),
	}
	ds2 := &DummyService{
		link: make(chan bool),
	}
	var count int
	sda.RegisterNewService("DummyService", func(c *sda.Context, path string) sda.Service {
		var s *DummyService
		if count == 0 {
			s = ds1
		} else {
			s = ds2
		}
		s.c = c
		s.path = path
		return s
	})
	// create two hosts
	h2 := sda.NewLocalHost(2010)
	defer h2.Close()
	h1 := sda.NewLocalHost(2000)
	h1.ListenAndBind()
	h1.StartProcessMessages()
	defer h1.Close()
	log.Lvl1("Host created and listening")

	// connect themselves
	log.Lvl1("Client connecting to host")
	if _, err := h2.Connect(h1.ServerIdentity); err != nil {
		t.Fatal(err)
	}

	// create request
	m, err := sda.CreateServiceMessage("DummyService", &DummyMsg{10})
	assert.Nil(t, err)
	log.Lvl1("Sending request to service...")
	assert.Nil(t, h2.SendRaw(h1.ServerIdentity, m))

	// wait for the link from the Service on host 1
	waitOrFatalValue(ds1.link, true, t)
}
// CreateClientRequest creates a Request message out of any message that is // destined to a Service. XXX For the moment it uses protobuf, as it is already // handling abstract.Scalar/Public stuff that json can't do. Later we may want // to think on how to change that. func CreateClientRequest(service string, r interface{}) (*ClientRequest, error) { sid := ServiceFactory.ServiceID(service) log.Lvl1("Name", service, " <-> ServiceID", sid.String()) buff, err := network.MarshalRegisteredType(r) if err != nil { return nil, err } return &ClientRequest{ Service: sid, Data: buff, }, nil }
// TestServiceProcessRequest sends a request carrying garbage data ("a") to
// the service and expects it to signal false on its link within 100ms —
// i.e. the service must report that processing the request failed.
func TestServiceProcessRequest(t *testing.T) {
	ds := &DummyService{
		link: make(chan bool),
	}
	sda.RegisterNewService("DummyService", func(c *sda.Context, path string) sda.Service {
		ds.c = c
		ds.path = path
		return ds
	})
	host := sda.NewLocalHost(2000)
	host.Listen()
	host.StartProcessMessages()
	log.Lvl1("Host created and listening")
	defer host.Close()
	// Send a request to the service
	re := &sda.ClientRequest{
		Service: sda.ServiceFactory.ServiceID("DummyService"),
		Data:    []byte("a"),
	}
	// fake a client
	h2 := sda.NewLocalHost(2010)
	defer h2.Close()
	log.Lvl1("Client connecting to host")
	if _, err := h2.Connect(host.ServerIdentity); err != nil {
		t.Fatal(err)
	}
	log.Lvl1("Sending request to service...")
	if err := h2.SendRaw(host.ServerIdentity, re); err != nil {
		t.Fatal(err)
	}
	// wait for the link: the service must answer (false) within 100ms
	select {
	case v := <-ds.link:
		if v {
			t.Fatal("was expecting false !")
		}
	case <-time.After(100 * time.Millisecond):
		t.Fatal("Too late")
	}
}
// TestClient_Parallel starts nbrParallel concurrent clients against the
// same "BackForth" service and checks that each one receives its own
// response (Val == 10*i).
// NOTE(review): log.ErrFatal and the asserts run inside goroutines — the
// testing package only supports Fatal/FailNow from the test goroutine, so
// a failure here may not be reported reliably; also wg.Done is not
// deferred, so a failing goroutine would hang wg.Wait. Confirm and
// consider collecting errors over a channel instead.
func TestClient_Parallel(t *testing.T) {
	nbrNodes := 2
	nbrParallel := 2
	local := sda.NewLocalTest()
	defer local.CloseAll()

	// register service
	sda.RegisterNewService("BackForth", func(c *sda.Context, path string) sda.Service {
		return &simpleService{
			ctx: c,
		}
	})
	// create hosts
	hosts, el, _ := local.GenTree(nbrNodes, true, true, false)

	wg := sync.WaitGroup{}
	wg.Add(nbrParallel)
	for i := 0; i < nbrParallel; i++ {
		go func(i int) {
			log.Lvl1("Starting message", i)
			r := &simpleRequest{
				ServerIdentities: el,
				Val:              10 * i,
			}
			client := sda.NewClient("BackForth")
			nm, err := client.Send(hosts[0].ServerIdentity, r)
			log.ErrFatal(err)

			assert.Equal(t, nm.MsgType, simpleResponseType)
			resp := nm.Msg.(simpleResponse)
			assert.Equal(t, resp.Val, 10*i)
			log.Lvl1("Done with message", i)
			wg.Done()
		}(i)
	}
	wg.Wait()
}
// main reads in the platform that we want to use and prepares for the tests.
// For every simulation given on the command line it either deploys and
// cleans up (with -clean) or runs the tests with a per-simulation timeout.
func main() {
	flag.Parse()
	deployP = platform.NewPlatform(platformDst)
	if deployP == nil {
		log.Fatal("Platform not recognized.", platformDst)
	}
	log.Lvl1("Deploying to", platformDst)

	simulations := flag.Args()
	if len(simulations) == 0 {
		log.Fatal("Please give a simulation to run")
	}

	for _, simulation := range simulations {
		runconfigs := platform.ReadRunFile(deployP, simulation)

		if len(runconfigs) == 0 {
			log.Fatal("No tests found in", simulation)
		}
		deployP.Configure(&platform.Config{
			MonitorPort: monitorPort,
			Debug:       log.DebugVisible(),
		})

		if clean {
			// Deploy the first config only, then clean everything up.
			err := deployP.Deploy(runconfigs[0])
			if err != nil {
				log.Fatal("Couldn't deploy:", err)
			}
			if err := deployP.Cleanup(); err != nil {
				log.Error("Couldn't cleanup correctly:", err)
			}
		} else {
			// Derive the log name from the simulation file name without ".toml".
			logname := strings.Replace(filepath.Base(simulation), ".toml", "", 1)
			testsDone := make(chan bool)
			go func() {
				RunTests(logname, runconfigs)
				testsDone <- true
			}()
			// Abort the whole run if the tests exceed the configured wait time.
			timeout := getExperimentWait(runconfigs)
			select {
			case <-testsDone:
				log.Lvl3("Done with test", simulation)
			case <-time.After(time.Second * time.Duration(timeout)):
				log.Fatal("Test failed to finish in", timeout, "seconds")
			}
		}
	}
}
// ReadTomlConfig read any structure from a toml-file // Takes a filename and an optional directory-name func ReadTomlConfig(conf interface{}, filename string, dirOpt ...string) error { buf, err := ioutil.ReadFile(getFullName(filename, dirOpt...)) if err != nil { pwd, _ := os.Getwd() log.Lvl1("Didn't find", filename, "in", pwd) return err } _, err = toml.Decode(string(buf), conf) if err != nil { log.Fatal(err) } return nil }
// DownloadBlock takes 'dir' as the directory where to download the block. // It returns the downloaded file func DownloadBlock(dir string) (string, error) { blockDir := SimulDirToBlockDir(dir) cmd := exec.Command("wget", "--no-check-certificate", "-O", blockDir+"/blk00000.dat", "-c", "https://icsil1-box.epfl.ch:5001/fbsharing/IzTFdOxf") cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr log.Lvl1("Cmd is", cmd) if err := cmd.Start(); err != nil { return "", err } if err := cmd.Wait(); err != nil { return "", err } return GetBlockName(dir), nil }
// TestReconnection checks that messages still flow between two hosts after
// h1 closes and re-binds its connections, and after h2 is aborted *hard*
// (no FIN packet), restarted, and its stale connection re-registered on h1.
func TestReconnection(t *testing.T) {
	defer log.AfterTest(t)
	h1 := sda.NewLocalHost(2000)
	h2 := sda.NewLocalHost(2001)
	defer h1.Close()
	defer h2.Close()

	h1.ListenAndBind()
	h2.ListenAndBind()

	log.Lvl1("Sending h1->h2")
	log.ErrFatal(sendrcv(h1, h2))
	log.Lvl1("Sending h2->h1")
	log.ErrFatal(sendrcv(h2, h1))
	log.Lvl1("Closing h1")
	h1.CloseConnections()

	log.Lvl1("Listening again on h1")
	h1.ListenAndBind()

	log.Lvl1("Sending h2->h1")
	log.ErrFatal(sendrcv(h2, h1))
	log.Lvl1("Sending h1->h2")
	log.ErrFatal(sendrcv(h1, h2))

	log.Lvl1("Shutting down listener of h2")

	// closing h2, but simulate *hard* failure, without sending a FIN packet
	c2 := h1.Connection(h2.ServerIdentity)
	// making h2 fails
	h2.AbortConnections()
	log.Lvl1("asking h2 to listen again")
	// making h2 backup again
	h2.ListenAndBind()
	// and re-registering the connection to h2 from h1
	h1.RegisterConnection(h2.ServerIdentity, c2)

	log.Lvl1("Sending h1->h2")
	log.ErrFatal(sendrcv(h1, h2))
}
// Run implements sda.Simulation interface. For every round it lets a
// byzcoin client replay the stored blocks against the server, then runs
// one "ByzCoinNtree" protocol instance over the tree and measures the
// time until all nodes have finished.
func (e *Simulation) Run(sdaConf *sda.SimulationConfig) error {
	log.Lvl2("Naive Tree Simulation starting with: Rounds=", e.Rounds)
	server := NewNtreeServer(e.Blocksize)
	for round := 0; round < e.Rounds; round++ {
		client := byzcoin.NewClient(server)
		err := client.StartClientSimulation(blockchain.GetBlockDir(), e.Blocksize)
		if err != nil {
			// best-effort: log the failure and run the round anyway
			log.Error("ClientSimulation:", err)
		}

		log.Lvl1("Starting round", round)
		// create an empty node
		node := sdaConf.Overlay.NewTreeNodeInstanceFromProtoName(sdaConf.Tree, "ByzCoinNtree")
		// instantiate a byzcoin protocol
		rComplete := monitor.NewTimeMeasure("round")
		pi, err := server.Instantiate(node)
		if err != nil {
			return err
		}
		sdaConf.Overlay.RegisterProtocolInstance(pi)

		nt := pi.(*Ntree)
		// Register when the protocol is finished (all the nodes have finished)
		done := make(chan bool)
		nt.RegisterOnDone(func(sig *NtreeSignature) {
			rComplete.Record()
			log.Lvl3("Done")
			done <- true
		})

		go func() {
			if err := nt.Start(); err != nil {
				log.Error("Couldn't start ntree protocol:", err)
			}
		}()
		// wait for the end
		<-done
		log.Lvl3("Round", round, "finished")
	}
	return nil
}
// Run is used on the destination machines and runs a number of // rounds func (e *simulation) Run(config *sda.SimulationConfig) error { size := config.Tree.Size() log.Lvl2("Size is:", size, "rounds:", e.Rounds) for round := 0; round < e.Rounds; round++ { log.Lvl1("Starting round", round) round := monitor.NewTimeMeasure("round") p, err := config.Overlay.CreateProtocolSDA(config.Tree, "Count") if err != nil { return err } go p.Start() children := <-p.(*ProtocolCount).Count round.Record() if children != size { return errors.New("Didn't get " + strconv.Itoa(size) + " children") } } return nil }
// MarshalJSON serializes a BlockReply. The abstract crypto values
// (Response, Challenge, AggCommit, AggPublic) cannot be handled by
// encoding/json directly, so they are written with the suite into a byte
// buffer that is emitted as the SignatureInfo field, next to the remaining
// fields exposed through the Alias type (which drops this method and so
// avoids infinite recursion).
func (sr *BlockReply) MarshalJSON() ([]byte, error) {
	type Alias BlockReply
	var b bytes.Buffer
	suite, err := suites.StringToSuite(sr.SuiteStr)
	if err != nil {
		return nil, err
	}
	//log.Print("Preparing abstracts")
	if err := suite.Write(&b, sr.Response, sr.Challenge, sr.AggCommit, sr.AggPublic); err != nil {
		log.Lvl1("encoding stampreply response/challenge/AggCommit:", err)
		return nil, err
	}

	//log.Print("Returning helper-struct")
	return json.Marshal(&struct {
		SignatureInfo []byte
		*Alias
	}{
		SignatureInfo: b.Bytes(),
		Alias:         (*Alias)(sr),
	})
}
func TestJVSS(t *testing.T) { // Setup parameters var name string = "JVSS" // Protocol name var nodes uint32 = 5 // Number of nodes var rounds int = 3 // Number of rounds msg := []byte("Hello World!") // Message to-be-signed local := sda.NewLocalTest() _, _, tree := local.GenTree(int(nodes), false, true, true) defer local.CloseAll() log.TestOutput(testing.Verbose(), 1) log.Lvl1("JVSS - starting") leader, err := local.CreateProtocol(tree, name) if err != nil { t.Fatal("Couldn't initialise protocol tree:", err) } jv := leader.(*jvss.JVSS) leader.Start() log.Lvl1("JVSS - setup done") for i := 0; i < rounds; i++ { log.Lvl1("JVSS - starting round", i) log.Lvl1("JVSS - requesting signature") sig, _ := jv.Sign(msg) log.Lvl1("JVSS - signature received") err = jv.Verify(msg, sig) if err != nil { t.Fatal("Error signature verification failed", err) } log.Lvl1("JVSS - signature verification succeded") } }
// Run runs the simulation func (e *Simulation) Run(sdaConf *sda.SimulationConfig) error { doneChan := make(chan bool) doneCB := func() { doneChan <- true } // FIXME use client instead dir := blockchain.GetBlockDir() parser, err := blockchain.NewParser(dir, magicNum) if err != nil { log.Error("Error: Couldn't parse blocks in", dir) return err } transactions, err := parser.Parse(0, e.Blocksize) if err != nil { log.Error("Error while parsing transactions", err) return err } // FIXME c&p from byzcoin.go trlist := blockchain.NewTransactionList(transactions, len(transactions)) header := blockchain.NewHeader(trlist, "", "") trblock := blockchain.NewTrBlock(trlist, header) // Here we first setup the N^2 connections with a broadcast protocol pi, err := sdaConf.Overlay.CreateProtocolSDA(sdaConf.Tree, "Broadcast") if err != nil { log.Error(err) } proto := pi.(*manage.Broadcast) // channel to notify we are done broadDone := make(chan bool) proto.RegisterOnDone(func() { broadDone <- true }) // ignore error on purpose: Start always returns nil _ = proto.Start() // wait <-broadDone log.Lvl3("Simulation can start!") for round := 0; round < e.Rounds; round++ { log.Lvl1("Starting round", round) p, err := sdaConf.Overlay.CreateProtocolSDA(sdaConf.Tree, "ByzCoinPBFT") if err != nil { return err } proto := p.(*Protocol) proto.trBlock = trblock proto.onDoneCB = doneCB r := monitor.NewTimeMeasure("round_pbft") err = proto.Start() if err != nil { log.Error("Couldn't start PrePrepare") return err } // wait for finishing pbft: <-doneChan r.Record() log.Lvl2("Finished round", round) } return nil }
func (c *ServiceChannels) NewProtocol(tn *sda.TreeNodeInstance, conf *sda.GenericConfig) (sda.ProtocolInstance, error) { log.Lvl1("Cosi Service received New Protocol event") return NewProtocolChannels(tn) }
// main either kills off old processes on all hosts of a Deterlab
// experiment (with -kill) or prepares each host (raising the file-limit),
// then launches the monitor-proxy and one cothority binary per physical
// machine, waiting for all of them to finish.
func main() {
	// init with deter.toml
	deter := deterFromConfig()
	flag.Parse()

	// kill old processes
	var wg sync.WaitGroup
	re := regexp.MustCompile(" +")
	hosts, err := exec.Command("/usr/testbed/bin/node_list", "-e", deter.Project+","+deter.Experiment).Output()
	if err != nil {
		log.Fatal("Deterlab experiment", deter.Project+"/"+deter.Experiment, "seems not to be swapped in. Aborting.")
		// NOTE(review): unreachable — log.Fatal already terminates the process.
		os.Exit(-1)
	}
	// Normalize the whitespace-separated host list into a slice.
	hostsTrimmed := strings.TrimSpace(re.ReplaceAllString(string(hosts), " "))
	hostlist := strings.Split(hostsTrimmed, " ")
	doneHosts := make([]bool, len(hostlist))
	log.Lvl2("Found the following hosts:", hostlist)
	if kill {
		log.Lvl1("Cleaning up", len(hostlist), "hosts.")
	}
	for i, h := range hostlist {
		wg.Add(1)
		go func(i int, h string) {
			defer wg.Done()
			if kill {
				log.Lvl3("Cleaning up host", h, ".")
				runSSH(h, "sudo killall -9 cothority scp 2>/dev/null >/dev/null")
				time.Sleep(1 * time.Second)
				runSSH(h, "sudo killall -9 cothority 2>/dev/null >/dev/null")
				time.Sleep(1 * time.Second)
				// Also kill all other process that start with "./" and are probably
				// locally started processes
				runSSH(h, "sudo pkill -9 -f '\\./'")
				time.Sleep(1 * time.Second)
				if log.DebugVisible() > 3 {
					log.Lvl4("Cleaning report:")
					_ = platform.SSHRunStdout("", h, "ps aux")
				}
			} else {
				log.Lvl3("Setting the file-limit higher on", h)
				// Copy configuration file to make higher file-limits
				err := platform.SSHRunStdout("", h, "sudo cp remote/cothority.conf /etc/security/limits.d")
				if err != nil {
					log.Fatal("Couldn't copy limit-file:", err)
				}
			}
			doneHosts[i] = true
			log.Lvl3("Host", h, "cleaned up")
		}(i, h)
	}

	cleanupChannel := make(chan string)
	go func() {
		wg.Wait()
		log.Lvl3("Done waiting")
		cleanupChannel <- "done"
	}()
	select {
	case msg := <-cleanupChannel:
		log.Lvl3("Received msg from cleanupChannel", msg)
	case <-time.After(time.Second * 20):
		// After 20s, report which hosts never finished their cleanup.
		for i, m := range doneHosts {
			if !m {
				log.Lvl1("Missing host:", hostlist[i], "- You should run")
				log.Lvl1("/usr/testbed/bin/node_reboot", hostlist[i])
			}
		}
		log.Fatal("Didn't receive all replies while cleaning up - aborting.")
	}

	if kill {
		log.Lvl2("Only cleaning up - returning")
		return
	}

	// ADDITIONS : the monitoring part
	// Proxy will listen on Sink:SinkPort and redirect every packet to
	// RedirectionAddress:SinkPort-1. With remote tunnel forwarding it will
	// be forwarded to the real sink
	proxyAddress := deter.ProxyAddress + ":" + strconv.Itoa(deter.MonitorPort+1)
	log.Lvl2("Launching proxy redirecting to", proxyAddress)
	err = monitor.Proxy(proxyAddress)
	if err != nil {
		log.Fatal("Couldn't start proxy:", err)
	}

	log.Lvl1("starting", deter.Servers, "cothorities for a total of", deter.Hosts, "processes.")
	// NOTE(review): killing is read and written by several goroutines below
	// without synchronization — a data race; confirm whether it needs an
	// atomic or a mutex.
	killing := false
	for i, phys := range deter.Phys {
		log.Lvl2("Launching cothority on", phys)
		wg.Add(1)
		go func(phys, internal string) {
			//log.Lvl4("running on", phys, cmd)
			defer wg.Done()
			monitorAddr := deter.MonitorAddress + ":" + strconv.Itoa(deter.MonitorPort)
			log.Lvl4("Starting servers on physical machine ", internal, "with monitor = ", monitorAddr)
			args := " -address=" + internal +
				" -simul=" + deter.Simulation +
				" -monitor=" + monitorAddr +
				" -debug=" + strconv.Itoa(log.DebugVisible())
			log.Lvl3("Args is", args)
			err := platform.SSHRunStdout("", phys, "cd remote; sudo ./cothority "+
				args)
			if err != nil && !killing {
				log.Lvl1("Error starting cothority - will kill all others:", err, internal)
				killing = true
				err := exec.Command("killall", "ssh").Run()
				if err != nil {
					log.Fatal("Couldn't killall ssh:", err)
				}
			}
			log.Lvl4("Finished with cothority on", internal)
		}(phys, deter.Virt[i])
	}

	// wait for the servers to finish before stopping
	wg.Wait()
}
// Main starts the host and will setup the protocol. func main() { flag.Parse() log.SetDebugVisible(debugVisible) log.Lvl3("Flags are:", hostAddress, simul, log.DebugVisible, monitorAddress) scs, err := sda.LoadSimulationConfig(".", hostAddress) measures := make([]*monitor.CounterIOMeasure, len(scs)) if err != nil { // We probably are not needed log.Lvl2(err, hostAddress) return } if monitorAddress != "" { if err := monitor.ConnectSink(monitorAddress); err != nil { log.Error("Couldn't connect monitor to sink:", err) } } sims := make([]sda.Simulation, len(scs)) var rootSC *sda.SimulationConfig var rootSim sda.Simulation for i, sc := range scs { // Starting all hosts for that server host := sc.Host measures[i] = monitor.NewCounterIOMeasure("bandwidth", host) log.Lvl3(hostAddress, "Starting host", host.ServerIdentity.Addresses) host.Listen() host.StartProcessMessages() sim, err := sda.NewSimulation(simul, sc.Config) if err != nil { log.Fatal(err) } err = sim.Node(sc) if err != nil { log.Fatal(err) } sims[i] = sim if host.ServerIdentity.ID == sc.Tree.Root.ServerIdentity.ID { log.Lvl2(hostAddress, "is root-node, will start protocol") rootSim = sim rootSC = sc } } if rootSim != nil { // If this cothority has the root-host, it will start the simulation log.Lvl2("Starting protocol", simul, "on host", rootSC.Host.ServerIdentity.Addresses) //log.Lvl5("Tree is", rootSC.Tree.Dump()) // First count the number of available children childrenWait := monitor.NewTimeMeasure("ChildrenWait") wait := true // The timeout starts with 1 second, which is the time of response between // each level of the tree. 
timeout := 1000 for wait { p, err := rootSC.Overlay.CreateProtocolSDA(rootSC.Tree, "Count") if err != nil { log.Fatal(err) } proto := p.(*manage.ProtocolCount) proto.SetTimeout(timeout) proto.Start() log.Lvl1("Started counting children with timeout of", timeout) select { case count := <-proto.Count: if count == rootSC.Tree.Size() { log.Lvl1("Found all", count, "children") wait = false } else { log.Lvl1("Found only", count, "children, counting again") } } // Double the timeout and try again if not successful. timeout *= 2 } childrenWait.Record() log.Lvl1("Starting new node", simul) measureNet := monitor.NewCounterIOMeasure("bandwidth_root", rootSC.Host) err := rootSim.Run(rootSC) if err != nil { log.Fatal(err) } measureNet.Record() // Test if all ServerIdentities are used in the tree, else we'll run into // troubles with CloseAll if !rootSC.Tree.UsesList() { log.Error("The tree doesn't use all ServerIdentities from the list!\n" + "This means that the CloseAll will fail and the experiment never ends!") } closeTree := rootSC.Tree if rootSC.GetSingleHost() { // In case of "SingleHost" we need a new tree that contains every // entity only once, whereas rootSC.Tree will have the same // entity at different TreeNodes, which makes it difficult to // correctly close everything. 
log.Lvl2("Making new root-tree for SingleHost config") closeTree = rootSC.Roster.GenerateBinaryTree() rootSC.Overlay.RegisterTree(closeTree) } pi, err := rootSC.Overlay.CreateProtocolSDA(closeTree, "CloseAll") pi.Start() if err != nil { log.Fatal(err) } } // Wait for all hosts to be closed allClosed := make(chan bool) go func() { for i, sc := range scs { sc.Host.WaitForClose() // record the bandwidth measures[i].Record() log.Lvl3(hostAddress, "Simulation closed host", sc.Host.ServerIdentity.Addresses, "closed") } allClosed <- true }() log.Lvl3(hostAddress, scs[0].Host.ServerIdentity.First(), "is waiting for all hosts to close") <-allClosed log.Lvl2(hostAddress, "has all hosts closed") monitor.EndAndCleanup() }
// test for calling the NewProtocol method on a remote Service.
// The factory hands out ds1 to the root (count 0), ds2 to the child
// (count 1), and a throwaway service to the client (count 2).
func TestServiceNewProtocol(t *testing.T) {
	ds1 := &DummyService{
		link: make(chan bool),
		Config: DummyConfig{
			Send: true,
		},
	}
	ds2 := &DummyService{
		link: make(chan bool),
	}
	var count int
	sda.RegisterNewService("DummyService", func(c *sda.Context, path string) sda.Service {
		var localDs *DummyService
		switch count {
		case 2:
			// the client does not need a Service
			return &DummyService{link: make(chan bool)}
		case 1: // children
			localDs = ds2
		case 0: // root
			localDs = ds1
		}
		localDs.c = c
		localDs.path = path
		count++
		return localDs
	})
	host := sda.NewLocalHost(2000)
	host.ListenAndBind()
	host.StartProcessMessages()
	log.Lvl1("Host created and listening")
	defer host.Close()

	host2 := sda.NewLocalHost(2002)
	host2.ListenAndBind()
	host2.StartProcessMessages()
	defer host2.Close()
	// create the entityList and tree
	el := sda.NewRoster([]*network.ServerIdentity{host.ServerIdentity, host2.ServerIdentity})
	tree := el.GenerateBinaryTree()
	// give it to the service
	ds1.fakeTree = tree

	// Send a request to the service
	b, err := network.MarshalRegisteredType(&DummyMsg{10})
	log.ErrFatal(err)
	re := &sda.ClientRequest{
		Service: sda.ServiceFactory.ServiceID("DummyService"),
		Data:    b,
	}
	// fake a client
	client := sda.NewLocalHost(2010)
	defer client.Close()
	log.Lvl1("Client connecting to host")
	if _, err := client.Connect(host.ServerIdentity); err != nil {
		t.Fatal(err)
	}
	log.Lvl1("Sending request to service...")
	if err := client.SendRaw(host.ServerIdentity, re); err != nil {
		t.Fatal(err)
	}
	// wait for the link from the protocol that Starts
	waitOrFatalValue(ds1.link, true, t)
	// now wait for the same link as the protocol should have sent a message to
	// himself !
	waitOrFatalValue(ds1.link, true, t)
	// now wait for the SECOND LINK on the SECOND HOST that the SECOND SERVICE
	// should have started (ds2) in ProcessRequest
	waitOrFatalValue(ds2.link, true, t)
}
// Proxy will launch a routine that waits for input connections.
// It takes a redirection address soas to where redirect incoming packets.
// Proxy will listen on Sink:SinkPort variables so that the user do not
// differentiate between connecting to a proxy or directly to the sink.
// It will panic if it can not contact the server or can not bind to the address.
// NOTE(review): the `finished` flag and the `proxyConns` map are accessed
// from several goroutines without synchronization — a data race; confirm
// whether this needs a mutex or channel-based shutdown.
func Proxy(redirection string) error {
	// Connect to the sink
	if err := connectToSink(redirection); err != nil {
		return err
	}
	log.Lvl2("Proxy connected to sink", redirection)
	// The proxy listens on the port one lower than itself
	_, port, err := net.SplitHostPort(redirection)
	if err != nil {
		log.Fatal("Couldn't get port-numbre from", redirection)
	}
	portNbr, err := strconv.Atoi(port)
	if err != nil {
		log.Fatal("Couldn't convert", port, "to a number")
	}
	sinkAddr := Sink + ":" + strconv.Itoa(portNbr-1)
	ln, err := net.Listen("tcp", sinkAddr)
	if err != nil {
		return fmt.Errorf("Error while binding proxy to addr %s: %v", sinkAddr, err)
	}
	log.Lvl2("Proxy listening on", sinkAddr)
	newConn := make(chan bool)
	closeConn := make(chan bool)
	finished := false
	proxyConns := make(map[string]*json.Encoder)

	// Listen for incoming connections
	go func() {
		for finished == false {
			conn, err := ln.Accept()
			if err != nil {
				operr, ok := err.(*net.OpError)
				// the listener is closed
				if ok && operr.Op == "accept" {
					break
				}
				log.Lvl1("Error proxy accepting connection:", err)
				continue
			}
			log.Lvl3("Proxy accepting incoming connection from:", conn.RemoteAddr().String())
			newConn <- true
			proxyConns[conn.RemoteAddr().String()] = json.NewEncoder(conn)
			go proxyConnection(conn, closeConn)
		}
	}()

	go func() {
		// notify every new connection and every end of connection. When all
		// connections are closed, send an "end" measure to the sink.
		var nconn int
		for finished == false {
			select {
			case <-newConn:
				nconn++
			case <-closeConn:
				nconn--
				if nconn == 0 {
					// everything is finished: notify the sink, close the
					// server connection and stop accepting new connections.
					if err := serverEnc.Encode(NewSingleMeasure("end", 0)); err != nil {
						log.Error("Couldn't send 'end' message:", err)
					}
					if err := serverConn.Close(); err != nil {
						log.Error("Couldn't close server connection:", err)
					}
					if err := ln.Close(); err != nil {
						log.Error("Couldn't close listener:", err)
					}
					finished = true
					break
				}
			}
		}
	}()
	return nil
}
// Run implements sda.Simulation interface func (e *Simulation) Run(sdaConf *sda.SimulationConfig) error { log.Lvl2("Simulation starting with: Rounds=", e.Rounds) server := NewByzCoinServer(e.Blocksize, e.TimeoutMs, e.Fail) pi, err := sdaConf.Overlay.CreateProtocolSDA(sdaConf.Tree, "Broadcast") if err != nil { return err } proto, _ := pi.(*manage.Broadcast) // channel to notify we are done broadDone := make(chan bool) proto.RegisterOnDone(func() { broadDone <- true }) // ignore error on purpose: Broadcast.Start() always returns nil _ = proto.Start() // wait <-broadDone for round := 0; round < e.Rounds; round++ { client := NewClient(server) err := client.StartClientSimulation(blockchain.GetBlockDir(), e.Blocksize) if err != nil { log.Error("Error in ClientSimulation:", err) return err } log.Lvl1("Starting round", round) // create an empty node tni := sdaConf.Overlay.NewTreeNodeInstanceFromProtoName(sdaConf.Tree, "ByzCoin") if err != nil { return err } // instantiate a byzcoin protocol rComplete := monitor.NewTimeMeasure("round") pi, err := server.Instantiate(tni) if err != nil { return err } sdaConf.Overlay.RegisterProtocolInstance(pi) bz := pi.(*ByzCoin) // Register callback for the generation of the signature ! 
bz.RegisterOnSignatureDone(func(sig *BlockSignature) { rComplete.Record() if err := verifyBlockSignature(tni.Suite(), tni.Roster().Aggregate, sig); err != nil { log.Error("Round", round, "failed:", err) } else { log.Lvl2("Round", round, "success") } }) // Register when the protocol is finished (all the nodes have finished) done := make(chan bool) bz.RegisterOnDone(func() { done <- true }) if e.Fail > 0 { go func() { err := bz.startAnnouncementPrepare() if err != nil { log.Error("Error while starting "+ "announcment prepare:", err) } }() // do not run bz.startAnnouncementCommit() } else { go func() { if err := bz.Start(); err != nil { log.Error("Couldn't start protocol", err) } }() } // wait for the end <-done log.Lvl3("Round", round, "finished") } return nil }
// RunTests runs the given tests and puts the output into the
// given file name. It outputs RunStats in a CSV format.
// Each run config is tried up to nTimes times; by default the first
// successful run is kept (stopOnSuccess).
func RunTests(name string, runconfigs []platform.RunConfig) {

	if nobuild == false {
		if race {
			if err := deployP.Build(build, "-race"); err != nil {
				log.Error("Couln't finish build without errors:", err)
			}
		} else {
			if err := deployP.Build(build); err != nil {
				log.Error("Couln't finish build without errors:", err)
			}
		}
	}

	mkTestDir()

	rs := make([]*monitor.Stats, len(runconfigs))
	// Try 10 times to run the test
	nTimes := 10
	stopOnSuccess := true
	var f *os.File
	args := os.O_CREATE | os.O_RDWR | os.O_TRUNC
	// If a range is given, we only append
	if simRange != "" {
		args = os.O_CREATE | os.O_RDWR | os.O_APPEND
	}
	f, err := os.OpenFile(testFile(name), args, 0660)
	if err != nil {
		log.Fatal("error opening test file:", err)
	}
	defer func() {
		if err := f.Close(); err != nil {
			log.Error("Couln't close", f.Name())
		}
	}()
	err = f.Sync()
	if err != nil {
		log.Fatal("error syncing test file:", err)
	}

	start, stop := getStartStop(len(runconfigs))
	for i, t := range runconfigs {
		// Implement a simple range-argument that will skip checks not in range
		if i < start || i > stop {
			log.Lvl2("Skipping", t, "because of range")
			continue
		}
		// Waiting for the document-branch to be merged, then uncomment this
		//log.Lvl1("Starting run with parameters -", t.String())

		// run test t nTimes times
		// take the average of all successful runs
		runs := make([]*monitor.Stats, 0, nTimes)
		for r := 0; r < nTimes; r++ {
			stats, err := RunTest(t)
			if err != nil {
				log.Error("Error running test, trying again:", err)
				continue
			}
			runs = append(runs, stats)
			if stopOnSuccess {
				break
			}
		}

		if len(runs) == 0 {
			log.Lvl1("unable to get any data for test:", t)
			continue
		}

		s := monitor.AverageStats(runs)
		// NOTE(review): the CSV header is only written for i == 0 — if the
		// range skips index 0, the output file has no header; confirm intended.
		if i == 0 {
			s.WriteHeader(f)
		}
		rs[i] = s
		rs[i].WriteValues(f)
		err = f.Sync()
		if err != nil {
			log.Fatal("error syncing data to test file:", err)
		}
	}
}