Example #1
File: simul.go Project: nikirill/cothority
// CheckHosts verifies that there is either a 'Hosts' or a 'Depth'/'BF'
// parameter in the RunConfig
func CheckHosts(rc platform.RunConfig) {
	hosts, _ := rc.GetInt("hosts")
	bf, _ := rc.GetInt("bf")
	depth, _ := rc.GetInt("depth")
	if hosts == 0 {
		if depth == 0 || bf == 0 {
			log.Fatal("No Hosts and no Depth or BF given - stopping")
		}
		hosts = calcHosts(bf, depth)
		rc.Put("hosts", strconv.Itoa(hosts))
	}
	if bf == 0 {
		if depth == 0 || hosts == 0 {
			log.Fatal("No BF and no Depth or hosts given - stopping")
		}
		bf = 2
		for calcHosts(bf, depth) < hosts {
			bf++
		}
		rc.Put("bf", strconv.Itoa(bf))
	}
	if depth == 0 {
		depth = 1
		for calcHosts(bf, depth) < hosts {
			depth++
		}
		rc.Put("depth", strconv.Itoa(depth))
	}
}
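
The helper calcHosts used above is not listed on this page. A minimal sketch
that is consistent with its use in CheckHosts, assuming it counts the nodes of
a complete tree with branching factor 'bf' whose root sits at depth 0:

func calcHosts(bf, depth int) int {
	// total nodes = 1 + bf + bf^2 + ... + bf^depth
	total, levelSize := 0, 1
	for i := 0; i <= depth; i++ {
		total += levelSize
		levelSize *= bf
	}
	return total
}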
Example #2
// CreateRoster creates a Roster with the host-names in 'addresses'.
// It creates 's.Hosts' entries, starting from 'port' and incrementing the
// port on each full round through 'addresses'.
func (s *SimulationBFTree) CreateRoster(sc *SimulationConfig, addresses []string, port int) {
	start := time.Now()
	nbrAddr := len(addresses)
	if sc.PrivateKeys == nil {
		sc.PrivateKeys = make(map[string]abstract.Scalar)
	}
	hosts := s.Hosts
	if s.SingleHost {
		// If we want to work with a single host, we only make one
		// host per server
		log.Fatal("Not supported yet")
		hosts = nbrAddr
		if hosts > s.Hosts {
			hosts = s.Hosts
		}
	}
	localhosts := false
	listeners := make([]net.Listener, hosts)
	if strings.Contains(addresses[0], "localhost") {
		localhosts = true
	}
	entities := make([]*network.ServerIdentity, hosts)
	log.Lvl3("Doing", hosts, "hosts")
	key := config.NewKeyPair(network.Suite)
	for c := 0; c < hosts; c++ {
		key.Secret.Add(key.Secret,
			key.Suite.Scalar().One())
		key.Public.Add(key.Public,
			key.Suite.Point().Base())
		address := addresses[c%nbrAddr] + ":"
		if localhosts {
			// If we have localhosts, we have to search for an empty port
			var err error
			listeners[c], err = net.Listen("tcp", ":0")
			if err != nil {
				log.Fatal("Couldn't search for empty port:", err)
			}
			_, p, _ := net.SplitHostPort(listeners[c].Addr().String())
			address += p
			log.Lvl4("Found free port", address)
		} else {
			address += strconv.Itoa(port + c/nbrAddr)
		}
		entities[c] = network.NewServerIdentity(key.Public, address)
		sc.PrivateKeys[entities[c].Addresses[0]] = key.Secret
	}
	// And close all our listeners
	if localhosts {
		for _, l := range listeners {
			err := l.Close()
			if err != nil {
				log.Fatal("Couldn't close port:", l, err)
			}
		}
	}

	sc.Roster = NewRoster(entities)
	log.Lvl3("Creating entity List took: " + time.Now().Sub(start).String())
}
Example #3
File: utils.go Project: nikirill/cothority
// WriteTomlConfig writes any structure to a toml-file.
// Takes a filename and an optional directory-name.
func WriteTomlConfig(conf interface{}, filename string, dirOpt ...string) {
	buf := new(bytes.Buffer)
	if err := toml.NewEncoder(buf).Encode(conf); err != nil {
		log.Fatal(err)
	}
	err := ioutil.WriteFile(getFullName(filename, dirOpt...), buf.Bytes(), 0660)
	if err != nil {
		log.Fatal(err)
	}
}
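
getFullName is shared with ReadTomlConfig (Example #15) but not listed here.
A plausible sketch, assuming it merely prefixes the optional directory to the
filename:

func getFullName(filename string, dirOpt ...string) string {
	dir := "."
	if len(dirOpt) > 0 {
		dir = dirOpt[0]
	}
	return filepath.Join(dir, filename)
}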
Example #4
File: simul.go Project: nikirill/cothority
// Reads in the platform that we want to use and prepares for the tests
func main() {
	flag.Parse()
	deployP = platform.NewPlatform(platformDst)
	if deployP == nil {
		log.Fatal("Platform not recognized.", platformDst)
	}
	log.Lvl1("Deploying to", platformDst)

	simulations := flag.Args()
	if len(simulations) == 0 {
		log.Fatal("Please give a simulation to run")
	}

	for _, simulation := range simulations {
		runconfigs := platform.ReadRunFile(deployP, simulation)

		if len(runconfigs) == 0 {
			log.Fatal("No tests found in", simulation)
		}
		deployP.Configure(&platform.Config{
			MonitorPort: monitorPort,
			Debug:       log.DebugVisible(),
		})

		if clean {
			err := deployP.Deploy(runconfigs[0])
			if err != nil {
				log.Fatal("Couldn't deploy:", err)
			}
			if err := deployP.Cleanup(); err != nil {
				log.Error("Couldn't cleanup correctly:", err)
			}
		} else {
			logname := strings.Replace(filepath.Base(simulation), ".toml", "", 1)
			testsDone := make(chan bool)
			go func() {
				RunTests(logname, runconfigs)
				testsDone <- true
			}()
			timeout := getExperimentWait(runconfigs)
			select {
			case <-testsDone:
				log.Lvl3("Done with test", simulation)
			case <-time.After(time.Second * time.Duration(timeout)):
				log.Fatal("Test failed to finish in", timeout, "seconds")
			}
		}
	}
}
Example #5
File: messg.go Project: nikirill/cothority
func (sr *BlockReply) UnmarshalJSON(dataJSON []byte) error {
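	// Alias has the same fields as BlockReply but none of its methods, so
	// decoding into it below cannot recurse back into this UnmarshalJSON.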
	type Alias BlockReply
	//log.Print("Starting unmarshal")
	suite, err := suites.StringToSuite(sr.SuiteStr)
	if err != nil {
		return err
	}
	aux := &struct {
		SignatureInfo []byte
		Response      abstract.Scalar
		Challenge     abstract.Scalar
		AggCommit     abstract.Point
		AggPublic     abstract.Point
		*Alias
	}{
		Response:  suite.Scalar(),
		Challenge: suite.Scalar(),
		AggCommit: suite.Point(),
		AggPublic: suite.Point(),
		Alias:     (*Alias)(sr),
	}
	//log.Print("Doing JSON unmarshal")
	if err := json.Unmarshal(dataJSON, &aux); err != nil {
		return err
	}
	if err := suite.Read(bytes.NewReader(aux.SignatureInfo), &sr.Response, &sr.Challenge, &sr.AggCommit, &sr.AggPublic); err != nil {
		log.Fatal("decoding signature Response / Challenge / AggCommit: ", err)
		return err
	}
	return nil
}
Example #6
File: host.go Project: nikirill/cothority
// listen starts listening for messages coming from any host that tries to
// contact this host. If 'wait' is true, it will try to connect to itself before
// returning.
func (h *Host) listen(wait bool) {
	log.Lvl3(h.ServerIdentity.First(), "starts to listen")
	fn := func(c network.SecureConn) {
		log.Lvl3(h.workingAddress, "Accepted Connection from", c.Remote())
		// register the connection once we know it's ok
		h.registerConnection(c)
		h.handleConn(c)
	}
	go func() {
		log.Lvl4("Host listens on:", h.workingAddress)
		err := h.host.Listen(fn)
		if err != nil {
			log.Fatal("Couldn't listen on", h.workingAddress, ":", err)
		}
	}()
	if wait {
		for {
			log.Lvl4(h.ServerIdentity.First(), "checking if listener is up")
			_, err := h.Connect(h.ServerIdentity)
			if err == nil {
				log.Lvl4(h.ServerIdentity.First(), "managed to connect to itself")
				break
			}
			time.Sleep(network.WaitRetry)
		}
	}
}
Example #7
// GetBlockDir returns the block-directory relative to the current working
// directory - this also holds up when running under 'simul'
func GetBlockDir() string {
	dir, err := os.Getwd()
	if err != nil {
		log.Fatal("Couldn't get working dir:", err)
	}
	return dir + "/blocks"
}
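
The concatenation above assumes a Unix path separator; an equivalent but more
portable return statement would be:

	return filepath.Join(dir, "blocks")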
Example #8
File: stats.go Project: nikirill/cothority
// readRunConfig reads a config file and fills in some fields of the Stats struct
func (s *Stats) readRunConfig(rc map[string]string, defaults ...string) {
	// First find the defaults keys
	for _, def := range defaults {
		valStr, ok := rc[def]
		if !ok {
			log.Fatal("Could not find the default value", def, "in the RunConfig")
		}
		if i, err := strconv.Atoi(valStr); err != nil {
			log.Fatal("Could not parse to integer value", def)
		} else {
			// registers the static value
			s.static[def] = i
			s.staticKeys = append(s.staticKeys, def)
		}
	}
	// Then parse the other keys
	var statics []string
	for k, v := range rc {
		// skip the ones we already registered
		var alreadyRegistered bool
		for _, def := range defaults {
			if k == def {
				alreadyRegistered = true
				break
			}
		}
		if alreadyRegistered {
			continue
		}
		// store it
		if i, err := strconv.Atoi(v); err != nil {
			log.Lvl3("Could not parse the value", k, "from runconfig (v=", v, ")")
			continue
		} else {
			s.static[k] = i
			statics = append(statics, k)
		}
	}
	// sort them so it's always the same order
	sort.Strings(statics)
	// append them to the defaults one
	s.staticKeys = append(s.staticKeys, statics...)

	// let the filter figure out itself what it is supposed to be doing
	s.filter = NewDataFilter(rc)
}
Example #9
// Deploy copies all files to the run-directory
func (d *Localhost) Deploy(rc RunConfig) error {
	if runtime.GOOS == "darwin" {
		files, err := exec.Command("ulimit", "-n").Output()
		if err != nil {
			log.Fatal("Couldn't check for file-limit:", err)
		}
		filesNbr, err := strconv.Atoi(strings.TrimSpace(string(files)))
		if err != nil {
			log.Fatal("Couldn't convert", files, "to a number:", err)
		}
		hosts, _ := strconv.Atoi(rc.Get("hosts"))
		if filesNbr < hosts*2 {
			maxfiles := 10000 + hosts*2
			log.Fatalf("Maximum open files is too small. Please run the following command:\n"+
				"sudo sysctl -w kern.maxfiles=%d\n"+
				"sudo sysctl -w kern.maxfilesperproc=%d\n"+
				"ulimit -n %d\n"+
				"sudo sysctl -w kern.ipc.somaxconn=2048\n",
				maxfiles, maxfiles, maxfiles)
		}
	}

	d.servers, _ = strconv.Atoi(rc.Get("servers"))
	log.Lvl2("Localhost: Deploying and writing config-files for", d.servers, "servers")
	sim, err := sda.NewSimulation(d.Simulation, string(rc.Toml()))
	if err != nil {
		return err
	}
	d.addresses = make([]string, d.servers)
	for i := range d.addresses {
		d.addresses[i] = "localhost" + strconv.Itoa(i)
	}
	d.sc, err = sim.Setup(d.runDir, d.addresses)
	if err != nil {
		return err
	}
	d.sc.Config = string(rc.Toml())
	if err := d.sc.Save(d.runDir); err != nil {
		return err
	}
	log.Lvl2("Localhost: Done deploying")
	return nil
}
Example #10
// Save takes everything in the SimulationConfig structure and saves it to
// dir + SimulationFileName
func (sc *SimulationConfig) Save(dir string) error {
	network.RegisterMessageType(&SimulationConfigFile{})
	scf := &SimulationConfigFile{
		TreeMarshal: sc.Tree.MakeTreeMarshal(),
		Roster:      sc.Roster,
		PrivateKeys: sc.PrivateKeys,
		Config:      sc.Config,
	}
	buf, err := network.MarshalRegisteredType(scf)
	if err != nil {
		log.Fatal(err)
	}
	err = ioutil.WriteFile(dir+"/"+SimulationFileName, buf, 0660)
	if err != nil {
		log.Fatal(err)
	}

	return nil
}
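
SimulationConfigFile itself is not listed on this page. Judging from the
literal above and the PrivateKeys map in Example #2, it is presumably a plain
container along these lines (the pointer types are guesses):

type SimulationConfigFile struct {
	TreeMarshal *TreeMarshal
	Roster      *Roster
	PrivateKeys map[string]abstract.Scalar
	Config      string
}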
Example #11
File: simul.go Project: nikirill/cothority
// RunTest runs a single test - it takes a run-config that will be copied
// to the deterlab-server
func RunTest(rc platform.RunConfig) (*monitor.Stats, error) {
	done := make(chan struct{})
	CheckHosts(rc)
	rc.Delete("simulation")
	rs := monitor.NewStats(rc.Map(), "hosts", "bf")
	monitor := monitor.NewMonitor(rs)

	if err := deployP.Deploy(rc); err != nil {
		log.Error(err)
		return rs, err
	}

	if err := deployP.Cleanup(); err != nil {
		log.Error(err)
		return rs, err
	}
	monitor.SinkPort = monitorPort
	go func() {
		if err := monitor.Listen(); err != nil {
			log.Fatal("Could not monitor.Listen():", err)
		}
	}()
	// Start monitor before so ssh tunnel can connect to the monitor
	// in case of deterlab.
	err := deployP.Start()
	if err != nil {
		log.Error(err)
		return rs, err
	}

	go func() {
		var err error
		if err = deployP.Wait(); err != nil {
			log.Lvl3("Test failed:", err)
			if err := deployP.Cleanup(); err != nil {
				log.Lvl3("Couldn't cleanup platform:", err)
			}
			done <- struct{}{}
			return
		}
		log.Lvl3("Test complete:", rs)
		done <- struct{}{}
	}()

	timeOut := getRunWait(rc)
	// can timeout the command if it takes too long
	select {
	case <-done:
		monitor.Stop()
		return rs, nil
	case <-time.After(time.Second * time.Duration(timeOut)):
		monitor.Stop()
		return rs, errors.New("Simulation timeout")
	}
}
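
getRunWait is referenced above but not listed. A minimal sketch, assuming the
key name "runwait", a default of 180 seconds, and that rc.GetInt returns a
value and an error, as its use in Example #1 suggests:

func getRunWait(rc platform.RunConfig) int {
	if wait, err := rc.GetInt("runwait"); err == nil && wait > 0 {
		return wait
	}
	return 180 // assumed default
}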
Example #12
// Setup implements sda.Simulation interface. It checks on the availability
// of the block-file and downloads it if missing. Then the block-file will be
// copied to the simulation-directory
func (e *Simulation) Setup(dir string, hosts []string) (*sda.SimulationConfig, error) {
	err := blockchain.EnsureBlockIsAvailable(dir)
	if err != nil {
		log.Fatal("Couldn't get block:", err)
	}
	sc := &sda.SimulationConfig{}
	e.CreateRoster(sc, hosts, 2000)
	err = e.CreateTree(sc)
	if err != nil {
		return nil, err
	}
	return sc, nil
}
Example #13
// Configure various internal variables
func (d *Localhost) Configure(pc *Config) {
	pwd, _ := os.Getwd()
	d.runDir = pwd + "/platform/localhost"
	d.localDir = pwd
	d.debug = pc.Debug
	d.running = false
	d.monitorPort = pc.MonitorPort
	d.errChan = make(chan error)
	if d.Simulation == "" {
		log.Fatal("No simulation defined in simulation")
	}
	log.Lvl3(fmt.Sprintf("Localhost dirs: RunDir %s", d.runDir))
	log.Lvl3("Localhost configured ...")
}
Example #14
func TestReadRunfile(t *testing.T) {
	log.TestOutput(testing.Verbose(), 2)
	tplat := &TPlat{}

	tmpfile := "/tmp/testrun.toml"
	err := ioutil.WriteFile(tmpfile, []byte(testfile), 0666)
	if err != nil {
		log.Fatal("Couldn't create file:", err)
	}

	tests := platform.ReadRunFile(tplat, tmpfile)
	log.Lvl2(tplat)
	log.Lvlf2("%+v\n", tests[0])
	if tplat.App != "sign" {
		log.Fatal("App should be 'sign'")
	}
	if len(tests) != 2 {
		log.Fatal("There should be 2 tests")
	}
	if tests[0].Get("machines") != "8" {
		log.Fatal("Machines = 8 has not been copied into RunConfig")
	}
}
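
The testfile constant is not shown. Given the assertions above and the
run-file syntax parsed by ReadRunFile (Example #23), it plausibly looks like
this (the per-run column names are invented, and RunConfig keys are assumed to
be matched case-insensitively):

const testfile = `Machines = 8
App = "sign"

Ppm, Rounds
2, 30
4, 30`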
Example #15
File: utils.go Project: nikirill/cothority
// ReadTomlConfig reads any structure from a toml-file.
// Takes a filename and an optional directory-name.
func ReadTomlConfig(conf interface{}, filename string, dirOpt ...string) error {
	buf, err := ioutil.ReadFile(getFullName(filename, dirOpt...))
	if err != nil {
		pwd, _ := os.Getwd()
		log.Lvl1("Didn't find", filename, "in", pwd)
		return err
	}

	_, err = toml.Decode(string(buf), conf)
	if err != nil {
		log.Fatal(err)
	}

	return nil
}
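
A round-trip through WriteTomlConfig (Example #3) and ReadTomlConfig might
look like this (the config type and directory are only for illustration):

type netConfig struct {
	Hosts int
}

func example() {
	WriteTomlConfig(netConfig{Hosts: 8}, "net.toml", "/tmp")
	var nc netConfig
	if err := ReadTomlConfig(&nc, "net.toml", "/tmp"); err != nil {
		log.Fatal("Couldn't read config back:", err)
	}
	log.Lvl2("Read back", nc.Hosts, "hosts")
}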
Example #16
File: users.go Project: nikirill/cothority
// Reads in the deterlab-config and drops out if there is an error
func deterFromConfig(name ...string) *platform.Deterlab {
	d := &platform.Deterlab{}
	configName := "deter.toml"
	if len(name) > 0 {
		configName = name[0]
	}
	err := sda.ReadTomlConfig(d, configName)
	_, caller, line, _ := runtime.Caller(1)
	who := caller + ":" + strconv.Itoa(line)
	if err != nil {
		log.Fatal("Couldn't read config in", who, ":", err)
	}
	log.SetDebugVisible(d.Debug)
	return d
}
Example #17
// Dispatch can handle timeouts
func (p *Propagate) Dispatch() error {
	process := true
	log.Lvl4(p.ServerIdentity())
	for process {
		p.Lock()
		timeout := time.Millisecond * time.Duration(p.sd.Msec)
		p.Unlock()
		select {
		case msg := <-p.ChannelSD:
			log.Lvl3(p.ServerIdentity(), "Got data from", msg.ServerIdentity)
			if p.onData != nil {
				_, netMsg, err := network.UnmarshalRegistered(msg.Data)
				if err == nil {
					p.onData(netMsg)
				}
			}
			if !p.IsRoot() {
				log.Lvl3(p.ServerIdentity(), "Sending to parent")
				p.SendToParent(&PropagateReply{})
			}
			if p.IsLeaf() {
				process = false
			} else {
				log.Lvl3(p.ServerIdentity(), "Sending to children")
				p.SendToChildren(&msg.PropagateSendData)
			}
		case <-p.ChannelReply:
			p.received++
			log.Lvl4(p.ServerIdentity(), "received:", p.received, p.subtree)
			if !p.IsRoot() {
				p.SendToParent(&PropagateReply{})
			}
			if p.received == p.subtree {
				process = false
			}
		case <-time.After(timeout):
			log.Fatal("Timeout")
			process = false
		}
	}
	if p.IsRoot() {
		if p.onDoneCb != nil {
			p.onDoneCb(p.received + 1)
		}
	}
	p.Done()
	return nil
}
Example #18
// Build makes sure that the binary is available for our local platform
func (d *Localhost) Build(build string, arg ...string) error {
	src := "./cothority"
	dst := d.runDir + "/" + d.Simulation
	start := time.Now()
	// build for the local machine
	res, err := Build(src, dst,
		runtime.GOARCH, runtime.GOOS,
		arg...)
	if err != nil {
		log.Fatal("Error while building for localhost (src", src, ", dst", dst, ":", res)
	}
	log.Lvl3("Localhost: Build src", src, ", dst", dst)
	log.Lvl4("Localhost: Results of localhost build:", res)
	log.Lvl2("Localhost: build finished in", time.Since(start))
	return err
}
Example #19
func TestStatsString(t *testing.T) {
	rc := map[string]string{"servers": "10", "hosts": "10"}
	rs := NewStats(rc)
	m := NewMonitor(rs)

	go func() {
		if err := m.Listen(); err != nil {
			log.Fatal("Could not Listen():", err)
		}
	}()
	time.Sleep(100 * time.Millisecond)
	ConnectSink("localhost:10000")
	measure := NewTimeMeasure("test")
	time.Sleep(time.Millisecond * 100)
	measure.Record()
	time.Sleep(time.Millisecond * 100)

	if !strings.Contains(rs.String(), "0.1") {
		t.Fatal("The measurement should contain 0.1:", rs.String())
	}
	m.Stop()
}
Example #20
File: local.go Project: nikirill/cothority
// GenLocalHosts will create n hosts with the first one being connected to each of
// the other nodes if connect is true.
func GenLocalHosts(n int, connect bool, processMessages bool) []*Host {
	hosts := make([]*Host, n)
	for i := 0; i < n; i++ {
		host := NewLocalHost(2000 + i*10)
		hosts[i] = host
	}
	root := hosts[0]
	for _, host := range hosts {
		host.ListenAndBind()
		log.Lvlf3("Listening on %s %x", host.ServerIdentity.First(), host.ServerIdentity.ID)
		if processMessages {
			host.StartProcessMessages()
		}
		if connect && root != host {
			log.Lvl4("Connecting", host.ServerIdentity.First(), host.ServerIdentity.ID, "to",
				root.ServerIdentity.First(), root.ServerIdentity.ID)
			if _, err := host.Connect(root.ServerIdentity); err != nil {
				log.Fatal(host.ServerIdentity.Addresses, "Could not connect hosts", root.ServerIdentity.Addresses, err)
			}
			// Wait for connection accepted in root
			connected := false
			for !connected {
				time.Sleep(time.Millisecond * 10)
				root.networkLock.Lock()
				for id := range root.connections {
					if id.Equal(host.ServerIdentity.ID) {
						connected = true
						break
					}
				}
				root.networkLock.Unlock()
			}
			log.Lvl4(host.ServerIdentity.First(), "is connected to root")
		}
	}
	return hosts
}
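
A test would typically spin the hosts up and tear them down again like this
(a sketch; the Close method on Host is an assumption):

hosts := GenLocalHosts(3, true, true)
defer func() {
	for _, h := range hosts {
		h.Close()
	}
}()
// ... exercise the protocol with hosts[0] as root ...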
Example #21
File: messg.go Project: nikirill/cothority
func (Treq *BlockReply) UnmarshalBinary(data []byte) error {
	log.Fatal("Don't want to do that")
	return nil
}
Example #22
File: users.go Project: nikirill/cothority
func main() {
	// init with deter.toml
	deter := deterFromConfig()
	flag.Parse()

	// kill old processes
	var wg sync.WaitGroup
	re := regexp.MustCompile(" +")
	hosts, err := exec.Command("/usr/testbed/bin/node_list", "-e", deter.Project+","+deter.Experiment).Output()
	if err != nil {
		log.Fatal("Deterlab experiment", deter.Project+"/"+deter.Experiment, "seems not to be swapped in. Aborting.")
	}
	hostsTrimmed := strings.TrimSpace(re.ReplaceAllString(string(hosts), " "))
	hostlist := strings.Split(hostsTrimmed, " ")
	doneHosts := make([]bool, len(hostlist))
	log.Lvl2("Found the following hosts:", hostlist)
	if kill {
		log.Lvl1("Cleaning up", len(hostlist), "hosts.")
	}
	for i, h := range hostlist {
		wg.Add(1)
		go func(i int, h string) {
			defer wg.Done()
			if kill {
				log.Lvl3("Cleaning up host", h, ".")
				runSSH(h, "sudo killall -9 cothority scp 2>/dev/null >/dev/null")
				time.Sleep(1 * time.Second)
				runSSH(h, "sudo killall -9 cothority 2>/dev/null >/dev/null")
				time.Sleep(1 * time.Second)
				// Also kill all other processes that start with "./" - these
				// are probably locally-started processes
				runSSH(h, "sudo pkill -9 -f '\\./'")
				time.Sleep(1 * time.Second)
				if log.DebugVisible() > 3 {
					log.Lvl4("Cleaning report:")
					_ = platform.SSHRunStdout("", h, "ps aux")
				}
			} else {
				log.Lvl3("Setting the file-limit higher on", h)

				// Copy configuration file to make higher file-limits
				err := platform.SSHRunStdout("", h, "sudo cp remote/cothority.conf /etc/security/limits.d")
				if err != nil {
					log.Fatal("Couldn't copy limit-file:", err)
				}
			}
			doneHosts[i] = true
			log.Lvl3("Host", h, "cleaned up")
		}(i, h)
	}

	cleanupChannel := make(chan string)
	go func() {
		wg.Wait()
		log.Lvl3("Done waiting")
		cleanupChannel <- "done"
	}()
	select {
	case msg := <-cleanupChannel:
		log.Lvl3("Received msg from cleanupChannel", msg)
	case <-time.After(time.Second * 20):
		for i, m := range doneHosts {
			if !m {
				log.Lvl1("Missing host:", hostlist[i], "- You should run")
				log.Lvl1("/usr/testbed/bin/node_reboot", hostlist[i])
			}
		}
		log.Fatal("Didn't receive all replies while cleaning up - aborting.")
	}

	if kill {
		log.Lvl2("Only cleaning up - returning")
		return
	}

	// ADDITIONS : the monitoring part
	// Proxy will listen on Sink:SinkPort and redirect every packet to
	// RedirectionAddress:SinkPort-1. With remote tunnel forwarding it will
	// be forwarded to the real sink
	proxyAddress := deter.ProxyAddress + ":" + strconv.Itoa(deter.MonitorPort+1)
	log.Lvl2("Launching proxy redirecting to", proxyAddress)
	err = monitor.Proxy(proxyAddress)
	if err != nil {
		log.Fatal("Couldn't start proxy:", err)
	}

	log.Lvl1("starting", deter.Servers, "cothorities for a total of", deter.Hosts, "processes.")
	killing := false
	for i, phys := range deter.Phys {
		log.Lvl2("Launching cothority on", phys)
		wg.Add(1)
		go func(phys, internal string) {
			//log.Lvl4("running on", phys, cmd)
			defer wg.Done()
			monitorAddr := deter.MonitorAddress + ":" + strconv.Itoa(deter.MonitorPort)
			log.Lvl4("Starting servers on physical machine ", internal, "with monitor = ",
				monitorAddr)
			args := " -address=" + internal +
				" -simul=" + deter.Simulation +
				" -monitor=" + monitorAddr +
				" -debug=" + strconv.Itoa(log.DebugVisible())
			log.Lvl3("Args is", args)
			err := platform.SSHRunStdout("", phys, "cd remote; sudo ./cothority "+
				args)
			if err != nil && !killing {
				log.Lvl1("Error starting cothority - will kill all others:", err, internal)
				killing = true
				err := exec.Command("killall", "ssh").Run()
				if err != nil {
					log.Fatal("Couldn't killall ssh:", err)
				}
			}
			log.Lvl4("Finished with cothority on", internal)
		}(phys, deter.Virt[i])
	}

	// wait for the servers to finish before stopping
	wg.Wait()
}
Example #23
// ReadRunFile reads from a configuration-file for a run. The configuration-file has the
// following syntax:
// Name1 = value1
// Name2 = value2
// [empty line]
// n1, n2, n3, n4
// v11, v12, v13, v14
// v21, v22, v23, v24
//
// The Name1...Namen are global configuration-options.
// n1..nn are configuration-options for one run
// Both the global and the run-configuration are copied to both
// the platform and the app-configuration.
func ReadRunFile(p Platform, filename string) []RunConfig {
	var runconfigs []RunConfig
	masterConfig := NewRunConfig()
	log.Lvl3("Reading file", filename)

	file, err := os.Open(filename)
	if err != nil {
		log.Fatal("Couldn't open file", filename, err)
	}
	defer func() {
		if err := file.Close(); err != nil {
			log.Error("Couldn't close", file.Name())
		}
	}()

	// Decode the first part of the run-config file,
	// where the config won't change for the whole set of the simulation's tests
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		text := scanner.Text()
		log.Lvl3("Decoding", text)
		// end of the first part
		if text == "" {
			break
		}
		if text[0] == '#' {
			continue
		}

		// checking if format is good
		vals := strings.Split(text, "=")
		if len(vals) != 2 {
			log.Fatal("Simulation file:", filename, " is not properly formatted ( key = value )")
		}
		// fill in the general config
		masterConfig.Put(strings.TrimSpace(vals[0]), strings.TrimSpace(vals[1]))
		// also put it in platform
		if _, err := toml.Decode(text, p); err != nil {
			log.Error("Error decoding", text)
		}
		log.Lvlf5("Platform is now %+v", p)
	}

	for {
		scanner.Scan()
		if scanner.Text() != "" {
			break
		}
	}
	args := strings.Split(scanner.Text(), ", ")
	for scanner.Scan() {
		rc := masterConfig.Clone()
		// put each individual test configs
		for i, value := range strings.Split(scanner.Text(), ", ") {
			rc.Put(strings.TrimSpace(args[i]), strings.TrimSpace(value))
		}
		runconfigs = append(runconfigs, *rc)
	}

	return runconfigs
}
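
A concrete run-file in the syntax parsed above could look like this (all key
names are purely illustrative):

# global options, copied into every RunConfig and decoded into the platform
Simulation = "example"
Machines = 8

Hosts, BF
64, 2
128, 4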
Example #24
// BinaryUnmarshaler sets the different values from a byte-slice
func (c *Client) BinaryUnmarshaler(b []byte) error {
	log.Fatal("Not yet implemented")
	return nil
}
Example #25
// BinaryMarshaler can be used to store the client in a configuration-file
func (c *Client) BinaryMarshaler() ([]byte, error) {
	log.Fatal("Not yet implemented")
	return nil, nil
}
Example #26
// Main starts the host and will set up the protocol.
func main() {
	flag.Parse()
	log.SetDebugVisible(debugVisible)
	log.Lvl3("Flags are:", hostAddress, simul, log.DebugVisible, monitorAddress)

	scs, err := sda.LoadSimulationConfig(".", hostAddress)
	measures := make([]*monitor.CounterIOMeasure, len(scs))
	if err != nil {
		// We probably are not needed
		log.Lvl2(err, hostAddress)
		return
	}
	if monitorAddress != "" {
		if err := monitor.ConnectSink(monitorAddress); err != nil {
			log.Error("Couldn't connect monitor to sink:", err)
		}
	}
	sims := make([]sda.Simulation, len(scs))
	var rootSC *sda.SimulationConfig
	var rootSim sda.Simulation
	for i, sc := range scs {
		// Starting all hosts for that server
		host := sc.Host
		measures[i] = monitor.NewCounterIOMeasure("bandwidth", host)
		log.Lvl3(hostAddress, "Starting host", host.ServerIdentity.Addresses)
		host.Listen()
		host.StartProcessMessages()
		sim, err := sda.NewSimulation(simul, sc.Config)
		if err != nil {
			log.Fatal(err)
		}
		err = sim.Node(sc)
		if err != nil {
			log.Fatal(err)
		}
		sims[i] = sim
		if host.ServerIdentity.ID == sc.Tree.Root.ServerIdentity.ID {
			log.Lvl2(hostAddress, "is root-node, will start protocol")
			rootSim = sim
			rootSC = sc
		}
	}
	if rootSim != nil {
		// If this cothority has the root-host, it will start the simulation
		log.Lvl2("Starting protocol", simul, "on host", rootSC.Host.ServerIdentity.Addresses)
		//log.Lvl5("Tree is", rootSC.Tree.Dump())

		// First count the number of available children
		childrenWait := monitor.NewTimeMeasure("ChildrenWait")
		wait := true
		// The timeout starts with 1 second, which is the time of response between
		// each level of the tree.
		timeout := 1000
		for wait {
			p, err := rootSC.Overlay.CreateProtocolSDA(rootSC.Tree, "Count")
			if err != nil {
				log.Fatal(err)
			}
			proto := p.(*manage.ProtocolCount)
			proto.SetTimeout(timeout)
			proto.Start()
			log.Lvl1("Started counting children with timeout of", timeout)
			select {
			case count := <-proto.Count:
				if count == rootSC.Tree.Size() {
					log.Lvl1("Found all", count, "children")
					wait = false
				} else {
					log.Lvl1("Found only", count, "children, counting again")
				}
			}
			// Double the timeout and try again if not successful.
			timeout *= 2
		}
		childrenWait.Record()
		log.Lvl1("Starting new node", simul)
		measureNet := monitor.NewCounterIOMeasure("bandwidth_root", rootSC.Host)
		err := rootSim.Run(rootSC)
		if err != nil {
			log.Fatal(err)
		}
		measureNet.Record()

		// Test if all ServerIdentities are used in the tree, else we'll run into
		// troubles with CloseAll
		if !rootSC.Tree.UsesList() {
			log.Error("The tree doesn't use all ServerIdentities from the list!\n" +
				"This means that the CloseAll will fail and the experiment never ends!")
		}
		closeTree := rootSC.Tree
		if rootSC.GetSingleHost() {
			// In case of "SingleHost" we need a new tree that contains every
			// entity only once, whereas rootSC.Tree will have the same
			// entity at different TreeNodes, which makes it difficult to
			// correctly close everything.
			log.Lvl2("Making new root-tree for SingleHost config")
			closeTree = rootSC.Roster.GenerateBinaryTree()
			rootSC.Overlay.RegisterTree(closeTree)
		}
		pi, err := rootSC.Overlay.CreateProtocolSDA(closeTree, "CloseAll")
		if err != nil {
			log.Fatal(err)
		}
		pi.Start()
	}

	// Wait for all hosts to be closed
	allClosed := make(chan bool)
	go func() {
		for i, sc := range scs {
			sc.Host.WaitForClose()
			// record the bandwidth
			measures[i].Record()
			log.Lvl3(hostAddress, "Simulation closed host", sc.Host.ServerIdentity.Addresses, "closed")
		}
		allClosed <- true
	}()
	log.Lvl3(hostAddress, scs[0].Host.ServerIdentity.First(), "is waiting for all hosts to close")
	<-allClosed
	log.Lvl2(hostAddress, "has all hosts closed")
	monitor.EndAndCleanup()
}
Example #27
File: messg.go Project: nikirill/cothority
func (Treq BlockReply) MarshalBinary() ([]byte, error) {
	log.Fatal("Don't want to do that")
	return nil, nil
}
Example #28
File: messg.go Project: nikirill/cothority
func (sm *BitCoSiMessage) UnmarshalBinary(data []byte) error {
	log.Fatal("Don't want to do that")
	return nil
}
Example #29
File: proxy.go Project: nikirill/cothority
// Proxy will launch a routine that waits for incoming connections.
// It takes a redirection address so it knows where to redirect incoming packets.
// Proxy will listen on the Sink:SinkPort variables so that the user does not
// have to differentiate between connecting to a proxy or directly to the sink.
// It will panic if it cannot contact the server or cannot bind to the address.
func Proxy(redirection string) error {
	// Connect to the sink

	if err := connectToSink(redirection); err != nil {
		return err
	}
	log.Lvl2("Proxy connected to sink", redirection)

	// The proxy listens on the port one below the redirection port
	_, port, err := net.SplitHostPort(redirection)
	if err != nil {
		log.Fatal("Couldn't get port-numbre from", redirection)
	}
	portNbr, err := strconv.Atoi(port)
	if err != nil {
		log.Fatal("Couldn't convert", port, "to a number")
	}
	sinkAddr := Sink + ":" + strconv.Itoa(portNbr-1)
	ln, err := net.Listen("tcp", sinkAddr)
	if err != nil {
		return fmt.Errorf("Error while binding proxy to addr %s: %v", sinkAddr, err)
	}
	log.Lvl2("Proxy listening on", sinkAddr)
	newConn := make(chan bool)
	closeConn := make(chan bool)
	finished := false
	proxyConns := make(map[string]*json.Encoder)

	// Listen for incoming connections
	go func() {
		for !finished {
			conn, err := ln.Accept()
			if err != nil {
				operr, ok := err.(*net.OpError)
				// the listener is closed
				if ok && operr.Op == "accept" {
					break
				}
				log.Lvl1("Error proxy accepting connection:", err)
				continue
			}
			log.Lvl3("Proxy accepting incoming connection from:", conn.RemoteAddr().String())
			newConn <- true
			proxyConns[conn.RemoteAddr().String()] = json.NewEncoder(conn)
			go proxyConnection(conn, closeConn)
		}
	}()

	go func() {
		// notify every new connection and every end of connection. When all
		// connections are closed, send an "end" measure to the sink.
		var nconn int
		for !finished {
			select {
			case <-newConn:
				nconn++
			case <-closeConn:
				nconn--
				if nconn == 0 {
					// everything is finished
					if err := serverEnc.Encode(NewSingleMeasure("end", 0)); err != nil {
						log.Error("Couldn't send 'end' message:", err)
					}
					if err := serverConn.Close(); err != nil {
						log.Error("Couldn't close server connection:", err)
					}
					if err := ln.Close(); err != nil {
						log.Error("Couldn't close listener:", err)
					}
					finished = true
					break
				}
			}
		}
	}()
	return nil
}
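
proxyConnection is referenced above but not listed. A sketch of what it
presumably does, assuming measures arrive as JSON, that the SingleMeasure type
inferred from NewSingleMeasure above is decodable, and that serverEnc is the
package-level encoder to the sink:

func proxyConnection(conn net.Conn, closeConn chan bool) {
	dec := json.NewDecoder(conn)
	for {
		m := &SingleMeasure{}
		if err := dec.Decode(m); err != nil {
			// connection closed or malformed input - stop proxying it
			break
		}
		if err := serverEnc.Encode(m); err != nil {
			log.Error("Couldn't forward measure to sink:", err)
		}
	}
	closeConn <- true
}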
Example #30
File: messg.go Project: nikirill/cothority
func (tsm BitCoSiMessage) MarshalBinary() ([]byte, error) {
	log.Fatal("Don't want to do that")
	return nil, nil
}