Example #1
func (d *Deter) Build(build string) error {
	dbg.Lvl1("Building for", d.Login, d.Host, d.Project, build)
	start := time.Now()

	var wg sync.WaitGroup

	// Start with a clean build-directory
	current, _ := os.Getwd()
	dbg.Lvl4("Current dir is:", current)
	defer os.Chdir(current)

	// Go into deterlab-dir and create the build-dir
	os.Chdir(d.DeterDir)
	os.RemoveAll(d.BuildDir)
	os.Mkdir(d.BuildDir, 0777)

	// start building the necessary packages
	packages := []string{"logserver", "forkexec", "../../app", "deter"}
	if build != "" {
		packages = strings.Split(build, ",")
	}
	dbg.Lvl3("Starting to build all executables", packages)
	for _, p := range packages {
		basename := path.Base(p)
		dbg.Lvl4("Building ", p, "into", basename)
		wg.Add(1)
		src := p + "/" + basename + ".go"
		dst := d.BuildDir + "/" + basename
		if p == "deter" {
			go func(s, d string) {
				defer wg.Done()
				// the users node has a 386 FreeBSD architecture
				out, err := cliutils.Build(s, d, "386", "freebsd")
				if err != nil {
					cliutils.KillGo()
					fmt.Println(out)
					log.Fatal(err)
				}
			}(src, dst)
			continue
		}
		go func(s, d string) {
			defer wg.Done()
			// deter has an amd64, linux architecture
			out, err := cliutils.Build(s, d, "amd64", "linux")
			if err != nil {
				cliutils.KillGo()
				fmt.Println(out)
				log.Fatal(err)
			}
		}(src, dst)
	}
	// wait for the build to finish
	wg.Wait()
	dbg.Lvl1("Build is finished after", time.Since(start))
	return nil
}
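cliutils.Build above cross-compiles each binary for its target platform (386/freebsd for the users node, amd64/linux for the DeterLab machines). As a rough, hypothetical sketch — not the project's actual implementation — such a helper could wrap go build with the matching GOOS/GOARCH environment:

// buildSketch cross-compiles src into dst for the given architecture and OS
// by shelling out to `go build` (illustrative stand-in for cliutils.Build).
func buildSketch(src, dst, goarch, goos string) (string, error) {
	cmd := exec.Command("go", "build", "-o", dst, src)
	cmd.Env = append(os.Environ(), "GOARCH="+goarch, "GOOS="+goos)
	out, err := cmd.CombinedOutput()
	return string(out), err
}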
Example #2
func ExampleLongFunctions() {
	dbg.Lvl1("Before")
	thisIsAVeryLongFunctionNameThatWillOverflow()
	dbg.Lvl1("After")

	// Output:
	// 1: (     debug_lvl_test.ExampleLongFunctions: 0) - Before
	// 1: (debug_lvl_test.thisIsAVeryLongFunctionNameThatWillOverflow: 0) - Overflow
	// 1: (                       debug_lvl_test.ExampleLongFunctions: 0) - After
}
Example #3
func main() {
	flag.Parse()

	switch app {
	default:
		switch deploy_dst {
		default:
			dbg.Lvl1("Sorry, deployment method", deploy_dst, "not yet implemented")
		case "deterlab":
			dbg.Lvl1("Deploying to deterlab")
			deploy.Start("deterlab", nobuild, build, machines)
		}
	case "server", "client":
		dbg.Lvl1("Sorry,", app, "not yet implemented")
	}
}
Example #4
func RunClient(server string, nmsgs int, name string, rate int) {
	dbg.Lvl4("Starting to run stampclient")
	c := NewClient(name)
	servers := strings.Split(server, ",")

	// connect to all the servers listed
	for _, s := range servers {
		h, p, err := net.SplitHostPort(s)
		if err != nil {
			log.Fatal("improperly formatted host")
		}
		pn, err := strconv.Atoi(p)
		if err != nil {
			log.Fatal("improperly formatted port:", p)
		}
		c.AddServer(s, coconet.NewTCPConn(net.JoinHostPort(h, strconv.Itoa(pn+1))))
	}

	// Check if somebody asks for the old way
	if rate < 0 {
		log.Fatal("Rounds based limiting deprecated")
	}

	// Stream time coll_stamp requests
	// if rate specified send out one message every rate milliseconds
	dbg.Lvl1(name, "starting to stream at rate", rate)
	streamMessgs(c, servers, rate)
	dbg.Lvl4("Finished streaming")
}
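The actual streaming happens in streamMessgs, which is not part of this listing. A hypothetical sketch of such a loop — assuming the client exposes a TimeStamp call — sending one request every rate milliseconds, round-robin over the servers:

// streamMessgsSketch is an illustrative stand-in: every `rate` milliseconds it
// asks the next server (round-robin) to timestamp a fixed message.
func streamMessgsSketch(c *Client, servers []string, rate int) {
	ticker := time.NewTicker(time.Duration(rate) * time.Millisecond)
	defer ticker.Stop()
	msg := []byte("hello world")
	for i := 0; ; i++ {
		<-ticker.C
		srv := servers[i%len(servers)]
		if err := c.TimeStamp(msg, srv); err != nil {
			dbg.Lvl4("timestamp request to", srv, "failed:", err)
		}
	}
}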
Example #5
// Checks whether host, login and project are defined. If any of them are missing, it will
// ask on the command-line.
// For the login-variable, it will try to set up a connection to d.Host and copy over the
// public key for easier communication.
func (d *Deter) checkDeterlabVars() {
	// Read the config-file and ask for any missing values
	var config, err = ReadConfig(d.DeterDir)

	if err != nil {
		dbg.Lvl1("Couldn't read config-file - asking for default values")
	}

	if config.Host == "" {
		d.Host = readString("Please enter the hostname of deterlab (enter for 'users.deterlab.net'): ",
			"users.deterlab.net")
	} else {
		d.Host = config.Host
	}

	if config.Login == "" {
		d.Login = readString("Please enter the login-name on "+d.Host+":", "")
	} else {
		d.Login = config.Login
	}

	if config.Project == "" {
		d.Project = readString("Please enter the project on deterlab: ", "Dissent-CS")
	} else {
		d.Project = config.Project
	}

	d.WriteConfig(d.DeterDir)
}
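readString is the prompt helper used for the default values above; a minimal sketch of what it might look like — an assumption about its behavior (read a line from stdin, fall back to the default on empty input), not the project's code:

// readStringSketch prompts on stdin and returns the given default when the
// user just presses enter (hypothetical helper, shown for illustration).
func readStringSketch(prompt, def string) string {
	fmt.Print(prompt)
	line, err := bufio.NewReader(os.Stdin).ReadString('\n')
	if err != nil {
		return def
	}
	if line = strings.TrimSpace(line); line == "" {
		return def
	}
	return line
}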
Example #6
// hpn, bf, nmsgsG
func RunTest(t T) (RunStats, error) {
	// add timeout for 10 minutes?
	done := make(chan struct{})
	var rs RunStats
	cfg := &Config{
		t.nmachs, deploy_config.Nloggers, t.hpn, t.bf,
		-1, t.rate, t.rounds, t.failures, t.rFail, t.fFail,
		deploy_config.Debug, deploy_config.RootWait, t.app, deploy_config.Suite}

	dbg.Lvl1("Running test with parameters", cfg)
	dbg.Lvl1("Failures percent is", t.failures)

	deployP.Configure(cfg)
	deployP.Deploy()
	err := deployP.Start()
	if err != nil {
		return rs, err
	}

	// give it a while to start up
	time.Sleep(10 * time.Second)

	go func() {
		rs = Monitor(t.bf)
		deployP.Stop()
		dbg.Lvl2("Test complete:", rs)
		done <- struct{}{}
	}()

	// timeout the command if it takes too long
	select {
	case <-done:
		if isZero(rs.MinTime) || isZero(rs.MaxTime) || isZero(rs.AvgTime) || math.IsNaN(rs.Rate) || math.IsInf(rs.Rate, 0) {
			return rs, fmt.Errorf("unable to get good data: %+v", rs)
		}
		return rs, nil
		/* No time out for the moment
		case <-time.After(5 * time.Minute):
			return rs, errors.New("timed out")
		*/
	}
}
Example #7
func ExampleLevel2() {
	dbg.Lvl1("Level1")
	dbg.Lvl2("Level2")
	dbg.Lvl3("Level3")
	dbg.Lvl4("Level4")
	dbg.Lvl5("Level5")

	// Output:
	// 1: (            debug_lvl_test.ExampleLevel2: 0) - Level1
	// 2: (            debug_lvl_test.ExampleLevel2: 0) - Level2
}
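Only the first two lines show up in the expected output because the dbg package prints a message only when its level is at or below the DebugVisible threshold (2 in these tests). A rough sketch of that gating, with the caller/line prefix left out — an assumption about the package internals, for illustration only:

// lvlSketch prints args prefixed by the level, but only if the level does not
// exceed the current dbg.DebugVisible threshold.
func lvlSketch(level int, args ...interface{}) {
	if level > dbg.DebugVisible {
		return // e.g. the Lvl3..Lvl5 calls above are dropped at DebugVisible == 2
	}
	fmt.Print(level, ": ")
	fmt.Println(args...)
}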
Example #8
func (d *Deter) Deploy() error {
	dbg.Lvl1("Assembling all files and configuration options")
	os.RemoveAll(d.DeployDir)
	os.Mkdir(d.DeployDir, 0777)

	dbg.Lvl1("Writing config-files")

	d.generateHostsFile()
	d.readHosts()
	d.calculateGraph()
	d.WriteConfig()

	// copy the webfile-directory of the logserver to the remote directory
	err := exec.Command("cp", "-a", d.DeterDir+"/logserver/webfiles",
		d.DeterDir+"/cothority.conf", d.DeployDir).Run()
	if err != nil {
		log.Fatal("error copying webfiles:", err)
	}
	build, err := ioutil.ReadDir(d.BuildDir)
	if err != nil {
		log.Fatal("error reading build-directory:", err)
	}
	for _, file := range build {
		err = exec.Command("cp", d.BuildDir+"/"+file.Name(), d.DeployDir).Run()
		if err != nil {
			log.Fatal("error copying build-file:", err)
		}
	}

	dbg.Lvl1("Copying over to", d.Login, "@", d.Host)
	// Copy everything over to deterlabs
	err = cliutils.Rsync(d.Login, d.Host, d.DeployDir+"/", "remote/")
	if err != nil {
		log.Fatal(err)
	}

	dbg.Lvl1("Done copying")

	dbg.Lvl3(cliutils.SshRunStdout(d.Login, d.Host,
		""))

	return nil
}
Example #9
func Start(destination string, nbld bool, build string, machines int) {
	deployP.Configure(deploy_config)
	nobuild = nbld
	deploy_config.Nmachs = machines

	deployP.Stop()

	if !nobuild {
		deployP.Build(build)
	}

	dbg.Lvl1("Starting tests")
	DefaultRounds = 5
	RunTests("schnorr_host_single", SchnorrHostSingle)
	//RunTests("sign_test_single", SignTestSingle)
	//RunTests("sign_test_multi2", SignTestMulti2)
	//RunTests("sign_test_multi", SignTestMulti)
	//RunTests("hosts_test_single", HostsTestSingle)
	//RunTests("hosts_test_short", HostsTestShort)
	//RunTests("hosts_test", HostsTest)
	//RunTests("stamp_test_single", StampTestSingle)
	//RunTests("sign_test_multi", SignTestMulti)
	// test the testing framework
	//RunTests("vote_test_no_signing.csv", VTest)
	//RunTests("hosts_test", HostsTest)
	// t := FailureTests
	// RunTests("failure_test.csv", t)
	// RunTests("vote_test", VotingTest)
	// RunTests("failure_test", FailureTests)
	//RunTests("sign_test", SignTest)
	// t := FailureTests
	// RunTests("failure_test", t)
	// t = ScaleTest(10, 1, 100, 2)
	// RunTests("scale_test.csv", t)
	// how does the branching factor effect speed
	// t = DepthTestFixed(100)
	// RunTests("depth_test.csv", t)

	// load test the client
	// t = RateLoadTest(40, 10)
	// RunTests("load_rate_test_bf10.csv", t)
	// t = RateLoadTest(40, 50)
	// RunTests("load_rate_test_bf50.csv", t)
}
Example #10
func (d *Deter) Start() error {
	dbg.Lvl1("Running with", d.Config.Nmachs, "nodes *", d.Config.Hpn, "hosts per node =",
		d.Config.Nmachs*d.Config.Hpn, "and", d.Config.Nloggers, "loggers")

	// setup port forwarding for viewing log server
	dbg.Lvl3("setup port forwarding for master logger: ", d.masterLogger, d.Login, d.Host)
	cmd := exec.Command(
		"ssh",
		"-t",
		"-t",
		fmt.Sprintf("%s@%s", d.Login, d.Host),
		"-L",
		"8081:"+d.masterLogger+":10000")
	err := cmd.Start()
	if err != nil {
		log.Fatal("failed to setup portforwarding for logging server")
	}

	dbg.Lvl3("running deter with nmsgs:", d.Config.Nmsgs, d.Login, d.Host)
	// run the deter lab boss nodes process
	// it will be responsible for forwarding the files and running the individual
	// timestamping servers

	go func() {
		dbg.Lvl3(cliutils.SshRunStdout(d.Login, d.Host,
			"GOMAXPROCS=8 remote/deter -nmsgs="+strconv.Itoa(d.Config.Nmsgs)+
				" -hpn="+strconv.Itoa(d.Config.Hpn)+
				" -bf="+strconv.Itoa(d.Config.Bf)+
				" -rate="+strconv.Itoa(d.Config.Rate)+
				" -rounds="+strconv.Itoa(d.Config.Rounds)+
				" -debug="+strconv.Itoa(d.Config.Debug)+
				" -failures="+strconv.Itoa(d.Config.Failures)+
				" -rfail="+strconv.Itoa(d.Config.RFail)+
				" -ffail="+strconv.Itoa(d.Config.FFail)+
				" -app="+d.Config.App+
				" -suite="+d.Config.Suite))
		dbg.Lvl3("Sending stop of ssh")
		d.sshDeter <- "stop"
	}()

	return nil
}
Example #11
func (d *Deter) Stop() error {
	killssh := exec.Command("pkill", "-f", "ssh -t -t")
	killssh.Stdout = os.Stdout
	killssh.Stderr = os.Stderr
	err := killssh.Run()
	if err != nil {
		dbg.Lvl3("Stopping ssh: ", err)
	}
	select {
	case msg := <-d.sshDeter:
		if msg == "stop" {
			dbg.Lvl3("SSH is stopped")
		} else {
			dbg.Lvl1("Received other command", msg)
		}
	case <-time.After(time.Second * 3):
		dbg.Lvl3("Timeout error when waiting for end of ssh")
	}
	return nil
}
Example #12
func RunServer(app *config.AppConfig, conf *deploy.Config, hc *config.HostConfig) {
	// run this specific host
	err := hc.Run(false, sign.MerkleTree, app.Hostname)
	if err != nil {
		log.Fatal(err)
	}

	dbg.Lvl3(app.Hostname, "started up in server-mode")

	// Let's start the client if we're the root-node
	if hc.SNodes[0].IsRoot(0) {
		dbg.Lvl1(app.Hostname, "started client")
		RunClient(conf, hc)
	} else {
		// Endless-loop till we stop by tearing down the connections
		for {
			time.Sleep(time.Minute)
		}
	}
}
Example #13
// Calculates a tree that is used for the timestampers
func (d *Deter) calculateGraph() {
	d.virt = d.virt[d.Config.Nloggers:]
	d.phys = d.phys[d.Config.Nloggers:]
	t, hostnames, depth, err := graphs.TreeFromList(d.virt, d.Config.Hpn, d.Config.Bf)
	if err != nil {
		log.Fatal("couldn't create tree from the list of hosts:", err)
	}
	dbg.Lvl2("Depth:", depth)
	dbg.Lvl2("Total hosts:", len(hostnames))
	total := d.Config.Nmachs * d.Config.Hpn
	if len(hostnames) != total {
		dbg.Lvl1("Only calculated", len(hostnames), "out of", total, "hosts - try changing number of",
			"machines or hosts per node")
		log.Fatal("Didn't calculate enough hosts")
	}

	// generate the configuration file from the tree
	cf := config.ConfigFromTree(t, hostnames)
	cfb, err := json.Marshal(cf)
	if err != nil {
		log.Fatal("couldn't marshal the tree-configuration:", err)
	}
	err = ioutil.WriteFile(d.DeployDir+"/tree.json", cfb, 0666)
	if err != nil {
		log.Fatal(err)
	}
}
Example #14
// RunTests runs the given tests and puts the output into the
// given file name. It outputs RunStats in a CSV format.
func RunTests(name string, ts []T) {
	for i := range ts {
		ts[i].nmachs = deploy_config.Nmachs
	}

	MkTestDir()
	rs := make([]RunStats, len(ts))
	f, err := os.OpenFile(TestFile(name), os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0660)
	if err != nil {
		log.Fatal("error opening test file:", err)
	}
	_, err = f.Write(rs[0].CSVHeader())
	if err != nil {
		log.Fatal("error writing test file header:", err)
	}
	err = f.Sync()
	if err != nil {
		log.Fatal("error syncing test file:", err)
	}

	nTimes := 1
	stopOnSuccess := true
	for i, t := range ts {
		// run test t nTimes times
		// take the average of all successful runs
		var runs []RunStats
		for r := 0; r < nTimes; r++ {
			run, err := RunTest(t)
			if err != nil {
				log.Fatalln("error running test:", err)
			}

			if err := deployP.Stop(); err == nil {
				runs = append(runs, run)
				if stopOnSuccess {
					break
				}
			} else {
				dbg.Lvl1("Error stopping deployment for test", r, ":", err)
			}
		}

		if len(runs) == 0 {
			dbg.Lvl1("unable to get any data for test:", t)
			continue
		}

		rs[i] = RunStatsAvg(runs)
		//log.Println(fmt.Sprintf("Writing to CSV for %d: %+v", i, rs[i]))
		_, err := f.Write(rs[i].CSV())
		if err != nil {
			log.Fatal("error writing data to test file:", err)
		}
		err = f.Sync()
		if err != nil {
			log.Fatal("error syncing data to test file:", err)
		}

		cl, err := os.OpenFile(
			TestFile("client_latency_"+name+"_"+strconv.Itoa(i)),
			os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0660)
		if err != nil {
			log.Fatal("error opening test file:", err)
		}
		_, err = cl.Write(rs[i].TimesCSV())
		if err != nil {
			log.Fatal("error writing client latencies to file:", err)
		}
		err = cl.Sync()
		if err != nil {
			log.Fatal("error syncing data to latency file:", err)
		}
		cl.Close()

	}
}
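RunStatsAvg is not part of this listing; a minimal sketch of what such an aggregation could do with the RunStats fields that Monitor (Example #22) fills in, assuming they are float64 — an illustration, not the actual implementation:

// runStatsAvgSketch combines several successful runs: overall minimum and
// maximum round-times plus the arithmetic mean of the averaged metrics.
func runStatsAvgSketch(runs []RunStats) RunStats {
	avg := runs[0]
	for _, r := range runs[1:] {
		if r.MinTime < avg.MinTime {
			avg.MinTime = r.MinTime
		}
		if r.MaxTime > avg.MaxTime {
			avg.MaxTime = r.MaxTime
		}
		avg.AvgTime += r.AvgTime
		avg.Rate += r.Rate
	}
	n := float64(len(runs))
	avg.AvgTime /= n
	avg.Rate /= n
	return avg
}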
Example #15
func RunServer(hosts *config.HostsConfig, app *config.AppConfig, depl *deploy.Config) {
	s := config.GetSuite(depl.Suite)
	poly.SUITE = s
	poly.SECURITY = poly.MODERATE
	n := len(hosts.Hosts)

	info := poly.PolyInfo{
		N: n,
		R: n,
		T: n,
	}
	indexPeer := -1
	for i, h := range hosts.Hosts {
		if h == app.Hostname {
			indexPeer = i
			break
		}
	}
	if indexPeer == -1 {
		log.Fatal("Peer ", app.Hostname, "(", app.PhysAddr, ") did not find any match for its name. Aborting.")
	}

	start := time.Now()
	dbg.Lvl1("Creating new peer ", app.Hostname, "(", app.PhysAddr, ") ...")
	// indexPeer == 0 <==> peer is root
	p := NewPeer(indexPeer, app.Hostname, info, indexPeer == 0)

	// make it listen
	dbg.Lvl2("Peer", app.Hostname, "is now listening for incoming connections")
	go p.Listen()

	// then connect it to its successor in the list
	for _, h := range hosts.Hosts[indexPeer+1:] {
		dbg.Lvl2("Peer ", app.Hostname, " will connect to ", h)
		// will connect and SYN with the remote peer
		p.ConnectTo(h)
	}
	// Wait until this peer is connected / SYN'd with each other peer
	p.WaitSYNs()

	if p.IsRoot() {
		delta := time.Since(start)
		dbg.Lvl2(p.String(), "Connections accomplished in", delta)
		log.WithFields(log.Fields{
			"file":  logutils.File(),
			"type":  "schnorr_connect",
			"round": 0,
			"time":  delta,
		}).Info("")
	}

	// start to record
	start = time.Now()

	// Setup the schnorr system amongst peers
	p.SetupDistributedSchnorr()
	p.SendACKs()
	p.WaitACKs()
	dbg.Lvl1(p.String(), "completed Schnorr setup")

	// send setup time if we're root
	if p.IsRoot() {
		delta := time.Since(start)
		dbg.Lvl2(p.String(), "setup accomplished in ", delta)
		log.WithFields(log.Fields{
			"file":  logutils.File(),
			"type":  "schnorr_setup",
			"round": 0,
			"time":  delta,
		}).Info("")
	}

	for round := 0; round < depl.Rounds; round++ {
		if p.IsRoot() {
			dbg.Lvl2("Starting round", round)
		}

		// Then issue a signature !
		start = time.Now()
		msg := "hello world"

		// Only root calculates if it's OK and sends a log-message
		if p.IsRoot() {
			sig := p.SchnorrSigRoot([]byte(msg))
			err := p.VerifySchnorrSig(sig, []byte(msg))
			if err != nil {
				dbg.Fatal(p.String(), "could not verify schnorr signature :/ ", err)
			}

			dbg.Lvl2(p.String(), "verified the schnorr sig !")
			// record time
			delta := time.Since(start)
			dbg.Lvl2(p.String(), "signature done in ", delta)
			log.WithFields(log.Fields{
				"file":  logutils.File(),
				"type":  "schnorr_round",
				"round": round,
				"time":  delta,
			}).Info("")
		} else {
			// Compute the partial sig and send it to the root
			p.SchnorrSigPeer([]byte(msg))
		}
	}

	p.WaitFins()
	dbg.Lvl1(p.String(), "is leaving ...")

	if p.IsRoot() {
		log.WithFields(log.Fields{
			"file": logutils.File(),
			"type": "schnorr_end",
		}).Info("")
	}
}
Example #16
func main() {
	deter, err := deploy.ReadConfig()
	if err != nil {
		log.Fatal("Couldn't read config in logserver:", err)
	}
	cfg = deter.Config
	dbg.DebugVisible = cfg.Debug

	runtime.GOMAXPROCS(runtime.NumCPU())
	// read in from flags the port I should be listening on
	flag.Parse()

	if master == "" {
		isMaster = true
	}
	var role string
	if isMaster {
		role = "Master"
	} else {
		role = "Servent"
	}
	dbg.Lvl3("running logserver", role, "with nmsgs", cfg.Nmsgs, "branching factor: ", cfg.Bf)
	if isMaster {
		var err error
		homePage, err = template.ParseFiles("webfiles/home.html")
		if err != nil {
			log.Fatal("unable to parse home.html", err)
		}

		debugServers := getDebugServers()
		for _, s := range debugServers {
			reverseProxy(s)
		}

		dbg.Lvl4("Log server", role, "running at :", addr)
		// /webfiles/Chart.js/Chart.min.js
		http.HandleFunc("/", homeHandler)
		fs := http.FileServer(http.Dir("webfiles/"))
		http.Handle("/webfiles/", http.StripPrefix("/webfiles/", fs))
	} else {
		// declared before the label so the retry-counter isn't reset on every attempt
		tries := 0
		var err error
		origin := "http://localhost/"
		url := "ws://" + master + "/_log"
	retry:
		wsmaster, err = websocket.Dial(url, "", origin)
		if err != nil {
			tries++
			time.Sleep(time.Second)
			dbg.Lvl4("Slave log server could not connect to logger master (", master, ") - trying again (", tries, ")")
			goto retry
		}
		dbg.Lvl4("Slave Log server", role, "running at :", addr, "& connected to Master ")
	}
	http.Handle("/_log", websocket.Handler(logEntryHandler))
	http.Handle("/log", websocket.Handler(logHandler))
	http.HandleFunc("/htmllog", logHandlerHtml)
	http.HandleFunc("/htmllogrev", logHandlerHtmlReverse)
	dbg.Lvl1("Log-server", addr, "ready for service")
	log.Fatalln("ERROR: ", http.ListenAndServe(addr, nil))
	// now combine that port
}
Example #17
func thisIsAVeryLongFunctionNameThatWillOverflow() {
	dbg.Lvl1("Overflow")
}
Example #18
func (sn *Node) StartAnnouncement(am *AnnouncementMessage) error {
	sn.AnnounceLock.Lock()
	defer sn.AnnounceLock.Unlock()

	dbg.Lvl1("root", sn.Name(), "starting announcement round for round: ", sn.nRounds, "on view", sn.ViewNo)

	first := time.Now()
	total := time.Now()
	var firstRoundTime time.Duration
	var totalTime time.Duration

	ctx, cancel := context.WithTimeout(context.Background(), MAX_WILLING_TO_WAIT)
	var cancelederr error
	go func() {
		var err error
		if am.Vote != nil {
			err = sn.Propose(am.Vote.View, am, "")
		} else {
			err = sn.Announce(sn.ViewNo, am)
		}

		if err != nil {
			log.Errorln(err)
			cancelederr = err
			cancel()
		}
	}()

	// 1st Phase succeeded or connection error
	select {
	case <-sn.commitsDone:
		// log time it took for first round to complete
		firstRoundTime = time.Since(first)
		sn.logFirstPhase(firstRoundTime)
	case <-sn.closed:
		return errors.New("closed")
	case <-ctx.Done():
		log.Errorln(ctx.Err())
		if ctx.Err() == context.Canceled {
			return cancelederr
		}
		return errors.New("Really bad. Round did not finish commit phase and did not report network errors.")
	}

	// 2nd Phase succeeded or connection error
	select {
	case <-sn.done:
		// log time it took for second round to complete
		totalTime = time.Since(total)
		sn.logSecondPhase(totalTime - firstRoundTime)
		sn.logTotalTime(totalTime)
		return nil
	case <-sn.closed:
		return errors.New("closed")
	case <-ctx.Done():
		log.Errorln(ctx.Err())
		if ctx.Err() == context.Canceled {
			return cancelederr
		}
		return errors.New("Really bad. Round did not finish response phase and did not report network errors.")
	}
}
Example #19
// Dispatch-function for running either client or server (mode-parameter)
func Run(app *config.AppConfig, conf *deploy.Config) {
	// Do some common setup
	if app.Mode == "client" {
		app.Hostname = app.Name
	}
	dbg.Lvl3(app.Hostname, "Starting to run")
	if conf.Debug > 1 {
		sign.DEBUG = true
	}

	if app.Hostname == "" {
		log.Fatal("no hostname given", app.Hostname)
	}

	// load the configuration
	dbg.Lvl3("loading configuration for", app.Hostname)
	var hc *config.HostConfig
	var err error
	s := GetSuite(conf.Suite)
	opts := config.ConfigOptions{ConnType: "tcp", Host: app.Hostname, Suite: s}
	if conf.Failures > 0 || conf.FFail > 0 {
		opts.Faulty = true
	}
	hc, err = config.LoadConfig("tree.json", opts)
	if err != nil {
		fmt.Println(err)
		log.Fatal(err)
	}

	// Wait for everybody to be ready before going on
	ioutil.WriteFile("coll_stamp_up/up"+app.Hostname, []byte("started"), 0666)
	for {
		_, err := os.Stat("coll_stamp_up")
		if err == nil {
			files, _ := ioutil.ReadDir("coll_stamp_up")
			dbg.Lvl4(app.Hostname, "waiting for others to finish", len(files))
			time.Sleep(time.Second)
		} else {
			break
		}
	}
	dbg.Lvl2(app.Hostname, "thinks everybody's here")

	// set FailureRates
	if conf.Failures > 0 {
		for i := range hc.SNodes {
			hc.SNodes[i].FailureRate = conf.Failures
		}
	}

	// set root failures
	if conf.RFail > 0 {
		for i := range hc.SNodes {
			hc.SNodes[i].FailAsRootEvery = conf.RFail
		}
	}
	// set follower failures
	// a follower fails on %ffail round with failureRate probability
	for i := range hc.SNodes {
		hc.SNodes[i].FailAsFollowerEvery = conf.FFail
	}

	defer func() {
		dbg.Lvl1("Collective Signing", app.Hostname, "has terminated in mode", app.Mode)
	}()

	switch app.Mode {
	case "client":
		log.Panic("No client mode")
	case "server":
		RunServer(app, conf, hc)
	}
}
Example #20
func main() {
	deter, err := deploy.ReadConfig("remote")
	if err != nil {
		log.Fatal("Couldn't read config in deter:", err)
	}
	conf = deter.Config
	dbg.DebugVisible = conf.Debug

	dbg.Lvl1("running deter with nmsgs:", conf.Nmsgs, "rate:", conf.Rate, "rounds:", conf.Rounds, "debug:", conf.Debug)

	virt, err := cliutils.ReadLines("remote/virt.txt")
	if err != nil {
		log.Fatal(err)
	}
	phys, err := cliutils.ReadLines("remote/phys.txt")
	if err != nil {
		log.Fatal(err)
	}
	vpmap := make(map[string]string)
	for i := range virt {
		vpmap[virt[i]] = phys[i]
	}
	// kill old processes
	var wg sync.WaitGroup
	doneHosts := make([]bool, len(phys))
	for i, h := range phys {
		wg.Add(1)
		go func(i int, h string) {
			defer wg.Done()
			dbg.Lvl4("Cleaning up host", h)
			cliutils.SshRun("", h, "sudo killall app forkexec logserver timeclient scp ssh 2>/dev/null >/dev/null")
			time.Sleep(1 * time.Second)
			cliutils.SshRun("", h, "sudo killall app 2>/dev/null >/dev/null")
			if dbg.DebugVisible > 3 {
				dbg.Lvl4("Killing report:")
				cliutils.SshRunStdout("", h, "ps ax")
			}
			doneHosts[i] = true
			dbg.Lvl3("Host", h, "cleaned up")
		}(i, h)
	}

	cleanupChannel := make(chan string)
	go func() {
		wg.Wait()
		dbg.Lvl3("Done waiting")
		cleanupChannel <- "done"
	}()
	select {
	case msg := <-cleanupChannel:
		dbg.Lvl3("Received msg from cleanupChannel", msg)
	case <-time.After(time.Second * 10):
		for i, m := range doneHosts {
			if !m {
				dbg.Lvl1("Missing host:", phys[i])
			}
		}
		dbg.Fatal("Didn't receive all replies.")
	}

	if kill {
		dbg.Lvl1("Returning only from cleanup")
		return
	}

	/*
		 * Why copy the stuff to the other nodes? We have NFS, no?
		for _, h := range phys {
			wg.Add(1)
			go func(h string) {
				defer wg.Done()
				cliutils.Rsync("", h, "remote", "")
			}(h)
		}
		wg.Wait()
	*/

	nloggers := conf.Nloggers
	masterLogger := phys[0]
	loggers := []string{masterLogger}
	dbg.Lvl3("Going to create", nloggers, "loggers")
	for n := 1; n < nloggers; n++ {
		loggers = append(loggers, phys[n])
	}

	phys = phys[nloggers:]
	virt = virt[nloggers:]

	// Read in and parse the configuration file
	file, err := ioutil.ReadFile("remote/tree.json")
	if err != nil {
		log.Fatalf("deter.go: error reading configuration file: %v", err)
	}
	dbg.Lvl4("cfg file:", string(file))
	var cf config.ConfigFile
	err = json.Unmarshal(file, &cf)
	if err != nil {
		log.Fatal("unable to unmarshal config.ConfigFile:", err)
	}

	hostnames := cf.Hosts
	dbg.Lvl4("hostnames:", hostnames)

	depth := graphs.Depth(cf.Tree)
	var random_leaf string
	cf.Tree.TraverseTree(func(t *graphs.Tree) {
		if random_leaf != "" {
			return
		}
		if len(t.Children) == 0 {
			random_leaf = t.Name
		}
	})

	rootname = hostnames[0]

	dbg.Lvl4("depth of tree:", depth)

	// mapping from physical node name to the timestamp servers that are running there
	// essentially a reverse mapping of vpmap except ports are also used
	physToServer := make(map[string][]string)
	for _, virt := range hostnames {
		v, _, _ := net.SplitHostPort(virt)
		p := vpmap[v]
		ss := physToServer[p]
		ss = append(ss, virt)
		physToServer[p] = ss
	}

	// start up the logging server on the final host at port 10000
	dbg.Lvl1("starting up logservers: ", loggers)
	// start up the master logger
	loggerports := make([]string, len(loggers))
	for i, logger := range loggers {
		loggerport := logger + ":10000"
		loggerports[i] = loggerport
		// redirect to the master logger
		master := masterLogger + ":10000"
		// if this is the master logger then don't set the master to anything
		if loggerport == masterLogger+":10000" {
			master = ""
		}

		// Copy configuration file to make higher file-limits
		err = cliutils.SshRunStdout("", logger, "sudo cp remote/cothority.conf /etc/security/limits.d")

		if err != nil {
			log.Fatal("Couldn't copy limit-file:", err)
		}

		go cliutils.SshRunStdout("", logger, "cd remote; sudo ./logserver -addr="+loggerport+
			" -master="+master)
	}

	i := 0
	// For coll_stamp we have to wait for everything in place which takes quite some time
	// We set up a directory and every host writes a file once it's ready to listen
	// When everybody is ready, the directory is deleted and the test starts
	coll_stamp_dir := "remote/coll_stamp_up"
	if conf.App == "coll_stamp" || conf.App == "coll_sign" {
		os.RemoveAll(coll_stamp_dir)
		os.MkdirAll(coll_stamp_dir, 0777)
		time.Sleep(time.Second)
	}
	dbg.Lvl1("starting", len(physToServer), "forkexecs")
	totalServers := 0
	for phys, virts := range physToServer {
		if len(virts) == 0 {
			continue
		}
		totalServers += len(virts)
		dbg.Lvl1("Launching forkexec for", len(virts), "clients on", phys)
		//cmd := GenExecCmd(phys, virts, loggerports[i], random_leaf)
		wg.Add(1)
		// pass the logger-port as an argument so the goroutine doesn't race on i
		go func(phys, logger string) {
			//dbg.Lvl4("running on ", phys, cmd)
			defer wg.Done()
			dbg.Lvl4("Starting servers on physical machine ", phys)
			err := cliutils.SshRunStdout("", phys, "cd remote; sudo ./forkexec"+
				" -physaddr="+phys+" -logger="+logger)
			if err != nil {
				log.Fatal("Error starting timestamper:", err, phys)
			}
			dbg.Lvl4("Finished with Timestamper", phys)
		}(phys, loggerports[i])
		i = (i + 1) % len(loggerports)
	}

	if conf.App == "coll_stamp" || conf.App == "coll_sign" {
		// Every stampserver that started up (mostly waiting for configuration-reading)
		// writes its name in coll_stamp_dir - once everybody is there, the directory
		// is cleaned to flag it's OK to go on.
		start_config := time.Now()
		for {
			files, err := ioutil.ReadDir(coll_stamp_dir)
			if err != nil {
				log.Fatal("Couldn't read directory", coll_stamp_dir, err)
			} else {
				dbg.Lvl1("Stampservers started:", len(files), "/", totalServers, "after", time.Since(start_config))
				if len(files) == totalServers {
					os.RemoveAll(coll_stamp_dir)
					// 1st second for everybody to see the deleted directory
					// 2nd second for everybody to start up listening
					time.Sleep(2 * time.Second)
					break
				}
			}
			time.Sleep(time.Second)
		}
	}

	switch conf.App {
	case "coll_stamp":
		dbg.Lvl1("starting", len(physToServer), "time clients")
		// start up one timeclient per physical machine
		// it requests timestamps from all the servers on that machine
		for p, ss := range physToServer {
			if len(ss) == 0 {
				continue
			}
			servers := strings.Join(ss, ",")
			go func(i int, p string) {
				_, err := cliutils.SshRun("", p, "cd remote; sudo ./app -mode=client -app="+conf.App+
					" -name=client@"+p+
					" -server="+servers+
					" -logger="+loggerports[i])
				if err != nil {
					dbg.Lvl4("Deter.go : timeclient error ", err)
				}
				dbg.Lvl4("Deter.go : Finished with timeclient", p)
			}(i, p)
			i = (i + 1) % len(loggerports)
		}
	case "coll_sign_no":
		// TODO: for now it's only a simple startup from the server
		dbg.Lvl1("Starting only one client")
		/*
			p := physToServer[0][0]
			servers := strings.Join(physToServer[0][1], ",")
			_, err = cliutils.SshRun("", p, "cd remote; sudo ./app -mode=client -app=" + conf.App +
			" -name=client@" + p +
			" -server=" + servers +
			" -logger=" + loggerports[i])
			i = (i + 1) % len(loggerports)
		*/
	}

	// wait for the servers to finish before stopping
	wg.Wait()
	//time.Sleep(10 * time.Minute)
}
Example #21
func RunClient(conf *deploy.Config, hc *config.HostConfig) {
	buck := make([]int64, 300)
	roundsAfter := make([]int64, MAX_N_ROUNDS)
	times := make([]int64, MAX_N_SECONDS*1000) // maximum number of milliseconds (maximum rate > 1 per millisecond)

	dbg.Lvl1("Going to run client and asking servers to print")
	time.Sleep(3 * time.Second)
	hc.SNodes[0].RegisterDoneFunc(RoundDone)
	start := time.Now()
	tFirst := time.Now()

	for i := 0; i < conf.Rounds; i++ {
		time.Sleep(time.Second)
		//fmt.Println("ANNOUNCING")
		hc.SNodes[0].LogTest = []byte("Hello World")
		dbg.Lvl3("Going to launch announcement ", hc.SNodes[0].Name())
		start = time.Now()
		t0 := time.Now()

		err := hc.SNodes[0].StartSigningRound()
		if err != nil {
			dbg.Lvl1(err)
		}

		select {
		case msg := <-done:
			dbg.Lvl3("Received reply from children", msg)
		case <-time.After(10 * ROUND_TIME):
			dbg.Lvl3("client timed out waiting for a response from the children")
			continue
		}

		t := time.Since(t0)
		elapsed := time.Since(start)
		secToTimeStamp := t.Seconds()
		secSinceFirst := time.Since(tFirst).Seconds()
		atomic.AddInt64(&buck[int(secSinceFirst)], 1)
		index := int(secToTimeStamp) / int(ROUND_TIME/time.Second)
		atomic.AddInt64(&roundsAfter[index], 1)
		atomic.AddInt64(&times[i], t.Nanoseconds())
		log.WithFields(log.Fields{
			"file":  logutils.File(),
			"type":  "root_announce",
			"round": i,
			"time":  elapsed,
		}).Info("")

		log.WithFields(log.Fields{
			"file":  logutils.File(),
			"type":  "root_round",
			"round": i,
			"time":  elapsed,
		}).Info("root round")
	}

	log.WithFields(log.Fields{
		"file":        logutils.File(),
		"type":        "client_msg_stats",
		"buck":        removeTrailingZeroes(buck),
		"roundsAfter": removeTrailingZeroes(roundsAfter),
		"times":       removeTrailingZeroes(times),
	}).Info("")

	// And tell everybody to quit
	err := hc.SNodes[0].CloseAll(hc.SNodes[0].Round)
	if err != nil {
		log.Fatal("Couldn't close:", err)
	}
}
Example #22
// Monitor watches the log-server output and aggregates the results into a RunStats
func Monitor(bf int) RunStats {
	dbg.Lvl1("Starting monitoring")
	defer dbg.Lvl1("Done monitoring")
retry_dial:
	ws, err := websocket.Dial(fmt.Sprintf("ws://localhost:%d/log", port), "", "http://localhost/")
	if err != nil {
		time.Sleep(1 * time.Second)
		goto retry_dial
	}
retry:
	// Get HTML of webpage for data (NHosts, Depth, ...)
	doc, err := goquery.NewDocument(fmt.Sprintf("http://localhost:%d/", port))
	if err != nil {
		dbg.Lvl4("unable to get log data: retrying:", err)
		time.Sleep(10 * time.Second)
		goto retry
	}
	nhosts := doc.Find("#numhosts").First().Text()
	dbg.Lvl4("hosts:", nhosts)
	depth := doc.Find("#depth").First().Text()
	dbg.Lvl4("depth:", depth)
	nh, err := strconv.Atoi(nhosts)
	if err != nil {
		log.Fatal("unable to convert hosts to be a number:", nhosts)
	}
	d, err := strconv.Atoi(depth)
	if err != nil {
		log.Fatal("unable to convert depth to be a number:", depth)
	}
	clientDone := false
	rootDone := false
	var rs RunStats
	rs.NHosts = nh
	rs.Depth = d
	rs.BF = bf

	var M, S float64
	k := float64(1)
	first := true
	for {
		var data []byte
		err := websocket.Message.Receive(ws, &data)
		if err != nil {
			// if it is an EOF error then stop reading
			if err == io.EOF {
				dbg.Lvl4("websocket terminated before emitting EOF or terminating string")
				break
			}
			continue
		}
		if bytes.Contains(data, []byte("EOF")) || bytes.Contains(data, []byte("terminating")) {
			dbg.Lvl2(
				"EOF/terminating Detected: need forkexec to report and clients: rootDone", rootDone, "clientDone", clientDone)
		}
		if bytes.Contains(data, []byte("root_round")) {
			dbg.Lvl4("root_round msg received (clientDone = ", clientDone, ", rootDone = ", rootDone, ")")

			if clientDone || rootDone {
				dbg.Lvl4("Continuing searching data")
				// ignore after we have received our first EOF
				continue
			}
			var entry StatsEntry
			err := json.Unmarshal(data, &entry)
			if err != nil {
				log.Fatal("json unmarshalled improperly:", err)
			}
			if entry.Type != "root_round" {
				dbg.Lvl1("Wrong debugging message - ignoring")
				continue
			}
			dbg.Lvl4("root_round:", entry)
			if first {
				first = false
				dbg.Lvl4("Setting min-time to", entry.Time)
				rs.MinTime = entry.Time
				rs.MaxTime = entry.Time
			}
			if entry.Time < rs.MinTime {
				dbg.Lvl4("Setting min-time to", entry.Time)
				rs.MinTime = entry.Time
			} else if entry.Time > rs.MaxTime {
				rs.MaxTime = entry.Time
			}

			rs.AvgTime = ((rs.AvgTime * (k - 1)) + entry.Time) / k

			var tM = M
			M += (entry.Time - tM) / k
			S += (entry.Time - tM) * (entry.Time - M)
			k++
			rs.StdDev = math.Sqrt(S / (k - 1))
		} else if bytes.Contains(data, []byte("schnorr_round")) {

			var entry StatsEntry
			err := json.Unmarshal(data, &entry)
			if err != nil {
				log.Fatal("json unmarshalled improperly:", err)
			}
			if entry.Type != "schnorr_round" {
				dbg.Lvl1("Wrong debugging message - ignoring")
				continue
			}
			dbg.Lvl4("schnorr_round:", entry)
			if first {
				first = false
				dbg.Lvl4("Setting min-time to", entry.Time)
				rs.MinTime = entry.Time
				rs.MaxTime = entry.Time
			}
			if entry.Time < rs.MinTime {
				dbg.Lvl4("Setting min-time to", entry.Time)
				rs.MinTime = entry.Time
			} else if entry.Time > rs.MaxTime {
				rs.MaxTime = entry.Time
			}

			rs.AvgTime = ((rs.AvgTime * (k - 1)) + entry.Time) / k

			var tM = M
			M += (entry.Time - tM) / k
			S += (entry.Time - tM) * (entry.Time - M)
			k++
			rs.StdDev = math.Sqrt(S / (k - 1))
		} else if bytes.Contains(data, []byte("schnorr_end")) {
			break
		} else if bytes.Contains(data, []byte("forkexec")) {
			if rootDone {
				continue
			}
			var ss SysStats
			err := json.Unmarshal(data, &ss)
			if err != nil {
				log.Fatal("unable to unmarshal forkexec:", ss)
			}
			rs.SysTime = ss.SysTime
			rs.UserTime = ss.UserTime
			dbg.Lvl4("forkexec:", ss)
			rootDone = true
			dbg.Lvl2("Monitor() Forkexec msg received (clientDone = ", clientDone, ", rootDone = ", rootDone, ")")
			if clientDone {
				break
			}
		} else if bytes.Contains(data, []byte("client_msg_stats")) {
			if clientDone {
				continue
			}
			var cms ClientMsgStats
			err := json.Unmarshal(data, &cms)
			if err != nil {
				log.Fatal("unable to unmarshal client_msg_stats:", string(data))
			}
			// what do I want to keep out of the Client Message States
			// cms.Buckets stores how many were processed at time T
			// cms.RoundsAfter stores how many rounds delayed it was
			//
			// get the average delay (roundsAfter), max and min
			// get the total number of messages timestamped
			// get the average number of messages timestamped per second?
			avg, _, _, _ := ArrStats(cms.Buckets)
			// get the observed rate of processed messages
			// avg is messages per second; we want the number of milliseconds between messages
			observed := avg / 1000 // messages per millisecond
			observed = 1 / observed
			rs.Rate = observed
			rs.Times = cms.Times
			dbg.Lvl2("Monitor() Client Msg stats received (clientDone = ", clientDone, ",rootDone = ", rootDone, ")")
			clientDone = true
			if rootDone {
				break
			}
		}
	}
	return rs
}
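The M/S/k updates in the root_round and schnorr_round branches above are Welford's online algorithm for a running mean and deviation. A self-contained sketch of the same update with conventional naming (illustrative, not the project's code):

// welford accumulates a running mean and squared-deviation sum without
// storing the individual samples (the M, S and k variables in Monitor).
type welford struct {
	n    float64 // samples seen so far
	mean float64 // running mean (M)
	s    float64 // sum of squared deviations from the mean (S)
}

func (w *welford) add(x float64) {
	w.n++
	oldMean := w.mean
	w.mean += (x - oldMean) / w.n
	w.s += (x - oldMean) * (x - w.mean)
}

// stdDev returns the population standard deviation; Monitor's
// math.Sqrt(S/(k-1)) evaluates to the same thing because its k runs one
// ahead of the number of samples.
func (w *welford) stdDev() float64 {
	if w.n == 0 {
		return 0
	}
	return math.Sqrt(w.s / w.n)
}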
Example #23
func RunServer(hostname, app string, rounds int, rootwait int, debug int, testConnect bool,
	failureRate, rFail, fFail int, logger, suite string) {
	dbg.Lvl3(hostname, "Starting to run")
	if debug > 1 {
		sign.DEBUG = true
	}

	// fmt.Println("EXEC TIMESTAMPER: " + hostname)
	if hostname == "" {
		log.Fatal("no hostname given")
	}

	// load the configuration
	//dbg.Lvl3("loading configuration")
	var hc *config.HostConfig
	var err error
	s := GetSuite(suite)
	opts := config.ConfigOptions{ConnType: "tcp", Host: hostname, Suite: s}
	if failureRate > 0 || fFail > 0 {
		opts.Faulty = true
	}

	configTime := time.Now()
	hc, err = config.LoadConfig("tree.json", opts)
	if err != nil {
		fmt.Println(err)
		log.Fatal(err)
	}
	dbg.Lvl3(hostname, "finished loading config after", time.Since(configTime))

	for i := range hc.SNodes {
		// set FailureRates
		if failureRate > 0 {
			hc.SNodes[i].FailureRate = failureRate
		}
		// set root failures
		if rFail > 0 {
			hc.SNodes[i].FailAsRootEvery = rFail
		}
		// set follower failures
		// a follower fails on %ffail round with failureRate probability
		hc.SNodes[i].FailAsFollowerEvery = fFail
	}

	// Wait for everybody to be ready before going on
	ioutil.WriteFile("coll_stamp_up/up"+hostname, []byte("started"), 0666)
	for {
		_, err := os.Stat("coll_stamp_up")
		if err == nil {
			dbg.Lvl4(hostname, "waiting for others to finish")
			time.Sleep(time.Second)
		} else {
			break
		}
	}
	dbg.Lvl3(hostname, "thinks everybody's here")

	err = hc.Run(app != "coll_sign", sign.MerkleTree, hostname)
	if err != nil {
		log.Fatal(err)
	}

	defer func(sn *sign.Node) {
		//log.Panicln("program has terminated:", hostname)
		dbg.Lvl1("Program timestamper has terminated:", hostname)
		sn.Close()
	}(hc.SNodes[0])

	stampers, _, err := RunTimestamper(hc, 0, hostname)
	// get rid of the hc information so it can be GC'ed
	hc = nil
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range stampers {
		// only listen if this is the hostname specified
		if s.Name() == hostname {
			s.Logger = logger
			s.Hostname = hostname
			s.App = app
			if s.IsRoot(0) {
				dbg.Lvl1("Root timestamper at:", hostname, rounds, "Waiting: ", rootwait)
				// wait for the other nodes to get set up
				time.Sleep(time.Duration(rootwait) * time.Second)

				dbg.Lvl1("Starting root-round")
				s.Run("root", rounds)
				// dbg.Lvl3("\n\nROOT DONE\n\n")

			} else if !testConnect {
				dbg.Lvl2("Running regular timestamper on:", hostname)
				s.Run("regular", rounds)
				// dbg.Lvl1("\n\nREGULAR DONE\n\n")
			} else {
				// testing connection
				dbg.Lvl1("Running connection-test on:", hostname)
				s.Run("test_connect", rounds)
			}
		}
	}
}