Example #1
func testNodes(hostnames []string, failed map[string]bool) ([]string, []byte) {
	var mu sync.Mutex
	var edgelist []byte
	var wg sync.WaitGroup
	for _, host := range hostnames {
		// skip hosts that have already been marked as failed; guard the read
		// because the goroutines launched below also write to the map
		mu.Lock()
		skip := failed[host]
		mu.Unlock()
		if skip {
			continue
		}
		log.Println("latency test:", host)
		wg.Add(1)
		go func(host string) {
			defer wg.Done()
			starttime := time.Now()
			// kill any lingering processes from previous runs
			err := cliutils.TimeoutRun(10*time.Second,
				func() error {
					return cliutils.SshRunStdout(uname, host, "killall logserver; killall timeclient; killall latency_test; killall cocoexec; rm -rf cocoexec")
				})
			if err != nil {
				log.Println("Failed:", host, err)
				mu.Lock()
				failed[host] = true
				mu.Unlock()
				return
			}

			// run the latency test
			log.Println("running latency_test:", host)
			output, err := cliutils.SshRun(uname, host, "./latency_test -hostfile=hosts.txt -hostname="+host)
			if err != nil {
				log.Println("Failed:", host, err)
				mu.Lock()
				failed[host] = true
				mu.Unlock()
				return
			}

			// if this took too long, consider the host failed
			if time.Since(starttime) > (20 * time.Minute) {
				log.Println("Failed: latency test timed out on", host)
				mu.Lock()
				failed[host] = true
				mu.Unlock()
				return
			}
			fmt.Println("output:", string(output))
			mu.Lock()
			edgelist = append(edgelist, output...)
			mu.Unlock()
		}(host)
	}
	wg.Wait()
	log.Println("latency test done")
	goodhosts := make([]string, 0, len(hostnames)-len(failed))
	for _, h := range hostnames {
		if !failed[h] {
			goodhosts = append(goodhosts, h)
		}
	}
	return goodhosts, edgelist
}
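cliutils.TimeoutRun and cliutils.SshRun are external helpers that are not defined in this example. As a rough sketch of what the timeout wrapper is assumed to do (bound the runtime of the function it is given), something like the following would work; the actual cliutils implementation may differ.

// Sketch only (assumption): a minimal TimeoutRun-style wrapper.
// Uses the standard "errors" and "time" packages.
func timeoutRun(d time.Duration, f func() error) error {
	errc := make(chan error, 1)
	go func() { errc <- f() }()
	select {
	case err := <-errc:
		return err // the command finished (successfully or not) within the deadline
	case <-time.After(d):
		return errors.New("command timed out")
	}
}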
Example #2
func main() {
	flag.Parse()
	content, err := ioutil.ReadFile(hostfile)
	if err != nil {
		log.Fatal(hostfile, ": ", err)
	}
	// get the specified hostnames from the file
	hostnames := strings.Fields(string(content))
	log.Println("hostnames: ", hostnames)
	content, err = ioutil.ReadFile(clientfile)
	if err != nil {
		log.Fatal(err)
	}
	// get the specified client hostnames from the file
	clientnames := strings.Fields(string(content))
	log.Println("clientnames: ", clientnames)

	if err := cliutils.Build("../latency_test", "386", "linux"); err != nil {
		log.Fatal(err)
	}
	if err := cliutils.Build("../exec", "386", "linux"); err != nil {
		log.Fatal(err)
	}
	if app == "time" {
		if err := cliutils.Build("../timeclient", "386", "linux"); err != nil {
			log.Fatal(err)
		}
	}

	// deduplicate the host and client lists before testing latency and removing bad nodes
	uniquehosts := getUniqueHosts(hostnames)
	uniqueclients := getUniqueHosts(clientnames)

	if kill {
		var wg sync.WaitGroup
		for _, h := range hostnames {
			wg.Add(1)
			go func(h string) {
				defer wg.Done()
				cliutils.SshRun(uname, h, "killall logserver; killall timeclient; killall latency_test; killall cocoexec; rm -rf cocoexec; rm -rf latency_test; rm -rf timeclient")
			}(h)
		}
		for _, h := range clientnames {
			wg.Add(1)
			go func(h string) {
				defer wg.Done()
				cliutils.SshRun(uname, h, "killall logserver; killall timeclient; killall latency_test; killall cocoexec; rm -rf cocoexec; rm -rf latency_test; rm -rf timeclient")
			}(h)
		}
		wg.Wait()
		return
	}
	log.Println("uniquehosts: ", uniquehosts)
	failed := scpTestFiles(uniquehosts)
	goodhosts, edgelist := testNodes(hostnames, failed)
	hostnames = goodhosts

	// create a new graph with just these good nodes
	g := graphs.NewGraph(goodhosts)
	g.LoadEdgeList(edgelist)

	// convert this network into a tree with the given depth
	log.Println("CONSTRUCTING TREE OF DEPTH:", depth)
	t := g.Tree(depth)

	// generate the public and private keys for each node in this tree
	suite := nist.NewAES128SHA256P256()
	t.GenKeys(suite, suite.Cipher([]byte("example")))
	log.Println("tree:", t)

	// turn this config into a config file for deployment
	cf := config.ConfigFromTree(t, goodhosts)

	// give each host in the file the specified port
	cf.AddPorts(port)

	log.Println("config file contents:", cf)
	b, err := json.Marshal(cf)
	if err != nil {
		log.Fatal(err)
	}

	// write this file out to disk for scp
	log.Println("config file:", string(b))
	err = ioutil.WriteFile("cfg.json", b, 0644)
	if err != nil {
		log.Fatal(err)
	}
	log.Infoln("setting up logger")
	setupLogger()
	log.Infoln("set up logger")
	uniqueclients = getUniqueHosts(clientnames)
	uniquehosts = getUniqueHosts(cf.Hosts)
	log.Infoln("running clients and servers")
	scpClientFiles(uniqueclients)
	scpServerFiles(uniquehosts)
	runClients(clientnames, cf.Hosts, cps)
	deployServers(cf.Hosts)
}
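getUniqueHosts is not defined in this example. One plausible implementation, assuming the input entries may be "host:port" pairs and duplicates should collapse to a single host, is sketched below; the name and behavior are illustrative, not the project's actual helper.

// Sketch only (assumption): deduplicate a list of host names.
// Uses the standard "net" package.
func getUniqueHostsSketch(names []string) []string {
	seen := make(map[string]bool)
	unique := make([]string, 0, len(names))
	for _, n := range names {
		host := n
		if h, _, err := net.SplitHostPort(n); err == nil {
			host = h // drop the port when one is present
		}
		if !seen[host] {
			seen[host] = true
			unique = append(unique, host)
		}
	}
	return unique
}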
Example #3
func main() {
	flag.Parse()
	log.SetFlags(log.Lshortfile)
	fmt.Println("running deter with nmsgs:", nmsgs, rate, rounds)

	virt, err := cliutils.ReadLines("remote/virt.txt")
	if err != nil {
		log.Fatal(err)
	}
	phys, err := cliutils.ReadLines("remote/phys.txt")
	if err != nil {
		log.Fatal(err)
	}
	vpmap := make(map[string]string)
	for i := range virt {
		vpmap[virt[i]] = phys[i]
	}
	// kill old processes
	var wg sync.WaitGroup
	for _, h := range phys {
		wg.Add(1)
		go func(h string) {
			defer wg.Done()
			cliutils.SshRun("", h, "sudo killall exec logserver timeclient scp ssh 2>/dev/null >/dev/null")
			time.Sleep(1 * time.Second)
			cliutils.SshRun("", h, "sudo killall forkexec 2>/dev/null >/dev/null")
		}(h)
	}
	wg.Wait()

	if kill {
		return
	}

	for _, h := range phys {
		wg.Add(1)
		go func(h string) {
			defer wg.Done()
			cliutils.Rsync("", h, "remote", "")
		}(h)
	}
	wg.Wait()

	nloggers := 3
	masterLogger := phys[0]
	slaveLogger1 := phys[1]
	slaveLogger2 := phys[2]
	loggers := []string{masterLogger, slaveLogger1, slaveLogger2}

	phys = phys[nloggers:]
	virt = virt[nloggers:]

	// Read in and parse the configuration file
	file, err := ioutil.ReadFile("remote/cfg.json")
	if err != nil {
		log.Fatal("deter.go: error reading configuration file: %v\n", err)
	}
	log.Println("cfg file:", string(file))
	var cf config.ConfigFile
	err = json.Unmarshal(file, &cf)
	if err != nil {
		log.Fatal("unable to unmarshal config.ConfigFile:", err)
	}

	hostnames := cf.Hosts

	depth := graphs.Depth(cf.Tree)
	var random_leaf string
	cf.Tree.TraverseTree(func(t *graphs.Tree) {
		if random_leaf != "" {
			return
		}
		if len(t.Children) == 0 {
			random_leaf = t.Name
		}
	})

	rootname = hostnames[0]

	log.Println("depth of tree:", depth)

	// mapping from physical node name to the timestamp servers that are running there
	// essentially a reverse mapping of vpmap except ports are also used
	physToServer := make(map[string][]string)
	for _, virt := range hostnames {
		v, _, _ := net.SplitHostPort(virt)
		p := vpmap[v]
		ss := physToServer[p]
		ss = append(ss, virt)
		physToServer[p] = ss
	}

	// start up the logging servers (one per logger host) at port 10000
	fmt.Println("starting up logserver")
	// the first logger is the master; the others redirect to it
	loggerports := make([]string, len(loggers))
	for i, logger := range loggers {
		loggerport := logger + ":10000"
		loggerports[i] = loggerport
		// redirect to the master logger
		master := masterLogger + ":10000"
		// if this is the master logger then don't set the master to anything
		if loggerport == masterLogger+":10000" {
			master = ""
		}

		go cliutils.SshRunStdout("", logger, "cd remote/logserver; sudo ./logserver -addr="+loggerport+
			" -hosts="+strconv.Itoa(len(hostnames))+
			" -depth="+strconv.Itoa(depth)+
			" -bf="+bf+
			" -hpn="+hpn+
			" -nmsgs="+nmsgs+
			" -rate="+strconv.Itoa(rate)+
			" -master="+master)
	}

	// wait a little bit for the logserver to start up
	time.Sleep(5 * time.Second)
	fmt.Println("starting time clients")

	// start up one timeclient per physical machine
	// it requests timestamps from all the servers on that machine
	i := 0
	for p, ss := range physToServer {
		if len(ss) == 0 {
			continue
		}
		servers := strings.Join(ss, ",")
		go func(i int, p string) {
			_, err := cliutils.SshRun("", p, "cd remote; sudo ./timeclient -nmsgs="+nmsgs+
				" -name=client@"+p+
				" -server="+servers+
				" -logger="+loggerports[i]+
				" -debug="+debug+
				" -rate="+strconv.Itoa(rate))
			if err != nil {
				log.Println(err)
			}
		}(i, p)
		i = (i + 1) % len(loggerports)
	}
	rootwait := strconv.Itoa(10)
	for phys, virts := range physToServer {
		if len(virts) == 0 {
			continue
		}
		log.Println("starting timestamper")
		cmd := GenExecCmd(rFail, fFail, failures, phys, virts, loggerports[i], rootwait, random_leaf)
		i = (i + 1) % len(loggerports)
		wg.Add(1)
		//time.Sleep(500 * time.Millisecond)
		go func(phys, cmd string) {
			//log.Println("running on ", phys, cmd)
			defer wg.Done()
			err := cliutils.SshRunStdout("", phys, cmd)
			if err != nil {
				log.Fatal("ERROR STARTING TIMESTAMPER:", err)
			}
		}(phys, cmd)

	}
	// wait for the servers to finish before stopping
	wg.Wait()
	time.Sleep(10 * time.Minute)
}
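cliutils.ReadLines, used above to load remote/virt.txt and remote/phys.txt, is also external to this example. A minimal stand-in, assuming it simply returns the file's non-empty lines, could look like this; the real cliutils version may behave differently.

// Sketch only (assumption): read a file and return its non-empty, trimmed lines.
// Uses the standard "io/ioutil" and "strings" packages.
func readLinesSketch(path string) ([]string, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var lines []string
	for _, l := range strings.Split(string(b), "\n") {
		if l = strings.TrimSpace(l); l != "" {
			lines = append(lines, l)
		}
	}
	return lines, nil
}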