Example No. 1
// synWithPeer exchanges public keys with the remote peer.
// If all goes well, it adds the peer to the remote map
// and notifies the synChan channel.
func (p *Peer) synWithPeer(conn net.Conn) {
	// First we need to SYN mutually
	s := Syn{
		Id:     p.Id,
		Public: p.key.Public,
	}
	err := poly.SUITE.Write(conn, &s)
	if err != nil {
		dbg.Fatal(p.Name, "could not send SYN to ", conn.RemoteAddr().String())
	}
	// Receive the other SYN
	s2 := Syn{}
	err = poly.SUITE.Read(conn, &s2)
	if err != nil {
		dbg.Fatal(p.Name, "could not receive SYN from ", conn.RemoteAddr().String())
	}
	if s2.Id < 0 || s2.Id >= p.info.N {
		dbg.Fatal(p.Name, "received wrong SYN info from ", conn.RemoteAddr().String())
	}
	if p.pubKeys[s2.Id] != nil {
		dbg.Fatal(p.Name, "already received a SYN for this index ")
	}
	p.pubKeys[s2.Id] = s2.Public
	rp := RemotePeer{Conn: conn, Id: s2.Id, Hostname: conn.RemoteAddr().String()}
	p.remote[s2.Id] = rp
	dbg.Lvl3(p.String(), "has SYN'd with peer ", rp.String())
	p.synChan <- s2
}
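The Syn message itself is not part of this listing; below is a minimal sketch of its likely shape, with the field types inferred from how synWithPeer uses it (the public key is encoded via poly.SUITE, so abstract.Point from the dedis crypto library is an assumption):

// Sketch only: Id indexes the peer in the host list, Public is its
// long-term public key. abstract.Point is an assumption based on
// poly.SUITE being used to encode and decode it.
type Syn struct {
	Id     int
	Public abstract.Point
}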
Example No. 2
// BroadcastSignature broadcasts the given signature to every other peer
// and also retrieves the signature of every other peer.
func (p *Peer) BroadcastSignature(s *poly.SchnorrSig) []*poly.SchnorrSig {
	arr := make([]*poly.SchnorrSig, 0, p.info.N)
	arr = append(arr, s)
	err := p.SendToAll(s)
	if err != nil {
		dbg.Fatal(p.String(), "could not sent to everyone its schnorr sig")
	}

	sigChan := make(chan *poly.SchnorrSig)
	fn := func(rp RemotePeer) {
		sch := new(poly.SchnorrSig).Init(p.info)
		err := poly.SUITE.Read(rp.Conn, sch)
		if err != nil {
			dbg.Fatal(p.String(), "could not decode schnorr sig from ", rp.String())
		}
		sigChan <- sch
	}
	// read every other peer's Schnorr sig concurrently
	p.ForRemotePeers(fn)
	n := 0
	for {
		sig := <-sigChan
		arr = append(arr, sig)
		n += 1
		if n == p.info.N-1 {
			dbg.Lvl2(p.String(), "received every other schnorr sig.")
			break
		}
	}

	return arr
}
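BroadcastSignature and several later examples rely on a ForRemotePeers helper that is not shown in this listing. A plausible sketch, assuming p.remote holds the connected peers and that the helper runs its callback concurrently, since the callbacks above block on a network read before feeding a channel:

// Sketch only: run fn once per remote peer, each in its own goroutine,
// so a callback blocking on poly.SUITE.Read does not stall the others.
func (p *Peer) ForRemotePeers(fn func(RemotePeer)) {
	for _, rp := range p.remote {
		go fn(rp)
	}
}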
Example No. 3
// ComputeSharedSecret exchanges dealers between the peers
// and computes the shared secret at the end.
func (p *Peer) ComputeSharedSecret() *poly.SharedSecret {
	// Construct the dealer
	dealerKey := cliutils.KeyPair(poly.SUITE)
	dealer := poly.NewDealer(p.info, &p.key, &dealerKey, p.pubKeys)
	// Construct the receiver
	receiver := poly.NewReceiver(p.info, &p.key)
	// add its own dealer first
	_, err := receiver.AddDealer(p.Id, dealer)
	if err != nil {
		dbg.Fatal(p.String(), "could not add its own dealer >< ABORT")
	}

	// Send the dealer struct to everyone
	err = p.SendToAll(dealer)
	dbg.Lvl2(p.Name, "sent its dealer to every peer (err = ", err, ")")
	// Receive the dealer structs from everyone,
	// collecting them all through a channel
	dealChan := make(chan *poly.Dealer)
	for _, rp := range p.remote {
		go func(rp RemotePeer) {
			d := new(poly.Dealer).UnmarshalInit(p.info)
			err := poly.SUITE.Read(rp.Conn, d)
			if err != nil {
				dbg.Fatal(p.Name, " received a strange dealer from ", rp.String())
			}
			dealChan <- d
		}(rp)
	}

	// wait to get all dealers
	dbg.Lvl3(p.Name, "wait to receive every other peer's dealer...")
	n := 0
	for {
		// get the dealer and add it
		d := <-dealChan
		dbg.Lvl3(p.Name, "collected one more dealer (count = ", n, ")")
		// TODO: get the response back to the dealer
		_, err := receiver.AddDealer(p.Id, d)
		if err != nil {
			dbg.Fatal(p.Name, "has error when adding the dealer : ", err)
		}
		n += 1
		// we have enough dealers to compute the shared secret
		if n == p.info.T-1 {
			dbg.Lvl2(p.Name, "received enough dealers")
			break
		}
	}

	sh, err := receiver.ProduceSharedSecret()
	if err != nil {
		dbg.Fatal(p.Name, "could not produce shared secret. Abort. (err ", err, ")")
	}
	dbg.Lvl2(p.Name, "produced shared secret !")
	return sh
}
Example No. 4
// SchnorrSigRoot first generates a random shared secret,
// then starts a new round.
// It waits for the partial sigs of the peers
// and finally assembles the SchnorrSig struct.
func (p *Peer) SchnorrSigRoot(msg []byte) *poly.SchnorrSig {
	// First, gen. a random secret
	random := p.ComputeSharedSecret()
	// launch the new round
	err := p.schnorr.NewRound(random, msg)
	if err != nil {
		dbg.Fatal(p.String(), "could not make a new round : ", err)
	}

	// compute its own share of the signature
	ps := p.schnorr.RevealPartialSig()
	// add its own
	p.schnorr.AddPartialSig(ps)

	// no need to send to all if you are the root
	//	p.SendToAll(ps)
	// then receive every partial sig
	sigChan := make(chan *poly.PartialSchnorrSig)
	fn := func(rp RemotePeer) {
		psig := new(poly.PartialSchnorrSig)
		err := poly.SUITE.Read(rp.Conn, psig)
		if err != nil {
			dbg.Fatal(p.String(), "could not decode PartialSig of ", rp.String())
		}
		sigChan <- psig
	}
	p.ForRemotePeers(fn)

	// wait for all partial sig to be received
	n := 0
	for {
		psig := <-sigChan
		err := p.schnorr.AddPartialSig(psig)
		if err != nil {
			dbg.Fatal(p.String(), "could not add the partial signature received : ", err)
		}
		n += 1
		if n == p.info.N-1 {
			dbg.Lvl2(p.String(), "received every other partial sig.")
			break
		}
	}

	sign, err := p.schnorr.SchnorrSig()
	if err != nil {
		dbg.Fatal(p.String(), "could not generate the global SchnorrSig", err)
	}
	return sign
}
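For context, a short usage sketch of how the two signing paths are paired each round; the non-root side, SchnorrSigPeer, appears later in this listing, and RunServer at the end drives the full loop:

msg := []byte("hello world")
if p.IsRoot() {
	// the root gathers every partial sig and verifies the result
	sig := p.SchnorrSigRoot(msg)
	if err := p.VerifySchnorrSig(sig, msg); err != nil {
		dbg.Fatal(p.String(), "could not verify schnorr signature: ", err)
	}
} else {
	// every other peer only contributes its partial sig to the root
	p.SchnorrSigPeer(msg)
}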
Example No. 5
// Listen accepts incoming connections on the port part of p.Name
// and starts the SYN exchange for each of them.
func (p *Peer) Listen() {
	results := strings.Split(p.Name, ":")
	port := ":" + results[1]
	ln, err := net.Listen("tcp", port)
	if err != nil {
		dbg.Fatal(p.Name, ": Error while listening on port ", port, "ABORT  => ", err)
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			dbg.Fatal(p.Name, ": Error while listening on port ", port, " => ", err)
		}
		go p.synWithPeer(conn)
	}
}
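The port extraction above assumes p.Name has the form host:port. net.SplitHostPort from the standard library does the same job and also handles IPv6 literals; a sketch of that alternative (not what the original code does):

// Sketch only: an alternative to the strings.Split above.
_, port, err := net.SplitHostPort(p.Name)
if err != nil {
	dbg.Fatal(p.Name, ": could not split host and port => ", err)
}
ln, err := net.Listen("tcp", ":"+port)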
Example No. 6
// WaitFins waits for the end of the whole run so we can close connections cleanly
func (p *Peer) WaitFins() {
	p.wgFin.Add(len(p.remote))
	fn := func(rp RemotePeer) {
		f := Finish{p.Id}
		err := poly.SUITE.Write(rp.Conn, &f)
		if err != nil {
			dbg.Fatal(p.String(), "could not send FIN to ", rp.String())
		}
		p.wgFin.Done()
	}
	p.ForRemotePeers(fn)
	dbg.Lvl2(p.String(), "waiting to send all FIN's packets")
	p.wgFin.Wait()
	// close all connections
	for _, rp := range p.remote {
		rp.Conn.Close()
	}
	dbg.Lvl2(p.String(), "close every connections")
	//for {
	//	f := <-p.finChan
	//	rp, ok := p.remote[f.Id]
	//	if !ok {
	//		dbg.Lvl2(p.Name, "received invalid FIN : wrong ID ", rp.Id, " ... ")
	//	} else {
	//		rp.Conn.Close()
	//		dbg.Lvl2(p.Name, "received FIN from ", rp.String(), " => closed connection")
	//	}
	//}
}
Example No. 7
// WaitACKs makes a peer wait for all other peers to send it an ACK
func (p *Peer) WaitACKs() {
	var wg sync.WaitGroup
	fn := func(rp RemotePeer) {
		a := Ack{}
		err := poly.SUITE.Read(rp.Conn, &a)
		if err != nil {
			dbg.Fatal(p.Name, "could not receive an ACK from ", rp.String(), " (err ", err, ")")
		}
		//p.ackChan <- a
		wg.Done()
	}
	wg.Add(len(p.remote))
	p.ForRemotePeers(fn)

	dbg.Lvl3(p.Name, "is waiting for acks ...")
	wg.Wait()
	dbg.Lvl2(p.String(), "received ALL ACKs")
	//n := 0
	//for {
	//	a := <-p.ackChan
	//	if a.Valid {
	//		n += 1
	//	}
	//	if n == p.info.N-1 {
	//		dbg.Lvl2(p.Name, "received all acks. Continue")
	//		break
	//	}
	//}
}
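The Finish and Ack control messages are defined outside this listing; their fields can be inferred from WaitFins above and SendACKs below. A sketch of their assumed shape:

// Sketch only: Finish carries the sender's index (see Finish{p.Id} above),
// Ack carries the sender's index plus a validity flag (see SendACKs below).
type Finish struct {
	Id int
}

type Ack struct {
	Id    int
	Valid bool
}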
Example No. 8
// ConnectTo will connect to the given host and start the SYN exchange (public key + id)
func (p *Peer) ConnectTo(host string) error {
	tick := time.NewTicker(ConnWaitRetry)
	count := 0
	for range tick.C {
		// connect
		conn, err := net.Dial("tcp", host)
		if err != nil {
			// we have tried too many times => abort
			if count == ConnRetry {
				tick.Stop()
				dbg.Fatal(p.Name, "could not connect to", host, " ", ConnRetry, "times. Abort.")
			}
			// otherwise wait for the next tick and try again
			dbg.Lvl2(p.Name, "could not connect to", host, ". Retry in ", ConnWaitRetry.String())
			count += 1
			continue
		}
		// handle successful connection
		dbg.Lvl3(p.Name, "has connected with peer ", host)
		tick.Stop()
		// start to syn with the respective peer
		go p.synWithPeer(conn)
		break
	}
	return nil
}
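ConnRetry and ConnWaitRetry are package-level settings that do not appear in this listing; the values below are purely illustrative of what the retry loop expects:

// Illustration only: the real values are defined elsewhere in the package.
const ConnRetry = 10                // give up after 10 failed attempts
var ConnWaitRetry = 1 * time.Second // wait one second between attempts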
Example No. 9
// SendACKs sends an ACK to everyone
func (p *Peer) SendACKs() {
	a := Ack{
		Id:    p.Id,
		Valid: true,
	}
	err := p.SendToAll(&a)
	if err != nil {
		dbg.Fatal(p.Name, "could not sent its ACKs to every one : ", err)
	}
}
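SendToAll, used here and in the broadcast and dealer-exchange examples above, is not part of this listing. A plausible sketch, assuming it encodes the message to every open connection with the poly suite and reports the first error:

// Sketch only: write the structure to every connected remote peer.
func (p *Peer) SendToAll(data interface{}) error {
	for _, rp := range p.remote {
		if err := poly.SUITE.Write(rp.Conn, data); err != nil {
			return err
		}
	}
	return nil
}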
Example No. 10
// SchnorrSigPeer computes this peer's partial signature over msg
// and sends it to the root
func (p *Peer) SchnorrSigPeer(msg []byte) {
	// First, gen. a random secret
	random := p.ComputeSharedSecret()
	// launch the new round
	err := p.schnorr.NewRound(random, msg)
	if err != nil {
		dbg.Fatal(p.String(), "could not make a new round : ", err)
	}

	// compute its own share of the signature
	ps := p.schnorr.RevealPartialSig()
	// then send it to root only
	p.SendToRoot(ps)
}
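SendToRoot is also not shown here. Since the peer with index 0 acts as the root (see RunServer below, where indexPeer == 0 marks the root), a plausible sketch:

// Sketch only: assumes the root is always the peer with index 0.
func (p *Peer) SendToRoot(data interface{}) error {
	return poly.SUITE.Write(p.remote[0].Conn, data)
}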
Example No. 11
// WaitSYNs waits until every other peer has SYN'd with this one
func (p *Peer) WaitSYNs() {
	for {
		s := <-p.synChan
		dbg.Lvl3(p.Name, " synChan received Syn id ", s.Id)
		_, ok := p.remote[s.Id]
		if !ok {
			dbg.Fatal(p.Name, "received syn'd notification of an unknown peer... ABORT")
		}
		if len(p.remote) == p.info.N-1 {
			dbg.Lvl2(p.Name, "is SYN'd with every one")
			break
		}
	}
}
Example No. 12
func main() {
	deter, err := deploy.ReadConfig("remote")
	if err != nil {
		log.Fatal("Couldn't read config in deter:", err)
	}
	conf = deter.Config
	dbg.DebugVisible = conf.Debug

	dbg.Lvl1("running deter with nmsgs:", conf.Nmsgs, "rate:", conf.Rate, "rounds:", conf.Rounds, "debug:", conf.Debug)

	virt, err := cliutils.ReadLines("remote/virt.txt")
	if err != nil {
		log.Fatal(err)
	}
	phys, err := cliutils.ReadLines("remote/phys.txt")
	if err != nil {
		log.Fatal(err)
	}
	vpmap := make(map[string]string)
	for i := range virt {
		vpmap[virt[i]] = phys[i]
	}
	// kill old processes
	var wg sync.WaitGroup
	doneHosts := make([]bool, len(phys))
	for i, h := range phys {
		wg.Add(1)
		go func(i int, h string) {
			defer wg.Done()
			dbg.Lvl4("Cleaning up host", h)
			cliutils.SshRun("", h, "sudo killall app forkexec logserver timeclient scp ssh 2>/dev/null >/dev/null")
			time.Sleep(1 * time.Second)
			cliutils.SshRun("", h, "sudo killall app 2>/dev/null >/dev/null")
			if dbg.DebugVisible > 3 {
				dbg.Lvl4("Killing report:")
				cliutils.SshRunStdout("", h, "ps ax")
			}
			doneHosts[i] = true
			dbg.Lvl3("Host", h, "cleaned up")
		}(i, h)
	}

	cleanupChannel := make(chan string)
	go func() {
		wg.Wait()
		dbg.Lvl3("Done waiting")
		cleanupChannel <- "done"
	}()
	select {
	case msg := <-cleanupChannel:
		dbg.Lvl3("Received msg from cleanupChannel", msg)
	case <-time.After(time.Second * 10):
		for i, m := range doneHosts {
			if !m {
				dbg.Lvl1("Missing host:", phys[i])
			}
		}
		dbg.Fatal("Didn't receive all replies.")
	}

	if kill {
		dbg.Lvl1("Returning only from cleanup")
		return
	}

	/*
		 * Why copy the stuff to the other nodes? We have NFS, no?
		for _, h := range phys {
			wg.Add(1)
			go func(h string) {
				defer wg.Done()
				cliutils.Rsync("", h, "remote", "")
			}(h)
		}
		wg.Wait()
	*/

	nloggers := conf.Nloggers
	masterLogger := phys[0]
	loggers := []string{masterLogger}
	dbg.Lvl3("Going to create", nloggers, "loggers")
	for n := 1; n < nloggers; n++ {
		loggers = append(loggers, phys[n])
	}

	phys = phys[nloggers:]
	virt = virt[nloggers:]

	// Read in and parse the configuration file
	file, err := ioutil.ReadFile("remote/tree.json")
	if err != nil {
		log.Fatal("deter.go: error reading configuration file: %v\n", err)
	}
	dbg.Lvl4("cfg file:", string(file))
	var cf config.ConfigFile
	err = json.Unmarshal(file, &cf)
	if err != nil {
		log.Fatal("unable to unmarshal config.ConfigFile:", err)
	}

	hostnames := cf.Hosts
	dbg.Lvl4("hostnames:", hostnames)

	depth := graphs.Depth(cf.Tree)
	var random_leaf string
	cf.Tree.TraverseTree(func(t *graphs.Tree) {
		if random_leaf != "" {
			return
		}
		if len(t.Children) == 0 {
			random_leaf = t.Name
		}
	})

	rootname = hostnames[0]

	dbg.Lvl4("depth of tree:", depth)

	// mapping from physical node name to the timestamp servers that are running there
	// essentially a reverse mapping of vpmap except ports are also used
	physToServer := make(map[string][]string)
	for _, virt := range hostnames {
		v, _, _ := net.SplitHostPort(virt)
		p := vpmap[v]
		ss := physToServer[p]
		ss = append(ss, virt)
		physToServer[p] = ss
	}

	// start up the logging server on the final host at port 10000
	dbg.Lvl1("starting up logservers: ", loggers)
	// start up the master logger
	loggerports := make([]string, len(loggers))
	for i, logger := range loggers {
		loggerport := logger + ":10000"
		loggerports[i] = loggerport
		// redirect to the master logger
		master := masterLogger + ":10000"
		// if this is the master logger then don't set the master to anything
		if loggerport == masterLogger+":10000" {
			master = ""
		}

		// Copy configuration file to make higher file-limits
		err = cliutils.SshRunStdout("", logger, "sudo cp remote/cothority.conf /etc/security/limits.d")

		if err != nil {
			log.Fatal("Couldn't copy limit-file:", err)
		}

		go cliutils.SshRunStdout("", logger, "cd remote; sudo ./logserver -addr="+loggerport+
			" -master="+master)
	}

	i := 0
	// For coll_stamp we have to wait for everything to be in place, which takes quite some time.
	// We set up a directory and every host writes a file once it's ready to listen.
	// When everybody is ready, the directory is deleted and the test starts.
	coll_stamp_dir := "remote/coll_stamp_up"
	if conf.App == "coll_stamp" || conf.App == "coll_sign" {
		os.RemoveAll(coll_stamp_dir)
		os.MkdirAll(coll_stamp_dir, 0777)
		time.Sleep(time.Second)
	}
	dbg.Lvl1("starting", len(physToServer), "forkexecs")
	totalServers := 0
	for phys, virts := range physToServer {
		if len(virts) == 0 {
			continue
		}
		totalServers += len(virts)
		dbg.Lvl1("Launching forkexec for", len(virts), "clients on", phys)
		//cmd := GenExecCmd(phys, virts, loggerports[i], random_leaf)
		i = (i + 1) % len(loggerports)
		wg.Add(1)
		// pass the logger port by value so the goroutine does not race on i
		go func(phys, loggerport string) {
			//dbg.Lvl4("running on ", phys, cmd)
			defer wg.Done()
			dbg.Lvl4("Starting servers on physical machine ", phys)
			err := cliutils.SshRunStdout("", phys, "cd remote; sudo ./forkexec"+
				" -physaddr="+phys+" -logger="+loggerport)
			if err != nil {
				log.Fatal("Error starting timestamper:", err, phys)
			}
			dbg.Lvl4("Finished with Timestamper", phys)
		}(phys, loggerports[i])
	}

	if conf.App == "coll_stamp" || conf.App == "coll_sign" {
		// Every stampserver that started up (mostly waiting for configuration-reading)
		// writes its name in coll_stamp_dir - once everybody is there, the directory
		// is cleaned to flag it's OK to go on.
		start_config := time.Now()
		for {
			files, err := ioutil.ReadDir(coll_stamp_dir)
			if err != nil {
				log.Fatal("Couldn't read directory", coll_stamp_dir, err)
			} else {
				dbg.Lvl1("Stampservers started:", len(files), "/", totalServers, "after", time.Since(start_config))
				if len(files) == totalServers {
					os.RemoveAll(coll_stamp_dir)
					// 1st second for everybody to see the deleted directory
					// 2nd second for everybody to start up listening
					time.Sleep(2 * time.Second)
					break
				}
			}
			time.Sleep(time.Second)
		}
	}

	switch conf.App {
	case "coll_stamp":
		dbg.Lvl1("starting", len(physToServer), "time clients")
		// start up one timeclient per physical machine
		// it requests timestamps from all the servers on that machine
		for p, ss := range physToServer {
			if len(ss) == 0 {
				continue
			}
			servers := strings.Join(ss, ",")
			go func(i int, p string) {
				_, err := cliutils.SshRun("", p, "cd remote; sudo ./app -mode=client -app="+conf.App+
					" -name=client@"+p+
					" -server="+servers+
					" -logger="+loggerports[i])
				if err != nil {
					dbg.Lvl4("Deter.go : timeclient error ", err)
				}
				dbg.Lvl4("Deter.go : Finished with timeclient", p)
			}(i, p)
			i = (i + 1) % len(loggerports)
		}
	case "coll_sign_no":
		// TODO: for now it's only a simple startup from the server
		dbg.Lvl1("Starting only one client")
		/*
			p := physToServer[0][0]
			servers := strings.Join(physToServer[0][1], ",")
			_, err = cliutils.SshRun("", p, "cd remote; sudo ./app -mode=client -app=" + conf.App +
			" -name=client@" + p +
			" -server=" + servers +
			" -logger=" + loggerports[i])
			i = (i + 1) % len(loggerports)
		*/
	}

	// wait for the servers to finish before stopping
	wg.Wait()
	//time.Sleep(10 * time.Minute)
}
Example No. 13
func RunServer(hosts *config.HostsConfig, app *config.AppConfig, depl *deploy.Config) {
	s := config.GetSuite(depl.Suite)
	poly.SUITE = s
	poly.SECURITY = poly.MODERATE
	n := len(hosts.Hosts)

	info := poly.PolyInfo{
		N: n,
		R: n,
		T: n,
	}
	indexPeer := -1
	for i, h := range hosts.Hosts {
		if h == app.Hostname {
			indexPeer = i
			break
		}
	}
	if indexPeer == -1 {
		log.Fatal("Peer ", app.Hostname, "(", app.PhysAddr, ") did not find any match for its name.Abort")
	}

	start := time.Now()
	dbg.Lvl1("Creating new peer ", app.Hostname, "(", app.PhysAddr, ") ...")
	// indexPeer == 0 <==> peer is root
	p := NewPeer(indexPeer, app.Hostname, info, indexPeer == 0)

	// make it listen
	dbg.Lvl2("Peer", app.Hostname, "is now listening for incoming connections")
	go p.Listen()

	// then connect it to its successor in the list
	for _, h := range hosts.Hosts[indexPeer+1:] {
		dbg.Lvl2("Peer ", app.Hostname, " will connect to ", h)
		// will connect and SYN with the remote peer
		p.ConnectTo(h)
	}
	// Wait until this peer is connected / SYN'd with each other peer
	p.WaitSYNs()

	if p.IsRoot() {
		delta := time.Since(start)
		dbg.Lvl2(p.String(), "Connections accomplished in", delta)
		log.WithFields(log.Fields{
			"file":  logutils.File(),
			"type":  "schnorr_connect",
			"round": 0,
			"time":  delta,
		}).Info("")
	}

	// start to record
	start = time.Now()

	// Setup the schnorr system amongst peers
	p.SetupDistributedSchnorr()
	p.SendACKs()
	p.WaitACKs()
	dbg.Lvl1(p.String(), "completed Schnorr setup")

	// send setup time if we're root
	if p.IsRoot() {
		delta := time.Since(start)
		dbg.Lvl2(p.String(), "setup accomplished in ", delta)
		log.WithFields(log.Fields{
			"file":  logutils.File(),
			"type":  "schnorr_setup",
			"round": 0,
			"time":  delta,
		}).Info("")
	}

	for round := 0; round < depl.Rounds; round++ {
		if p.IsRoot() {
			dbg.Lvl2("Starting round", round)
		}

		// Then issue a signature !
		start = time.Now()
		msg := "hello world"

		// Only root calculates if it's OK and sends a log-message
		if p.IsRoot() {
			sig := p.SchnorrSigRoot([]byte(msg))
			err := p.VerifySchnorrSig(sig, []byte(msg))
			if err != nil {
				dbg.Fatal(p.String(), "could not verify schnorr signature :/ ", err)
			}

			dbg.Lvl2(p.String(), "verified the schnorr sig !")
			// record time
			delta := time.Since(start)
			dbg.Lvl2(p.String(), "signature done in ", delta)
			log.WithFields(log.Fields{
				"file":  logutils.File(),
				"type":  "schnorr_round",
				"round": round,
				"time":  delta,
			}).Info("")
		} else {
			// Compute the partial sig and send it to the root
			p.SchnorrSigPeer([]byte(msg))
		}
	}

	p.WaitFins()
	dbg.Lvl1(p.String(), "is leaving ...")

	if p.IsRoot() {
		log.WithFields(log.Fields{
			"file": logutils.File(),
			"type": "schnorr_end",
		}).Info("")
	}
}