Example #1
// GenExecCmd generates all commands to launch every node on a single physical machine.
func GenExecCmd(phys string, names []string, loggerport, random_leaf string) string {
	dbg.Lvl3("Random_leaf", random_leaf)
	dbg.Lvl3("Names", names)
	connect := false
	cmd := ""
	bg := " & "
	for i, name := range names {
		dbg.Lvl3("deter.go Generate cmd timestamper : name ==", name)
		dbg.Lvl3("random_leaf ==", random_leaf)
		dbg.Lvl3("testconnect is", deter.TestConnect)
		if name == random_leaf && deter.TestConnect {
			connect = true
		}
		amroot := " -amroot=false"
		if name == rootname {
			amroot = " -amroot=true"
		}

		if i == len(names)-1 {
			bg = ""
		}
		cmd += "(cd remote; sudo ./forkexec" +
			" -physaddr=" + phys +
			" -hostname=" + name +
			" -logger=" + loggerport +
			" -test_connect=" + strconv.FormatBool(connect) +
			amroot + bg +
			" ); "
	}
	return cmd
}
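
A minimal usage sketch, assuming made-up physical, virtual and logger addresses; it shows the shape of the shell string GenExecCmd returns:

// exampleGenExecCmd shows a hypothetical call to GenExecCmd; all addresses
// below are invented for illustration.
func exampleGenExecCmd() {
	names := []string{"10.0.0.1:2000", "10.0.0.2:2000"}
	cmd := GenExecCmd("machine-0", names, "logger-0:10000", names[1])
	// cmd is a chain of backgrounded forkexec invocations, roughly:
	// (cd remote; sudo ./forkexec -physaddr=machine-0 -hostname=10.0.0.1:2000
	//   -logger=logger-0:10000 -test_connect=false -amroot=false & ); (...);
	fmt.Println(cmd)
}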
Example #2
// Get gets data from the connection.
// Returns ErrClosed on an irrecoverable error.
// Returns the given error if it is temporary.
func (tc *TCPConn) Get(bum BinaryUnmarshaler) error {
	if tc.Closed() {
		dbg.Lvl3("tcpconn: get: connection closed")
		return ErrClosed
	}
	tc.encLock.Lock()
	if tc.dec == nil {
		tc.encLock.Unlock()
		return ErrNotEstablished
	}
	dec := tc.dec
	tc.encLock.Unlock()

	if Latency != 0 {
		time.Sleep(time.Duration(rand.Intn(Latency)) * time.Millisecond)
	}
	err := dec.Decode(bum)
	if err != nil {
		if IsTemporary(err) {
			return err
		}
		// if it is an irrecoverable error
		// close the channel and return that it has been closed
		if err != io.EOF && err.Error() != "read tcp4" {
			dbg.Lvl2("Couldn't decode packet at", tc.name, "error:", err)
		} else {
			dbg.Lvl3("Closing connection by EOF")
		}
		tc.Close()
		return ErrClosed
	}
	return err
}
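
A sketch of a receive loop built on Get, assuming a newMsg factory that yields values implementing BinaryUnmarshaler (as *TimeStampMessage does elsewhere in this section); temporary errors are retried, ErrClosed ends the loop:

// receiveLoop drains a connection with Get until it is irrecoverably closed.
func receiveLoop(tc *TCPConn, newMsg func() BinaryUnmarshaler) {
	for {
		msg := newMsg()
		switch err := tc.Get(msg); err {
		case nil:
			// process msg ...
		case ErrClosed:
			// irrecoverable: Get has already closed the connection
			return
		default:
			// ErrNotEstablished or a temporary error: back off and retry
			time.Sleep(100 * time.Millisecond)
		}
	}
}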
Example #3
// ComputeSharedSecret exchanges dealers between the peers
// and computes the shared secret at the end
func (p *Peer) ComputeSharedSecret() *poly.SharedSecret {
	// Construct the dealer
	dealerKey := cliutils.KeyPair(poly.SUITE)
	dealer := poly.NewDealer(p.info, &p.key, &dealerKey, p.pubKeys)
	// Construct the receiver
	receiver := poly.NewReceiver(p.info, &p.key)
	// add its own dealer first
	_, err := receiver.AddDealer(p.Id, dealer)
	if err != nil {
		dbg.Fatal(p.String(), "could not add its own dealer. Abort.")
	}

	// Send the dealer struct to everyone
	err = p.SendToAll(dealer)
	dbg.Lvl2(p.Name, "sent its dealer to all peers (err =", err, ")")
	// Receive the dealer struct from everyone,
	// waiting on a channel to collect all dealers
	dealChan := make(chan *poly.Dealer)
	for _, rp := range p.remote {
		go func(rp RemotePeer) {
			d := new(poly.Dealer).UnmarshalInit(p.info)
			err := poly.SUITE.Read(rp.Conn, d)
			if err != nil {
				dbg.Fatal(p.Name, " received a strange dealer from ", rp.String())
			}
			dealChan <- d
		}(rp)
	}

	// wait to get all dealers
	dbg.Lvl3(p.Name, "wait to receive every other peer's dealer...")
	n := 0
	for {
		// get the dealer and add it
		d := <-dealChan
		dbg.Lvl3(p.Name, "collected one more dealer (count = ", n, ")")
		// TODO: get the response back to the dealer
		_, err := receiver.AddDealer(p.Id, d)
		if err != nil {
			dbg.Fatal(p.Name, "has error when adding the dealer : ", err)
		}
		n += 1
		// we get enough dealers to compute the shared secret
		if n == p.info.T-1 {
			dbg.Lvl2(p.Name, "received every Dealers")
			break
		}
	}

	sh, err := receiver.ProduceSharedSecret()
	if err != nil {
		dbg.Fatal(p.Name, "could not produce shared secret. Abort. (err ", err, ")")
	}
	dbg.Lvl2(p.Name, "produced shared secret !")
	return sh
}
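
Note on the count: the loop collects p.info.T-1 remote dealers, which together with the peer's own dealer added at the start presumably gives the receiver the T dealers it needs before ProduceSharedSecret is called.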
Example #4
// Listen listens for client connections.
// This server needs to run on a different port
// than the Signer that is beneath it.
func (s *Server) Listen() error {
	dbg.Lvl3("Listening in server at", s.name)
	ln, err := net.Listen("tcp4", s.name)
	if err != nil {
		panic(err)
	}

	go func() {
		for {
			// dbg.Lvl4("LISTENING TO CLIENTS: %p", s)
			conn, err := ln.Accept()
			if err != nil {
				// handle error
				dbg.Lvl3("failed to accept connection")
				continue
			}

			c := coconet.NewTCPConnFromNet(conn)
			// dbg.Lvl4("CLIENT TCP CONNECTION SUCCESSFULLY ESTABLISHED:", c)

			if _, ok := s.Clients[c.Name()]; !ok {
				s.Clients[c.Name()] = c

				go func(c coconet.Conn) {
					for {
						tsm := TimeStampMessage{}
						err := c.Get(&tsm)
						if err != nil {
							log.Errorf("%p Failed to get from child:", s, err)
							s.Close()
							return
						}
						switch tsm.Type {
						default:
							log.Errorf("Message of unknown type: %v\n", tsm.Type)
						case StampRequestType:
							// dbg.Lvl4("RECEIVED STAMP REQUEST")
							s.mux.Lock()
							READING := s.READING
							s.Queue[READING] = append(s.Queue[READING],
								MustReplyMessage{Tsm: tsm, To: c.Name()})
							s.mux.Unlock()
						}
					}
				}(c)
			}

		}
	}()

	return nil
}
Example #5
func (sn *Node) StartGossip() {
	go func() {
		t := time.Tick(GOSSIP_TIME)
		for {
			select {
			case <-t:
				sn.viewmu.Lock()
				c := sn.HostListOn(sn.ViewNo)
				sn.viewmu.Unlock()
				if len(c) == 0 {
					log.Errorln(sn.Name(), "StartGossip: none in hostlist for view: ", sn.ViewNo, len(c))
					continue
				}
				sn.randmu.Lock()
				from := c[sn.Rand.Int()%len(c)]
				sn.randmu.Unlock()
				dbg.Lvl4("Gossiping with: ", from)
				sn.CatchUp(int(atomic.LoadInt64(&sn.LastAppliedVote)+1), from)
			case <-sn.closed:
				dbg.Lvl3("stopping gossip: closed")
				return
			}
		}
	}()
}
Example #6
// NewPeer returns a new peer with the given id, out of the p.N peers taking part in the Schnorr signature algorithm
// TODO: verify that name is a valid addr:port string
func NewPeer(id int, name string, p poly.PolyInfo, isRoot bool) *Peer {

	if id >= p.N {
		log.Fatal("NewPeer: given", id, "as id whereas polyinfo.N =", p.N)
	}
	// Setup of the private / public pair
	key := cliutils.KeyPair(poly.SUITE)
	// setup of the public key list
	pubKeys := make([]abstract.Point, p.N)
	pubKeys[id] = key.Public
	dbg.Lvl3(name, "(id", id, ") has created its private/public key: public =>", key.Public)

	return &Peer{
		Id:      id,
		remote:  make(map[int]RemotePeer),
		root:    isRoot,
		Name:    name,
		info:    p,
		key:     key,
		pubKeys: pubKeys,
		schnorr: new(poly.Schnorr),
		synChan: make(chan Syn),
		ackChan: make(chan Ack),
	}
}
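
For illustration, a hypothetical construction of a root peer; the PolyInfo literal assumes only the N and T fields referenced in this section:

// exampleNewPeer builds a root peer with invented values.
func exampleNewPeer() *Peer {
	info := poly.PolyInfo{N: 3, T: 3} // 3 peers, threshold 3 (assumed fields)
	return NewPeer(0, "10.0.0.1:2000", info, true)
}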
Example #7
// synWithPeer exchanges public keys with the remote peer.
// If all goes well, it adds the peer to the remote-peer map
// and notifies the synChan channel.
func (p *Peer) synWithPeer(conn net.Conn) {
	// First we need to SYN mutually
	s := Syn{
		Id:     p.Id,
		Public: p.key.Public,
	}
	err := poly.SUITE.Write(conn, &s)
	if err != nil {
		dbg.Fatal(p.Name, "could not send SYN to ", conn.RemoteAddr().String())
	}
	// Receive the other SYN
	s2 := Syn{}
	err = poly.SUITE.Read(conn, &s2)
	if err != nil {
		dbg.Fatal(p.Name, "could not receive SYN from ", conn.RemoteAddr().String())
	}
	if s2.Id < 0 || s2.Id >= p.info.N {
		dbg.Fatal(p.Name, "received wrong SYN info from ", conn.RemoteAddr().String())
	}
	if p.pubKeys[s2.Id] != nil {
		dbg.Fatal(p.Name, "already received a SYN for this index ")
	}
	p.pubKeys[s2.Id] = s2.Public
	rp := RemotePeer{Conn: conn, Id: s2.Id, Hostname: conn.RemoteAddr().String()}
	p.remote[s2.Id] = rp
	dbg.Lvl3(p.String(), "has SYN'd with peer ", rp.String())
	p.synChan <- s2
}
Example #8
// WaitACKs makes a peer wait for an ACK from every other peer
func (p *Peer) WaitACKs() {
	var wg sync.WaitGroup
	fn := func(rp RemotePeer) {
		a := Ack{}
		err := poly.SUITE.Read(rp.Conn, &a)
		if err != nil {
			dbg.Fatal(p.Name, "could not receive an ACK from ", rp.String(), " (err ", err, ")")
		}
		//p.ackChan <- a
		wg.Done()
	}
	wg.Add(len(p.remote))
	p.ForRemotePeers(fn)

	dbg.Lvl3(p.Name, "is waiting for acks ...")
	wg.Wait()
	dbg.Lvl2(p.String(), "received ALL ACKs")
}
Example #9
// ConnectTo will connect to the given host and start the SYN exchange (public key + id)
func (p *Peer) ConnectTo(host string) error {
	tick := time.NewTicker(ConnWaitRetry)
	count := 0
	for range tick.C {
		// connect
		conn, err := net.Dial("tcp", host)
		if err != nil {
			// we have tried too many times => abort
			if count == ConnRetry {
				tick.Stop()
				dbg.Fatal(p.Name, "could not connect to", host, ConnRetry, "times. Abort.")
			}
			// otherwise wait for the next tick and try again
			dbg.Lvl2(p.Name, "could not connect to", host, ". Retry in", ConnWaitRetry.String())
			count += 1
			continue
		}
		// handle successful connection
		dbg.Lvl3(p.Name, "has connected with peer", host)
		tick.Stop()
		// start to SYN with the respective peer
		go p.synWithPeer(conn)
		break
	}
	return nil
}
Example #10
// readHosts parses the hosts.txt file to create separate lists (and files)
// of physical and virtual nodes, such that the host on line i of phys.txt
// corresponds to the host on line i of virt.txt.
func (d *Deter) readHosts() {
	hosts_file := d.DeployDir + "/hosts.txt"
	nmachs, nloggers := d.Config.Nmachs, d.Config.Nloggers

	physVirt, err := cliutils.ReadLines(hosts_file)
	if err != nil {
		log.Panic("Couldn't find", hosts_file)
	}

	d.phys = make([]string, 0, len(physVirt)/2)
	d.virt = make([]string, 0, len(physVirt)/2)
	for i := 0; i < len(physVirt); i += 2 {
		d.phys = append(d.phys, physVirt[i])
		d.virt = append(d.virt, physVirt[i+1])
	}
	d.phys = d.phys[:nmachs+nloggers]
	d.virt = d.virt[:nmachs+nloggers]
	d.physOut = strings.Join(d.phys, "\n")
	d.virtOut = strings.Join(d.virt, "\n")
	d.masterLogger = d.phys[0]
	// slaveLogger1 := phys[1]
	// slaveLogger2 := phys[2]

	// phys.txt and virt.txt only contain the number of machines that we need
	dbg.Lvl3("Reading phys and virt")
	err = ioutil.WriteFile(d.DeployDir+"/phys.txt", []byte(d.physOut), 0666)
	if err != nil {
		log.Fatal("failed to write physical nodes file", err)
	}

	err = ioutil.WriteFile(d.DeployDir+"/virt.txt", []byte(d.virtOut), 0666)
	if err != nil {
		log.Fatal("failed to write virtual nodes file", err)
	}
}
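
For illustration, hosts.txt is expected to alternate physical and virtual addresses, one per line (values below are invented):

pc001.emulab.net
10.255.0.1
pc002.emulab.net
10.255.0.2

The lines at even (0-based) indices become the phys entries and the following lines the matching virt entries, both truncated to nmachs+nloggers hosts.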
Example #11
func (d *Deter) Build(build string) error {
	dbg.Lvl1("Building for", d.Login, d.Host, d.Project, build)
	start := time.Now()

	var wg sync.WaitGroup

	// Start with a clean build-directory
	current, _ := os.Getwd()
	dbg.Lvl4("Current dir is:", current)
	defer os.Chdir(current)

	// Go into deterlab-dir and create the build-dir
	os.Chdir(d.DeterDir)
	os.RemoveAll(d.BuildDir)
	os.Mkdir(d.BuildDir, 0777)

	// start building the necessary packages
	packages := []string{"logserver", "forkexec", "../../app", "deter"}
	if build != "" {
		packages = strings.Split(build, ",")
	}
	dbg.Lvl3("Starting to build all executables", packages)
	for _, p := range packages {
		basename := path.Base(p)
		dbg.Lvl4("Building ", p, "into", basename)
		wg.Add(1)
		src := p + "/" + basename + ".go"
		dst := d.BuildDir + "/" + basename
		if p == "deter" {
			go func(s, d string) {
				defer wg.Done()
				// the users node has a 386 FreeBSD architecture
				out, err := cliutils.Build(s, d, "386", "freebsd")
				if err != nil {
					cliutils.KillGo()
					fmt.Println(out)
					log.Fatal(err)
				}
			}(src, dst)
			continue
		}
		go func(s, d string) {
			defer wg.Done()
			// deter has an amd64, linux architecture
			out, err := cliutils.Build(s, d, "amd64", "linux")
			if err != nil {
				cliutils.KillGo()
				fmt.Println(out)
				log.Fatal(err)
			}
		}(src, dst)
	}
	// wait for the build to finish
	wg.Wait()
	dbg.Lvl1("Build is finished after", time.Since(start))
	return nil
}
Example #12
func (d *Deter) Start() error {
	dbg.Lvl1("Running with", d.Config.Nmachs, "nodes *", d.Config.Hpn, "hosts per node =",
		d.Config.Nmachs*d.Config.Hpn, "and", d.Config.Nloggers, "loggers")

	// setup port forwarding for viewing log server
	dbg.Lvl3("setup port forwarding for master logger: ", d.masterLogger, d.Login, d.Host)
	cmd := exec.Command(
		"ssh",
		"-t",
		"-t",
		fmt.Sprintf("%s@%s", d.Login, d.Host),
		"-L",
		"8081:"+d.masterLogger+":10000")
	err := cmd.Start()
	if err != nil {
		log.Fatal("failed to setup portforwarding for logging server")
	}

	dbg.Lvl3("runnning deter with nmsgs:", d.Config.Nmsgs, d.Login, d.Host)
	// run the deter lab boss nodes process
	// it will be responsible for forwarding the files and running the individual
	// timestamping servers

	go func() {
		dbg.Lvl3(cliutils.SshRunStdout(d.Login, d.Host,
			"GOMAXPROCS=8 remote/deter -nmsgs="+strconv.Itoa(d.Config.Nmsgs)+
				" -hpn="+strconv.Itoa(d.Config.Hpn)+
				" -bf="+strconv.Itoa(d.Config.Bf)+
				" -rate="+strconv.Itoa(d.Config.Rate)+
				" -rounds="+strconv.Itoa(d.Config.Rounds)+
				" -debug="+strconv.Itoa(d.Config.Debug)+
				" -failures="+strconv.Itoa(d.Config.Failures)+
				" -rfail="+strconv.Itoa(d.Config.RFail)+
				" -ffail="+strconv.Itoa(d.Config.FFail)+
				" -app="+d.Config.App+
				" -suite="+d.Config.Suite))
		dbg.Lvl3("Sending stop of ssh")
		d.sshDeter <- "stop"
	}()

	return nil
}
Example #13
func (d *Deter) Stop() error {
	killssh := exec.Command("pkill", "-f", "ssh -t -t")
	killssh.Stdout = os.Stdout
	killssh.Stderr = os.Stderr
	err := killssh.Run()
	if err != nil {
		dbg.Lvl3("Stopping ssh: ", err)
	}
	select {
	case msg := <-d.sshDeter:
		if msg == "stop" {
			dbg.Lvl3("SSh is stopped")
		} else {
			dbg.Lvl1("Received other command", msg)
		}
	case <-time.After(time.Second * 3):
		dbg.Lvl3("Timeout error when waiting for end of ssh")
	}
	return nil
}
Example #14
func ExampleLevel2() {
	dbg.Lvl1("Level1")
	dbg.Lvl3("Level2")
	dbg.Lvl4("Level3")
	dbg.Lvl4("Level4")
	dbg.Lvl5("Level5")

	// Output:
	// 1: (            debug_lvl_test.ExampleLevel2: 0) - Level1
	// 2: (            debug_lvl_test.ExampleLevel2: 0) - Level2
}
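
The expected output suggests this example runs with dbg.DebugVisible set to 2, which is why only the Lvl1 and Lvl2 calls produce output.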
Example #15
func (v *Views) NewView(view int, parent string, children []string, hostlist []string) {
	dbg.Lvl3("New view", view, hostlist)
	v.Lock()
	vi := &View{Num: view, Parent: parent}
	vi.HostList = make([]string, len(hostlist))
	copy(vi.HostList, hostlist)
	vi.Children = make([]string, len(children))
	copy(vi.Children, children)

	v.Views[view] = vi
	v.Unlock()
}
Example #16
// Close closes the connection.
func (tc *TCPConn) Close() {
	dbg.Lvl3("tcpconn: closing connection")
	tc.encLock.Lock()
	defer tc.encLock.Unlock()
	if tc.conn != nil {
		// Ignore the error: the only other possibility is an invalid
		// connection, and we don't care if we close a connection twice.
		tc.conn.Close()
	}
	tc.closed = true
	tc.conn = nil
	tc.enc = nil
	tc.dec = nil
}
Example #17
// WaitSYNs waits until every other peer has SYN'd with this one
func (p *Peer) WaitSYNs() {
	for {
		s := <-p.synChan
		dbg.Lvl3(p.Name, " synChan received Syn id ", s.Id)
		_, ok := p.remote[s.Id]
		if !ok {
			dbg.Fatal(p.Name, "received syn'd notification of an unknown peer... ABORT")
		}
		if len(p.remote) == p.info.N-1 {
			dbg.Lvl2(p.Name, "is SYN'd with every one")
			break
		}
	}
}
Example #18
// TimeStamp sends val to be timestamped by the server TSServerName.
// It blocks until it gets a stamp reply back.
func (c *Client) TimeStamp(val []byte, TSServerName string) error {
	c.Mux.Lock()
	if c.Error != nil {
		c.Mux.Unlock()
		return c.Error
	}
	c.reqno++
	myReqno := c.reqno
	c.doneChan[c.reqno] = make(chan error, 1) // new done channel for new req
	c.Mux.Unlock()
	// send request to TSServer
	err := c.PutToServer(TSServerName,
		&TimeStampMessage{
			Type:  StampRequestType,
			ReqNo: myReqno,
			Sreq:  &StampRequest{Val: val}})
	if err != nil {
		if err != coconet.ErrNotEstablished {
			log.Warn(c.Name(), "error timestamping to ", TSServerName, ": ", err)
		}
		// pass back up all errors from putting to server
		return err
	}

	// get channel associated with request
	c.Mux.Lock()
	myChan := c.doneChan[myReqno]
	c.Mux.Unlock()

	// wait until ProcessStampReply signals that reply was received
	select {
	case err = <-myChan:
		//log.Println("-------------client received  response from" + TSServerName)
		break
	case <-time.After(10 * ROUND_TIME):
		dbg.Lvl3("client timeouted on waiting for response from" + TSServerName)
		break
		// err = ErrClientToTSTimeout
	}
	if err != nil {
		log.Errorln(c.Name(), "error received from DoneChan:", err)
		return err
	}

	// delete the channel as it is no longer needed
	c.Mux.Lock()
	delete(c.doneChan, myReqno)
	c.Mux.Unlock()
	return err
}
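
A sketch of a single stamping round-trip from the caller's side; the server address is invented for illustration:

// stampOnce sends one payload to a single timestamp server and logs the outcome.
func stampOnce(c *Client) {
	if err := c.TimeStamp([]byte("hello"), "10.0.0.1:2011"); err != nil {
		dbg.Lvl2(c.Name(), "stamp request failed:", err)
		return
	}
	dbg.Lvl2(c.Name(), "payload stamped")
}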
Example #19
// Close closes all the connections currently open.
func (h *TCPHost) Close() {
	dbg.Lvl3("tcphost: closing")
	// stop accepting new connections
	atomic.StoreInt64(&h.closed, 1)
	h.listener.Close()

	// close peer connections
	h.PeerLock.Lock()
	for _, p := range h.peers {
		if p != nil {
			p.Close()
		}
	}
	h.PeerLock.Unlock()
}
Example #20
func RunServer(app *config.AppConfig, conf *deploy.Config, hc *config.HostConfig) {
	// run this specific host
	err := hc.Run(false, sign.MerkleTree, app.Hostname)
	if err != nil {
		log.Fatal(err)
	}

	dbg.Lvl3(app.Hostname, "started up in server-mode")

	// Let's start the client if we're the root-node
	if hc.SNodes[0].IsRoot(0) {
		dbg.Lvl1(app.Hostname, "started client")
		RunClient(conf, hc)
	} else {
		// Endless-loop till we stop by tearing down the connections
		for {
			time.Sleep(time.Minute)
		}
	}
}
Example #21
func (d *Deter) Deploy() error {
	dbg.Lvl1("Assembling all files and configuration options")
	os.RemoveAll(d.DeployDir)
	os.Mkdir(d.DeployDir, 0777)

	dbg.Lvl1("Writing config-files")

	d.generateHostsFile()
	d.readHosts()
	d.calculateGraph()
	d.WriteConfig()

	// copy the webfile-directory of the logserver to the remote directory
	err := exec.Command("cp", "-a", d.DeterDir+"/logserver/webfiles",
		d.DeterDir+"/cothority.conf", d.DeployDir).Run()
	if err != nil {
		log.Fatal("error copying webfiles:", err)
	}
	build, err := ioutil.ReadDir(d.BuildDir)
	if err != nil {
		log.Fatal("error reading build-directory:", err)
	}
	for _, file := range build {
		err = exec.Command("cp", d.BuildDir+"/"+file.Name(), d.DeployDir).Run()
		if err != nil {
			log.Fatal("error copying build-file:", err)
		}
	}

	dbg.Lvl1("Copying over to", d.Login, "@", d.Host)
	// Copy everything over to Deterlab
	err = cliutils.Rsync(d.Login, d.Host, d.DeployDir+"/", "remote/")
	if err != nil {
		log.Fatal(err)
	}

	dbg.Lvl1("Done copying")

	dbg.Lvl3(cliutils.SshRunStdout(d.Login, d.Host,
		""))

	return nil
}
Example #22
func (c *Client) AddServer(name string, conn coconet.Conn) {
	//c.Servers[name] = conn
	go func(conn coconet.Conn) {
		maxwait := 30 * time.Second
		curWait := 100 * time.Millisecond
		for {
			err := conn.Connect()
			if err != nil {
				time.Sleep(curWait)
				curWait = curWait * 2
				if curWait > maxwait {
					curWait = maxwait
				}
				continue
			} else {
				c.Mux.Lock()
				c.Servers[name] = conn
				c.Mux.Unlock()
				dbg.Lvl3("Success: connected to server:", conn)
				err := c.handleServer(conn)
				// if a server encounters any terminating error
				// terminate all pending client transactions and kill the client
				if err != nil {
					log.Errorln("EOF detected: sending EOF to all pending TimeStamps")
					c.Mux.Lock()
					for _, ch := range c.doneChan {
						log.Println("Sending to Receiving Channel")
						ch <- io.EOF
					}
					c.Error = io.EOF
					c.Mux.Unlock()
					return
				} else {
					// try reconnecting if it didn't close the channel
					continue
				}
			}
		}
	}(conn)
}
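
The reconnect wait doubles on every failed attempt, from 100 ms up to a 30 s cap, so an unreachable server is retried indefinitely but with bounded frequency.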
Example #23
// Put puts data to the connection.
// Returns ErrClosed on an irrecoverable error.
// Returns the actual error if it is temporary.
func (tc *TCPConn) Put(bm BinaryMarshaler) error {
	if tc.Closed() {
		dbg.Lvl3("tcpconn: put: connection closed")
		return ErrClosed
	}
	tc.encLock.Lock()
	if tc.enc == nil {
		tc.encLock.Unlock()
		return ErrNotEstablished
	}
	enc := tc.enc
	tc.encLock.Unlock()

	err := enc.Encode(bm)
	if err != nil {
		if IsTemporary(err) {
			return err
		}
		tc.Close()
		return ErrClosed
	}
	return err
}
Example #24
func (h *TCPHost) ConnectTo(parent string) error {
	// If we have already set up this connection, don't do anything
	h.PeerLock.Lock()
	if h.Ready[parent] {
		log.Println("ConnectTo: node already ready")
		h.PeerLock.Unlock()
		return nil
	}
	h.PeerLock.Unlock()

	// connect to the parent
	conn, err := net.Dial("tcp4", parent)
	if err != nil {
		dbg.Lvl3("tcphost:", h.Name(), "failed to connect to parent:", err)
		return err
	}
	tp := NewTCPConnFromNet(conn)

	mname := StringMarshaler(h.Name())
	err = tp.Put(&mname)
	if err != nil {
		log.Errorln(err)
		return err
	}
	tp.SetName(parent)

	// give parent the public key
	err = tp.Put(h.Pubkey)
	if err != nil {
		log.Errorln("failed to send public key")
		return err
	}

	// get and set the parents public key
	suite := h.suite
	pubkey := suite.Point()
	err = tp.Get(pubkey)
	if err != nil {
		log.Errorln("failed to establish connection: getting pubkey:", err)
		tp.Close()
		return err
	}
	tp.SetPubKey(pubkey)

	h.PeerLock.Lock()
	h.Ready[tp.Name()] = true
	h.peers[parent] = tp
	// h.PendingPeers[parent] = true
	h.PeerLock.Unlock()
	dbg.Lvl4("CONNECTED TO PARENT:", parent)

	go func() {
		for {
			data := h.pool.Get().(BinaryUnmarshaler)
			err := tp.Get(data)

			h.msgchan <- NetworkMessg{Data: data, From: tp.Name(), Err: err}
		}
	}()

	return nil
}
Example #25
// Listen listens for incoming TCP connections.
// It is a non-blocking call that runs in the background.
// It accepts incoming connections and establishes Peers.
// When a peer attempts to connect it must send over its name (as a StringMarshaler),
// as well as its public key.
// Only after that point can the connection be communicated with.
func (h *TCPHost) Listen() error {
	var err error
	dbg.Lvl3("Starting to listen on", h.name)
	ln, err := net.Listen("tcp4", h.name)
	if err != nil {
		log.Println("failed to listen:", err)
		return err
	}
	h.listener = ln
	go func() {
		for {
			var err error
			dbg.Lvl3(h.Name(), "Accepting incoming")
			conn, err := ln.Accept()
			dbg.Lvl3(h.Name(), "Connection request - handling")
			if err != nil {
				dbg.Lvl3("failed to accept connection: ", err)
				// if the host has been closed then stop listening
				if atomic.LoadInt64(&h.closed) == 1 {
					return
				}
				continue
			}

			// Read in name of client
			tp := NewTCPConnFromNet(conn)
			var mname StringMarshaler
			err = tp.Get(&mname)
			if err != nil {
				log.Errorln("failed to establish connection: getting name: ", err)
				tp.Close()
				continue
			}
			name := string(mname)

			// create connection
			tp.SetName(name)

			// get and set public key
			suite := h.suite
			pubkey := suite.Point()
			err = tp.Get(pubkey)
			if err != nil {
				log.Errorln("failed to establish connection: getting pubkey:", err)
				tp.Close()
				continue
			}
			tp.SetPubKey(pubkey)

			// give child the public key
			err = tp.Put(h.Pubkey)
			if err != nil {
				log.Errorln("failed to send public key:", err)
				continue
			}

			// the connection is now Ready to use
			h.PeerLock.Lock()
			h.Ready[name] = true
			h.peers[name] = tp
			dbg.Lvl3("Connected to child:", tp.Name())
			h.PeerLock.Unlock()

			go func() {
				for {
					data := h.pool.Get().(BinaryUnmarshaler)
					err := tp.Get(data)

					h.msgchan <- NetworkMessg{Data: data, From: tp.Name(), Err: err}
				}
			}()
		}
	}()
	return nil
}
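
h.pool is presumably a sync.Pool (or similar) handing out fresh BinaryUnmarshaler values, so each Get decodes into its own value instead of sharing a buffer across messages.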
Example #26
func main() {
	deter, err := deploy.ReadConfig()
	if err != nil {
		log.Fatal("Couldn't load config-file in forkexec:", err)
	}
	conf = deter.Config
	dbg.DebugVisible = conf.Debug

	flag.Parse()

	// connect with the logging server
	if logger != "" {
		// blocks until we can connect to the logger
		lh, err := logutils.NewLoggerHook(logger, physaddr, conf.App)
		if err != nil {
			log.WithFields(log.Fields{
				"file": logutils.File(),
			}).Fatalln("Error setting up logging server:", err)
		}
		log.AddHook(lh)
	}

	setup_deter()

	i := 0
	var wg sync.WaitGroup
	virts := physToServer[physaddr]
	if len(virts) > 0 {
		dbg.Lvl3("starting timestampers for", len(virts), "client(s)", virts)
		i = (i + 1) % len(loggerports)
		for _, name := range virts {
			dbg.Lvl4("Starting", name, "on", physaddr)
			wg.Add(1)
			go func(nameport string) {
				dbg.Lvl3("Running on", physaddr, "starting", nameport)
				defer wg.Done()

				args := []string{
					"-hostname=" + nameport,
					"-logger=" + logger,
					"-physaddr=" + physaddr,
					"-amroot=" + strconv.FormatBool(nameport == rootname),
					"-test_connect=" + strconv.FormatBool(testConnect),
					"-mode=server",
					"-app=" + conf.App,
				}

				dbg.Lvl3("Starting on", physaddr, "with args", args)
				cmdApp := exec.Command("./app", args...)
				//cmd.Stdout = log.StandardLogger().Writer()
				//cmd.Stderr = log.StandardLogger().Writer()
				cmdApp.Stdout = os.Stdout
				cmdApp.Stderr = os.Stderr
				dbg.Lvl3("fork-exec is running command:", args)
				err = cmdApp.Run()
				if err != nil {
					dbg.Lvl2("cmd run:", err)
				}

				// get CPU usage stats
				st := cmdApp.ProcessState.SystemTime()
				ut := cmdApp.ProcessState.UserTime()
				log.WithFields(log.Fields{
					"file":     logutils.File(),
					"type":     "forkexec",
					"systime":  st,
					"usertime": ut,
				}).Info("")

				dbg.Lvl2("Finished with Timestamper", physaddr)
			}(name)
		}
		dbg.Lvl3(physaddr, "Finished starting timestampers")
		wg.Wait()
	} else {
		dbg.Lvl2("No timestampers for", physaddr)
	}
	dbg.Lvl2(physaddr, "timestampers exited")
}
Example #27
func setup_deter() {
	virt, err := cliutils.ReadLines("virt.txt")
	if err != nil {
		log.Fatal(err)
	}
	phys, err := cliutils.ReadLines("phys.txt")
	if err != nil {
		log.Fatal(err)
	}
	vpmap := make(map[string]string)
	for i := range virt {
		vpmap[virt[i]] = phys[i]
	}
	nloggers := conf.Nloggers
	masterLogger := phys[0]
	loggers := []string{masterLogger}
	for n := 1; n <= nloggers; n++ {
		loggers = append(loggers, phys[n])
	}

	phys = phys[nloggers:]
	virt = virt[nloggers:]

	// Read in and parse the configuration file
	file, err := ioutil.ReadFile("tree.json")
	if err != nil {
		log.Fatal("deter.go: error reading configuration file: %v\n", err)
	}
	dbg.Lvl4("cfg file:", string(file))
	var cf config.ConfigFile
	err = json.Unmarshal(file, &cf)
	if err != nil {
		log.Fatal("unable to unmarshal config.ConfigFile:", err)
	}

	hostnames := cf.Hosts
	dbg.Lvl4("hostnames:", hostnames)

	depth := graphs.Depth(cf.Tree)
	cf.Tree.TraverseTree(func(t *graphs.Tree) {
		if random_leaf != "" {
			return
		}
		if len(t.Children) == 0 {
			random_leaf = t.Name
		}
	})

	rootname = hostnames[0]

	dbg.Lvl4("depth of tree:", depth)

	// mapping from physical node name to the timestamp servers that are running there
	// essentially a reverse mapping of vpmap except ports are also used
	physToServer = make(map[string][]string)
	for _, virt := range hostnames {
		v, _, _ := net.SplitHostPort(virt)
		p := vpmap[v]
		ss := physToServer[p]
		ss = append(ss, virt)
		physToServer[p] = ss
	}
	dbg.Lvl3("PhysToServer is", physToServer)

	loggerports = make([]string, len(loggers))
	for i, logger := range loggers {
		loggerports[i] = logger + ":10000"
	}

}
Example #28
// Dispatch-function for running either client or server (mode-parameter)
func Run(app *config.AppConfig, conf *deploy.Config) {
	// Do some common setup
	if app.Mode == "client" {
		app.Hostname = app.Name
	}
	dbg.Lvl3(app.Hostname, "Starting to run")
	if conf.Debug > 1 {
		sign.DEBUG = true
	}

	if app.Hostname == "" {
		log.Fatal("no hostname given", app.Hostname)
	}

	// load the configuration
	dbg.Lvl3("loading configuration for", app.Hostname)
	var hc *config.HostConfig
	var err error
	s := GetSuite(conf.Suite)
	opts := config.ConfigOptions{ConnType: "tcp", Host: app.Hostname, Suite: s}
	if conf.Failures > 0 || conf.FFail > 0 {
		opts.Faulty = true
	}
	hc, err = config.LoadConfig("tree.json", opts)
	if err != nil {
		log.Fatal(err)
	}

	// Wait for everybody to be ready before going on
	ioutil.WriteFile("coll_stamp_up/up"+app.Hostname, []byte("started"), 0666)
	for {
		_, err := os.Stat("coll_stamp_up")
		if err == nil {
			files, _ := ioutil.ReadDir("coll_stamp_up")
			dbg.Lvl4(app.Hostname, "waiting for others to finish", len(files))
			time.Sleep(time.Second)
		} else {
			break
		}
	}
	dbg.Lvl2(app.Hostname, "thinks everybody's here")

	// set FailureRates
	if conf.Failures > 0 {
		for i := range hc.SNodes {
			hc.SNodes[i].FailureRate = conf.Failures
		}
	}

	// set root failures
	if conf.RFail > 0 {
		for i := range hc.SNodes {
			hc.SNodes[i].FailAsRootEvery = conf.RFail

		}
	}
	// set follower failures:
	// a follower fails on every FFail-th round with FailureRate probability
	for i := range hc.SNodes {
		hc.SNodes[i].FailAsFollowerEvery = conf.FFail
	}

	defer func() {
		dbg.Lvl1("Collective Signing", app.Hostname, "has terminated in mode", app.Mode)
	}()

	switch app.Mode {
	case "client":
		log.Panic("No client mode")
	case "server":
		RunServer(app, conf, hc)
	}
}
Example #29
func streamMessgs(c *Client, servers []string, rate int) {
	dbg.Lvl4(c.Name(), "streaming at given rate", rate)
	// buck[i] = # of timestamp responses received in second i
	buck := make([]int64, MAX_N_SECONDS)
	// roundsAfter[i] = # of timestamp requests that were processed i rounds late
	roundsAfter := make([]int64, MAX_N_ROUNDS)
	times := make([]int64, MAX_N_SECONDS*1000) // maximum number of milliseconds (maximum rate > 1 per millisecond)
	ticker := time.Tick(time.Duration(rate) * time.Millisecond)
	msg := genRandomMessages(1)[0]
	i := 0
	nServers := len(servers)

retry:
	dbg.Lvl3(c.Name(), "checking if", servers[0], "is already up")
	err := c.TimeStamp(msg, servers[0])
	if err == io.EOF || err == coconet.ErrClosed {
		dbg.Lvl4("Client", c.Name(), "DONE: couldn't connect to TimeStamp")
		log.Fatal(AggregateStats(buck, roundsAfter, times))
	} else if err == ErrClientToTSTimeout {
		dbg.Lvl4(err.Error())
	} else if err != nil {
		time.Sleep(500 * time.Millisecond)
		goto retry
	}
	dbg.Lvl3(c.Name(), "successfully connected to", servers[0])

	tFirst := time.Now()

	// every tick send a time coll_stamp request to every server specified
	// this will stream until we get an EOF
	tick := 0
	for range ticker {
		tick += 1
		go func(msg []byte, s string, tick int) {
			t0 := time.Now()
			err := c.TimeStamp(msg, s)
			t := time.Since(t0)

			if err == io.EOF || err == coconet.ErrClosed {
				if err == io.EOF {
					dbg.Lvl4("CLIENT ", c.Name(), "DONE: terminating due to EOF", s)
				} else {
					dbg.Lvl4("CLIENT ", c.Name(), "DONE: terminating due to Connection Error Closed", s)
				}
				log.Fatal(AggregateStats(buck, roundsAfter, times))
			} else if err != nil {
				// ignore errors
				dbg.Lvl4("Client", c.Name(), "Leaving out streamMessages. ", err)
				return
			}

			// TODO: we might want to subtract a buffer from secToTimeStamp
			// to account for computation time
			secToTimeStamp := t.Seconds()
			secSinceFirst := time.Since(tFirst).Seconds()
			atomic.AddInt64(&buck[int(secSinceFirst)], 1)
			index := int(secToTimeStamp) / int(ROUND_TIME/time.Second)
			atomic.AddInt64(&roundsAfter[index], 1)
			atomic.AddInt64(&times[tick], t.Nanoseconds())

		}(msg, servers[i], tick)

		i = (i + 1) % nServers
	}

}
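
On the accounting: buck counts replies by the wall-clock second since the first request, roundsAfter buckets each reply by how many whole ROUND_TIME periods its round trip took, and times records the per-tick round-trip time in nanoseconds; all three feed AggregateStats.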
Example #30
// Run listens for client connections. If the role is root, it also sends an
// announcement for each of the nRounds
func (s *Server) Run(role string, nRounds int) {

	dbg.Lvl3("Stamp-server", s.name, "starting with ", role, "and rounds", nRounds)
	closed := make(chan bool, 1)

	go func() { err := s.Signer.Listen(); closed <- true; s.Close(); log.Error(err) }()
	if role == "test_connect" {
		role = "regular"
		go func() {
			//time.Sleep(30 * time.Second)
			hostlist := s.Hostlist()
			ticker := time.Tick(15 * time.Second)
			i := 0
			for range ticker {
				select {
				case <-closed:
					dbg.Lvl4("server.Run: received closed")
					return
				default:
				}
				if i%2 == 0 {
					dbg.Lvl4("removing self")
					s.Signer.RemoveSelf()
				} else {
					dbg.Lvl4("adding self: ", hostlist[(i/2)%len(hostlist)])
					s.Signer.AddSelf(hostlist[(i/2)%len(hostlist)])
				}
				i++
			}
		}()
	}
	s.rLock.Lock()
	s.maxRounds = nRounds
	s.rLock.Unlock()

	var nextRole string // next role when view changes
	for {
		switch role {

		case "root":
			dbg.Lvl4("running as root")
			nextRole = s.runAsRoot(nRounds)
		case "regular":
			dbg.Lvl4("running as regular")
			nextRole = s.runAsRegular()
		case "test":
			dbg.Lvl4("running as test")
			ticker := time.Tick(2000 * time.Millisecond)
			for range ticker {
				s.AggregateCommits(0)
			}
		default:
			dbg.Lvl4("UNABLE TO RUN AS ANYTHING")
			return
		}

		// dbg.Lvl4(s.Name(), "nextRole: ", nextRole)
		if nextRole == "close" {
			s.Close()
			return
		}
		if nextRole == "" {
			return
		}
		s.LogReRun(nextRole, role)
		role = nextRole
	}

}