Code Example #1
File: node.go Project: mlncn/cothority
// GetAddress gets the localhost's IPv4 address.
func GetAddress() (string, error) {
	name, err := os.Hostname()
	if err != nil {
		dbg.Error("Error Resolving Hostname:", err)
		return "", err
	}

	if ipv4host == "NONE" {
		as, err := net.LookupHost(name)
		if err != nil {
			return "", err
		}

		addr := ""

		for _, a := range as {
			dbg.Lvl4("a = %+v", a)
			if ipv4Reg.MatchString(a) {
				dbg.Lvl4("matches")
				addr = a
			}
		}

		if addr == "" {
			err = errors.New("No IPv4 Address for Hostname")
		}
		return addr, err
	}
	return ipv4host, nil
}
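GetAddress relies on the package-level ipv4host and ipv4Reg, which are not part of the snippet. A minimal standalone sketch of what they might look like, with ipv4Reg as a dotted-quad pattern and "NONE" as the not-configured sentinel (both are assumptions, not the project's actual definitions):

package main

import (
	"fmt"
	"regexp"
)

// Hypothetical stand-ins for the package-level variables GetAddress uses.
var (
	ipv4host = "NONE" // sentinel: no address configured, fall back to lookup
	ipv4Reg  = regexp.MustCompile(`^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$`)
)

func main() {
	fmt.Println("ipv4host:", ipv4host)
	for _, a := range []string{"192.168.1.10", "::1", "fe80::1"} {
		fmt.Println(a, "matches IPv4:", ipv4Reg.MatchString(a))
	}
}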
Code Example #2
File: localhost.go Project: mlncn/cothority
func (d *Localhost) Start(args ...string) error {
	os.Chdir(d.RunDir)
	dbg.Lvl4("Localhost: chdir into", d.RunDir)
	ex := d.RunDir + "/" + d.App
	dbg.Lvl4("Localhost: in Start() => hosts", d.Hosts)
	d.running = true
	dbg.Lvl1("Starting", len(d.Hosts), "applications of", ex)
	for index, host := range d.Hosts {
		dbg.Lvl3("Starting", index, "=", host)
		amroot := fmt.Sprintf("-amroot=%s", strconv.FormatBool(index == 0))
		cmdArgs := []string{"-hostname", host, "-mode", "server", "-logger",
			"localhost:" + monitor.SinkPort, amroot}
		cmdArgs = append(args, cmdArgs...)
		dbg.Lvl3("CmdArgs are", cmdArgs)
		cmd := exec.Command(ex, cmdArgs...)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		d.wg_run.Add(1)
		go func(i int, h string) {
			dbg.Lvl3("Localhost: will start host", h)
			err := cmd.Run()
			if err != nil {
				dbg.Lvl3("Error running localhost", h, ":", err)
			}
			d.wg_run.Done()
			dbg.Lvl3("host (index", i, ")", h, "done")
		}(index, host)
	}
	return nil
}
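Note the ordering fixed above: wg_run.Add must run before the goroutine is spawned (and the goroutine should use its h parameter, not the loop variable), otherwise a concurrent Wait can return early. A compact, self-contained illustration of the Add-before-go pattern:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1) // increment before the goroutine starts, never inside it
		go func(i int) {
			defer wg.Done()
			fmt.Println("host", i, "done")
		}(i)
	}
	wg.Wait() // safe: the counter already accounts for every worker
}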
Code Example #3
File: tcphost.go Project: mlncn/cothority
// PutDown sends a message (an interface{} value) down to all children through
// whatever 'network' interface each child Peer implements.
func (h *TCPHost) PutDown(ctx context.Context, view int, data []BinaryMarshaler) error {
	// Try to send the message to all children
	// If at least one of the attempts fails, return a non-nil error
	var err error
	var errLock sync.Mutex
	children := h.views.Children(view)
	if len(data) != len(children) {
		panic("number of messages passed down != number of children")
	}
	var canceled int64
	var wg sync.WaitGroup
	dbg.Lvl4(h.Name(), "sending to", len(children), "children")
	for i, c := range children {
		dbg.Lvl4("Sending to child", c)
		wg.Add(1)
		go func(i int, c string) {
			defer wg.Done()
			// try until it is canceled, successful, or timed-out
			for {
				// check to see if it has been canceled
				if atomic.LoadInt64(&canceled) == 1 {
					return
				}

				// if it is not Ready try again later
				h.PeerLock.Lock()
				Ready := h.Ready[c]
				conn := h.peers[c]
				h.PeerLock.Unlock()
				if Ready {
					if e := conn.PutData(data[i]); e != nil {
						errLock.Lock()
						err = e
						errLock.Unlock()
					}
					dbg.Lvl4("Informed child", c, "of", data[i])
					return
				}
				dbg.Lvl4("Re-trying, waiting to put down msg from", h.Name(), "to", c)
				time.Sleep(250 * time.Millisecond)
			}

		}(i, c)
	}
	done := make(chan struct{})
	go func() {
		wg.Wait()
		done <- struct{}{}
	}()

	select {
	case <-done:
	case <-ctx.Done():
		err = ctx.Err()
		atomic.StoreInt64(&canceled, 1)
	}

	return err
}
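The select at the end is the standard trick for waiting on a WaitGroup with cancellation: Wait has no channel form, so a helper goroutine signals completion. A runnable sketch of that wait-or-cancel pattern (names here are illustrative, not from the project):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// waitOrCancel blocks until the WaitGroup drains or the context ends,
// whichever comes first.
func waitOrCancel(ctx context.Context, wg *sync.WaitGroup) error {
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	select {
	case <-done:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() { defer wg.Done(); time.Sleep(50 * time.Millisecond) }()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println("err:", waitOrCancel(ctx, &wg)) // err: <nil>
}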
Code Example #4
File: roundexception.go Project: mlncn/cothority
func (round *RoundException) Response(in []*SigningMessage, out *SigningMessage) error {
	if round.Name == ExceptionForceFailure {
		dbg.Lvl1("Forcing failure in response")
		round.RaiseException()
	}

	// initialize exception handling
	nullPoint := round.Cosi.Suite.Point().Null()

	children := round.Cosi.Children
	for _, sm := range in {
		from := sm.From
		switch sm.Type {
		default:
			// default == no response from child
			dbg.Lvl4(round.Name, "Empty response from child", from, sm.Type)
			if children[from] != nil {
				round.Cosi.ExceptionList = append(round.Cosi.ExceptionList, children[from].PubKey())

				// remove public keys and point commits from subtree of failed child
				round.Cosi.ExceptionX_hat.Add(round.Cosi.ExceptionX_hat, round.Cosi.ChildX_hat[from])
				round.Cosi.ExceptionV_hat.Add(round.Cosi.ExceptionV_hat, round.Cosi.ChildV_hat[from])
			}
			continue
		case Response:
			// disregard response from children who did not commit
			_, ok := round.Cosi.ChildV_hat[from]
			if ok && round.Cosi.ChildV_hat[from].Equal(nullPoint) {
				dbg.Lvl4(round.Name, ": no response from", from, sm.Type)
				continue
			}

			dbg.Lvl4(round.Name, "accepts response from", from, sm.Type)
			round.Cosi.ExceptionV_hat.Add(round.Cosi.ExceptionV_hat, sm.Rm.ExceptionV_hat)
			round.Cosi.ExceptionX_hat.Add(round.Cosi.ExceptionX_hat, sm.Rm.ExceptionX_hat)
			round.Cosi.ExceptionList = append(round.Cosi.ExceptionList, sm.Rm.ExceptionList...)
		}
	}

	round.Cosi.X_hat.Sub(round.Cosi.X_hat, round.Cosi.ExceptionX_hat)

	err := round.RoundCosi.Response(in, out)
	if err != nil {
		return err
	}

	out.Rm.ExceptionList = round.Cosi.ExceptionList
	out.Rm.ExceptionV_hat = round.Cosi.ExceptionV_hat
	out.Rm.ExceptionX_hat = round.Cosi.ExceptionX_hat
	return nil
}
Code Example #5
File: nodevoting.go Project: mlncn/cothority
func (sn *Node) NotifyOfAction(view int, v *Vote) {
	dbg.Lvl4(sn.Name(), "Notifying node to be added/removed of action")
	gcm := &SigningMessage{
		Suite:   sn.Suite().String(),
		Type:    GroupChanged,
		From:    sn.Name(),
		ViewNbr: view,
		//LastSeenVote: int(sn.LastSeenVote),
		Gcm: &GroupChangedMessage{
			V:        v,
			HostList: sn.HostListOn(view)}}

	switch v.Type {
	case AddVT:
		if sn.Name() == v.Av.Parent {
			sn.PutTo(context.TODO(), v.Av.Name, gcm)
		}
	case RemoveVT:
		if sn.Name() == v.Rv.Parent {
			sn.PutTo(context.TODO(), v.Rv.Name, gcm)
		}
	default:
		dbg.Error("notifyofaction: unkown action type")
	}
}
Code Example #6
File: nodevoting.go Project: mlncn/cothority
func (sn *Node) StartGossip() {
	go func() {
		t := time.Tick(GOSSIP_TIME)
		for {
			select {
			case <-t:
				sn.viewmu.Lock()
				c := sn.HostListOn(sn.ViewNo)
				sn.viewmu.Unlock()
				if len(c) == 0 {
					dbg.Error(sn.Name(), "StartGossip: none in hostlist for view:", sn.ViewNo, len(c))
					continue
				}
				sn.randmu.Lock()
				from := c[sn.Rand.Int()%len(c)]
				sn.randmu.Unlock()
				dbg.Lvl4("Gossiping with:", from)
				// we don't use voting anyway
				// sn.CatchUp(int(atomic.LoadInt64(&sn.LastAppliedVote)+1), from)
			case <-sn.closed:
				dbg.Lvl3("stopping gossip: closed")
				return
			}
		}
	}()
}
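time.Tick never releases its underlying ticker, so it leaks once this goroutine returns on sn.closed. A sketch of the same loop shape with an explicitly stopped ticker; the 100ms interval here merely stands in for GOSSIP_TIME:

package main

import (
	"fmt"
	"time"
)

func main() {
	closed := make(chan struct{})
	time.AfterFunc(350*time.Millisecond, func() { close(closed) })

	// NewTicker plus a deferred Stop releases the ticker that time.Tick leaks.
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			fmt.Println("tick: would gossip here")
		case <-closed:
			fmt.Println("stopping gossip: closed")
			return
		}
	}
}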
Code Example #7
File: nodeprotocol.go Project: mlncn/cothority
// StatusReturn just adds up all children and sends the result to
// the parent
func (sn *Node) StatusReturn(view int, sm *SigningMessage) error {
	sn.PeerStatusRcvd += 1
	sn.PeerStatus.Responders += sm.SRm.Responders
	sn.PeerStatus.Peers += sm.SRm.Peers

	// Wait for other children before propagating the message
	if sn.PeerStatusRcvd < len(sn.Children(view)) {
		dbg.Lvl3(sn.Name(), "Waiting for other children")
		return nil
	}

	var err error
	if sn.IsRoot(view) {
		// Add the root-node
		sn.PeerStatus.Peers += 1
		dbg.Lvl3("We got", sn.PeerStatus.Responders, "responses from", sn.PeerStatus.Peers, "peers.")
	} else {
		dbg.Lvl4(sn.Name(), "puts up statusReturn for", sn.PeerStatus)
		ctx := context.TODO()
		sm.SRm = &sn.PeerStatus
		err = sn.PutUp(ctx, view, sm)
	}
	dbg.Lvl3("Deleting round", sm.RoundNbr, sn.Rounds)
	delete(sn.Rounds, sm.RoundNbr)
	return err
}
Code Example #8
File: forkexec.go Project: mlncn/cothority
func setup_deter() {
	vpmap := make(map[string]string)
	for i := range deter.Virt {
		vpmap[deter.Virt[i]] = deter.Phys[i]
	}

	hostnames := deter.Hostnames
	dbg.Lvl4("hostnames:", hostnames)

	rootname = hostnames[0]

	// mapping from physical node name to the app servers that are running there
	// essentially a reverse mapping of vpmap except ports are also used
	physToServer = make(map[string][]string)
	for _, virt := range hostnames {
		v, _, _ := net.SplitHostPort(virt)
		p := vpmap[v]
		ss := physToServer[p]
		ss = append(ss, virt)
		physToServer[p] = ss
	}
	dbg.Lvl3("PhysToServer is", physToServer)

}
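The physToServer construction is a reverse multimap: vpmap goes virtual to physical, and hostnames (which carry ports) are grouped under their physical machine. A self-contained sketch with made-up sample data:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Made-up sample data in the shape setup_deter expects.
	virt := []string{"10.0.0.1", "10.0.0.2"}
	phys := []string{"machine-a", "machine-b"}
	hostnames := []string{"10.0.0.1:2000", "10.0.0.1:2005", "10.0.0.2:2000"}

	// virtual IP -> physical machine
	vpmap := make(map[string]string)
	for i := range virt {
		vpmap[virt[i]] = phys[i]
	}

	// reverse mapping: physical machine -> servers (host:port) running on it
	physToServer := make(map[string][]string)
	for _, h := range hostnames {
		v, _, _ := net.SplitHostPort(h)
		physToServer[vpmap[v]] = append(physToServer[vpmap[v]], h)
	}
	fmt.Println(physToServer)
	// map[machine-a:[10.0.0.1:2000 10.0.0.1:2005] machine-b:[10.0.0.2:2000]]
}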
Code Example #9
File: roundstamper.go Project: mlncn/cothority
func (round *RoundStamper) Commitment(in []*sign.SigningMessage, out *sign.SigningMessage) error {
	// compute the local Merkle root

	// give up if nothing to process
	if len(round.StampQueue) == 0 {
		round.StampRoot = make([]byte, hashid.Size)
		round.StampProofs = make([]proof.Proof, 1)
	} else {
		// pull out to be Merkle Tree leaves
		round.StampLeaves = make([]hashid.HashId, 0)
		for _, msg := range round.StampQueue {
			round.StampLeaves = append(round.StampLeaves, hashid.HashId(msg))
		}

		// create Merkle tree for this round's messages and check correctness
		round.StampRoot, round.StampProofs = proof.ProofTree(round.Suite.Hash, round.StampLeaves)
		if dbg.DebugVisible > 2 {
			if proof.CheckLocalProofs(round.Suite.Hash, round.StampRoot, round.StampLeaves, round.StampProofs) {
				dbg.Lvl4("Local Proofs of", round.Name, "successful for round "+
					strconv.Itoa(round.RoundNbr))
			} else {
				panic("Local Proofs" + round.Name + " unsuccessful for round " +
					strconv.Itoa(round.RoundNbr))
			}
		}
	}
	out.Com.MTRoot = round.StampRoot
	round.RoundCosi.Commitment(in, out)
	return nil
}
Code Example #10
File: cosistruct.go Project: mlncn/cothority
// Called by every node after receiving aggregate responses from descendants
func (cosi *CosiStruct) VerifyResponses() error {

	// Check that: base**r_hat * X_hat**c == V_hat
	// Equivalent to base**(r+xc) == base**(v) == T in vanillaElGamal
	Aux := cosi.Suite.Point()
	V_clean := cosi.Suite.Point()
	V_clean.Add(V_clean.Mul(nil, cosi.R_hat), Aux.Mul(cosi.X_hat, cosi.C))
	// T is the recreated V_hat
	T := cosi.Suite.Point().Null()
	T.Add(T, V_clean)
	T.Add(T, cosi.ExceptionV_hat)

	var c2 abstract.Secret
	isroot := cosi.Parent == ""
	if isroot {
		// round challenge must be recomputed given potential
		// exception list
		msg := cosi.Msg
		msg = append(msg, []byte(cosi.MTRoot)...)
		cosi.C = cosi.HashElGamal(msg, cosi.Log.V_hat)
		c2 = cosi.HashElGamal(msg, T)
	}

	// intermediary nodes check partial responses against their partial keys
	// the root node is also able to check against the challenge it emitted
	if !T.Equal(cosi.Log.V_hat) || (isroot && !cosi.C.Equal(c2)) {
		return errors.New("Verifying ElGamal Collective Signature failed in " +
			cosi.Name)
	} else if isroot {
		dbg.Lvl4(cosi.Name, "reports ElGamal Collective Signature succeeded")
	}
	return nil
}
Code Example #11
File: nodeconsensus.go Project: mlncn/cothority
func (sn *Node) TryViewChange(view int) error {
	dbg.Lvl4(sn.Name(), "TRY VIEW CHANGE on", view, "with last view", sn.ViewNo)
	// should ideally be compare and swap
	sn.viewmu.Lock()
	if view <= sn.ViewNo {
		sn.viewmu.Unlock()
		return errors.New("trying to view change on previous/ current view")
	}
	if sn.ChangingView {
		sn.viewmu.Unlock()
		return ChangingViewError
	}
	sn.ChangingView = true
	sn.viewmu.Unlock()

	// take action if new view root
	if sn.Name() == sn.RootFor(view) {
		dbg.Fatal(sn.Name(), "Initiating view change for view:", view, "BTH")
		/*
			go func() {
				err := sn.StartVotingRound(
					&Vote{
						View: view,
						Type: ViewChangeVT,
						Vcv: &ViewChangeVote{
							View: view,
							Root: sn.Name()}})
				if err != nil {
					dbg.Lvl2(sn.Name(), "Try view change failed:", err)
				}
			}()
		*/
	}
	return nil
}
Code Example #12
File: nodehelper.go Project: mlncn/cothority
func (sn *Node) Close() {
	// sn.printRoundTypes()
	sn.hbLock.Lock()
	if sn.heartbeat != nil {
		sn.heartbeat.Stop()
		sn.heartbeat = nil
		dbg.Lvl4("after close", sn.Name(), "has heartbeat=", sn.heartbeat)
	}
	if !sn.Isclosed {
		close(sn.closed)
		dbg.Lvl4("signing node: closing:", sn.Name())
		sn.Host.Close()
	}
	dbg.Lvl3("Closed connection")
	sn.Isclosed = true
	sn.hbLock.Unlock()
}
Code Example #13
File: utils.go Project: mlncn/cothority
func Build(path, out, goarch, goos string) (string, error) {
	var cmd *exec.Cmd
	var b bytes.Buffer
	build_buffer := bufio.NewWriter(&b)

	wd, _ := os.Getwd()
	dbg.Lvl4("In directory", wd)

	cmd = exec.Command("go", "build", "-v", "-o", out, path)
	dbg.Lvl4("Building", cmd.Args, "in", path)
	cmd.Stdout = build_buffer
	cmd.Stderr = build_buffer
	// For duplicate keys os/exec keeps the last entry, so GOOS/GOARCH must
	// come after the inherited environment to reliably override it.
	cmd.Env = append(os.Environ(), "GOOS="+goos, "GOARCH="+goarch)
	dbg.Lvl4("Command:", cmd.Args)
	err := cmd.Run()
	dbg.Lvl4(b.String())
	return b.String(), err
}
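The original prepended GOOS/GOARCH before os.Environ(); since os/exec keeps the last value for duplicate keys, an inherited GOOS could silently win, which is why the append order is reversed above. A quick check of that behavior (assumes a local Go toolchain):

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Entries later in Env win for duplicate keys, so these overrides
	// take effect even if GOOS/GOARCH are already set in the shell.
	cmd := exec.Command("go", "env", "GOOS", "GOARCH")
	cmd.Env = append(os.Environ(), "GOOS=linux", "GOARCH=arm64")
	out, err := cmd.CombinedOutput()
	fmt.Print(string(out)) // linux and arm64, one per line
	if err != nil {
		fmt.Println("error:", err)
	}
}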
Code Example #14
File: nodevoting.go Project: mlncn/cothority
func (sn *Node) CatchUp(vi int, from string) {
	dbg.Lvl4(sn.Name(), "attempting to catch up vote", vi)

	ctx := context.TODO()
	sn.PutTo(ctx, from,
		&SigningMessage{
			Suite: sn.Suite().String(),
			From:  sn.Name(),
			Type:  CatchUpReq,
			Cureq: &CatchUpRequest{Index: vi}})
}
Code Example #15
File: tcphost.go Project: mlncn/cothority
// AddParent adds a parent node to the TCPHost, for the given view.
func (h *TCPHost) AddParent(view int, c string) {
	h.PeerLock.Lock()
	if _, ok := h.peers[c]; !ok {
		h.peers[c] = NewTCPConn(c)
	}
	// remove from pending peers list
	delete(h.PendingPeers, c)
	h.PeerLock.Unlock()
	dbg.Lvl4("Adding parent to views on", h.Name(), "for", c)
	h.views.AddParent(view, c)
}
Code Example #16
File: localhost.go Project: mlncn/cothority
// GenerateHosts generates the list of hosts,
// each with its own port
func (d *Localhost) GenerateHosts() {
	nrhosts := d.Machines * d.Ppm
	d.Hosts = make([]string, nrhosts)
	port := 2000
	inc := 5
	for i := 0; i < nrhosts; i++ {
		s := "127.0.0.1:" + strconv.Itoa(port+inc*i)
		d.Hosts[i] = s
	}
	dbg.Lvl4("Localhost: Generated hosts list", d.Hosts)
}
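For illustration, the generation loop extracted into a runnable snippet: four hosts on 127.0.0.1 with ports spaced five apart, mirroring the port/inc values above:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	nrhosts, port, inc := 4, 2000, 5
	hosts := make([]string, nrhosts)
	for i := 0; i < nrhosts; i++ {
		hosts[i] = "127.0.0.1:" + strconv.Itoa(port+inc*i)
	}
	fmt.Println(hosts) // [127.0.0.1:2000 127.0.0.1:2005 127.0.0.1:2010 127.0.0.1:2015]
}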
Code Example #17
File: debug_lvl_test.go Project: mlncn/cothority
func ExampleLevel2() {
	dbg.DebugVisible = 2
	dbg.Lvl1("Level1")
	dbg.Lvl2("Level2")
	dbg.Lvl3("Level3")
	dbg.Lvl4("Level4")
	dbg.Lvl5("Level5")

	// Output:
	// 1: (                  dbg_test.ExampleLevel2:   0) - Level1
	// 2: (                  dbg_test.ExampleLevel2:   0) - Level2
}
Code Example #18
File: utils.go Project: mlncn/cothority
func SshRunStdout(username, host, command string) error {
	addr := host
	if username != "" {
		addr = username + "@" + addr
	}

	dbg.Lvl4("Going to ssh to", addr, command)
	cmd := exec.Command("ssh", "-o", "StrictHostKeyChecking=no", addr,
		"eval '"+command+"'")
	cmd.Stderr = os.Stderr
	cmd.Stdout = os.Stdout
	return cmd.Run()
}
Code Example #19
File: roundcosi.go Project: mlncn/cothority
// TODO: pass in == nil when we are a leaf, to stay consistent with
// other calls
func (round *RoundCosi) Response(in []*SigningMessage, out *SigningMessage) error {
	dbg.Lvl4(round.Cosi.Name, "got all responses")
	for _, sm := range in {
		round.Cosi.R_hat.Add(round.Cosi.R_hat, sm.Rm.R_hat)
	}
	err := round.Cosi.VerifyResponses()
	if err != nil {
		dbg.Lvl3(round.Node.Name(), "Could not verify responses")
		return err
	}
	out.Rm.R_hat = round.Cosi.R_hat
	return nil
}
Code Example #20
File: nodevoting.go Project: mlncn/cothority
func (sn *Node) AddSelf(parent string) error {
	dbg.Lvl4("AddSelf: connecting to:", parent)
	err := sn.ConnectTo(parent)
	if err != nil {
		return err
	}

	dbg.Lvl4("AddSelf: putting group change message to:", parent)
	return sn.PutTo(
		context.TODO(),
		parent,
		&SigningMessage{
			Suite:   sn.Suite().String(),
			Type:    GroupChange,
			ViewNbr: -1,
			Vrm: &VoteRequestMessage{
				Vote: &Vote{
					Type: AddVT,
					Av: &AddVote{
						Name:   sn.Name(),
						Parent: parent}}}})
}
Code Example #21
File: stamper.go Project: mlncn/cothority
func RunClient(flags *app.Flags, conf *app.ConfigColl) {
	dbg.Lvl4("Starting to run stampclient")
	c := NewClient(flags.Name)
	servers := strings.Split(flags.Server, ",")
	// take the right percentage of servers
	servers = scaleServers(flags, conf, servers)
	// connect to all the servers listed
	for _, s := range servers {
		h, p, err := net.SplitHostPort(s)
		if err != nil {
			log.Fatal("improperly formatted host")
		}
		pn, _ := strconv.Atoi(p)
		c.AddServer(s, coconet.NewTCPConn(net.JoinHostPort(h, strconv.Itoa(pn+1))))
	}
	// Stream time coll_stamp requests
	// if rate specified send out one message every rate milliseconds
	dbg.Lvl3(flags.Name, "starting to stream at rate", conf.Rate, "with root", flags.AmRoot)
	streamMessgs(c, servers, conf.Rate)
	dbg.Lvl4("Finished streaming", flags.Name)
	return
}
Code Example #22
File: client_funcs.go Project: mlncn/cothority
// When client asks for val to be timestamped
// It blocks until it get a coll_stamp reply back
func (c *Client) TimeStamp(val []byte, TSServerName string) error {
	c.Mux.Lock()
	if c.Error != nil {
		c.Mux.Unlock()
		return c.Error
	}
	c.reqno++
	myReqno := c.reqno
	c.doneChan[c.reqno] = make(chan error, 1) // new done channel for new req
	c.Mux.Unlock()
	// send request to TSServer
	err := c.PutToServer(TSServerName,
		&conode.TimeStampMessage{
			Type:  conode.StampRequestType,
			ReqNo: myReqno,
			Sreq:  &conode.StampRequest{Val: val}})
	if err != nil {
		if err != coconet.ErrNotEstablished {
			dbg.Lvl3(c.Name(), "error timestamping to", TSServerName, ":", err)
		}
		// pass back up all errors from putting to server
		return err
	}
	dbg.Lvl4("Client Sent timestamp request to", TSServerName)

	// get channel associated with request
	c.Mux.Lock()
	myChan := c.doneChan[myReqno]
	c.Mux.Unlock()

	// wait until ProcessStampSignature signals that reply was received
	select {
	case err = <-myChan:
		//log.Println("-------------client received  response from" + TSServerName)
		break
	case <-time.After(10 * sign.ROUND_TIME):
		dbg.Lvl3("client timeouted on waiting for response from" + TSServerName)
		break
		// err = ErrClientToTSTimeout
	}
	if err != nil {
		dbg.Lvl3(c.Name(), "error received from DoneChan:", err)
		return err
	}

	// delete the channel as it is no longer needed
	c.Mux.Lock()
	delete(c.doneChan, myReqno)
	c.Mux.Unlock()
	return err
}
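The reqno/doneChan bookkeeping is a per-request completion map: each request registers a buffered channel under a fresh sequence number, and the reply handler signals exactly that waiter. A stripped-down, runnable sketch of the pattern (type and method names here are illustrative, not project API):

package main

import (
	"fmt"
	"sync"
	"time"
)

type pending struct {
	mu    sync.Mutex
	reqno uint64
	done  map[uint64]chan error
}

// register allocates a sequence number and its done channel.
func (p *pending) register() (uint64, chan error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.reqno++
	ch := make(chan error, 1) // buffered: the reply never blocks
	p.done[p.reqno] = ch
	return p.reqno, ch
}

// complete signals the single waiter for id and forgets the channel.
func (p *pending) complete(id uint64, err error) {
	p.mu.Lock()
	ch := p.done[id]
	delete(p.done, id)
	p.mu.Unlock()
	if ch != nil {
		ch <- err
	}
}

func main() {
	p := &pending{done: make(map[uint64]chan error)}
	id, ch := p.register()
	go func() { p.complete(id, nil) }() // simulated server reply
	select {
	case err := <-ch:
		fmt.Println("reply:", err)
	case <-time.After(time.Second):
		fmt.Println("timed out")
	}
}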
Code Example #23
File: localhost.go Project: mlncn/cothority
// ReadConfig reads in the localhost-config and aborts if there is an error
func (d *Localhost) ReadConfig(name ...string) {
	configName := defaultConfigName
	if len(name) > 0 {
		configName = name[0]
	}
	err := app.ReadTomlConfig(d, configName)
	_, caller, line, _ := runtime.Caller(1)
	who := caller + ":" + strconv.Itoa(line)
	if err != nil {
		dbg.Fatal("Couldn't read config in", who, ":", err)
	}
	dbg.DebugVisible = d.Debug
	dbg.Lvl4("Localhost: read the config, Hosts", d.Hosts)
}
Code Example #24
File: roundcosi.go Project: mlncn/cothority
func (round *RoundCosi) Commitment(in []*SigningMessage, out *SigningMessage) error {
	cosi := round.Cosi
	cosi.Commits = in

	// Create the mapping between children and their respective public key + commitment
	// V for commitment
	children := cosi.Children
	cosi.ChildV_hat = make(map[string]abstract.Point, len(children))
	// X for public key
	cosi.ChildX_hat = make(map[string]abstract.Point, len(children))

	for key := range children {
		cosi.ChildX_hat[key] = cosi.Suite.Point().Null()
		cosi.ChildV_hat[key] = cosi.Suite.Point().Null()
	}

	// Commits from children are the first Merkle Tree leaves for the round
	cosi.Leaves = make([]hashid.HashId, 0)
	cosi.LeavesFrom = make([]string, 0)
	for _, sm := range cosi.Commits {
		from := sm.From
		// MTR ==> root of sub-merkle tree
		cosi.Leaves = append(cosi.Leaves, sm.Com.MTRoot)
		cosi.LeavesFrom = append(cosi.LeavesFrom, from)
		cosi.ChildV_hat[from] = sm.Com.V_hat
		cosi.ChildX_hat[from] = sm.Com.X_hat

		// Aggregation
		// add good child server to combined public key, and point commit
		cosi.X_hat.Add(cosi.X_hat, sm.Com.X_hat)
		cosi.Log.V_hat.Add(cosi.Log.V_hat, sm.Com.V_hat)
		//dbg.Lvl4("Adding aggregate public key from ", from, " : ", sm.Com.X_hat)
	}

	dbg.Lvl4("Node.Commit using Merkle")
	cosi.MerkleAddChildren()

	round.Cosi.MerkleAddLocal(out.Com.MTRoot)
	round.Cosi.MerkleHashLog()
	round.Cosi.ComputeCombinedMerkleRoot()

	out.Com.V = round.Cosi.Log.V
	out.Com.V_hat = round.Cosi.Log.V_hat
	out.Com.X_hat = round.Cosi.X_hat
	out.Com.MTRoot = round.Cosi.MTRoot
	return nil
}
Code Example #25
File: nodeutils.go Project: mlncn/cothority
func (sn *Node) ReceivedHeartbeat(view int) {
	// XXX heartbeat should be associated with a specific view
	// if we get a heartbeat for an old view then nothing should change
	// there is a problem here where we could, if we receive a heartbeat
	// from an old view, try viewchanging into a view that we have already been to
	sn.hbLock.Lock()
	// heartbeat is nil if we have just closed the signing node
	if sn.heartbeat != nil {
		sn.heartbeat.Stop()
		sn.heartbeat = time.AfterFunc(HEARTBEAT, func() {
			dbg.Lvl4(sn.Name(), "NO HEARTBEAT - try view change:", view)
			sn.TryViewChange(view + 1)
		})
	}
	sn.hbLock.Unlock()

}
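The Stop-then-AfterFunc sequence is a timer-reset idiom: each heartbeat pushes the failure callback back by one window. A standalone sketch, with a 100ms window standing in for HEARTBEAT:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var mu sync.Mutex
	var hb *time.Timer
	const window = 100 * time.Millisecond

	reset := func() {
		mu.Lock()
		defer mu.Unlock()
		if hb != nil {
			hb.Stop() // cancel the pending failure callback
		}
		hb = time.AfterFunc(window, func() {
			fmt.Println("NO HEARTBEAT - would try view change")
		})
	}

	reset() // arm the watchdog
	time.Sleep(50 * time.Millisecond)
	reset() // heartbeat arrived in time: the window restarts
	time.Sleep(200 * time.Millisecond) // no further heartbeat: callback fires
}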
Code Example #26
File: nodevoting.go Project: mlncn/cothority
func (sn *Node) ApplyAction(view int, v *Vote) {
	dbg.Lvl4(sn.Name(), "APPLYING ACTION")
	switch v.Type {
	case AddVT:
		sn.AddPeerToHostlist(view, v.Av.Name)
		if sn.Name() == v.Av.Parent {
			sn.AddChildren(view, v.Av.Name)
		}
	case RemoveVT:
		// removes node from Hostlist, and from children list
		sn.RemovePeer(view, v.Rv.Name)
		// not closing the TCP connection on remove: if the view change
		// does not go through, a connection essential to the old/current view would be closed
	default:
		dbg.Error("applyvote: unknown action type")
	}
}
Code Example #27
File: app.go Project: mlncn/cothority
// StartedUp waits for everybody to start by contacting the
// monitor. Argument is total number of peers.
func (f Flags) StartedUp(total int) {
	monitor.Ready(f.Logger)
	// Wait for everybody to be ready before going on
	for {
		s, err := monitor.GetReady(f.Logger)
		if err != nil {
			dbg.Lvl1("Couldn't reach monitor")
		} else if s.Ready == total {
			break
		} else {
			dbg.Lvl4(f.Hostname, "waiting for others to finish", s.Ready, total)
		}
		time.Sleep(time.Second)
	}
	dbg.Lvl3(f.Hostname, "thinks everybody's here")
}
Code Example #28
File: ntree.go Project: mlncn/cothority
func main() {

	conf := new(app.NTreeConfig)
	app.ReadConfig(conf)

	// we must know who we are
	if app.RunFlags.Hostname == "" {
		log.Fatal("Hostname empty: Abort")
	}

	own, depth := conf.Tree.FindByName(app.RunFlags.Hostname, 0)
	if depth == 0 {
		// i.e. we are root
		conf.Root = true
	}
	if own == nil {
		dbg.Fatal("Could not find its name in the tree", app.RunFlags.Hostname)
	}
	conf.Tree = own
	conf.Name = own.Name
	// Wait for everybody to be ready before going on
	ioutil.WriteFile("coll_stamp_up/up"+app.RunFlags.Hostname, []byte("started"), 0666)
	for {
		_, err := os.Stat("coll_stamp_up")
		if err == nil {
			files, _ := ioutil.ReadDir("coll_stamp_up")
			dbg.Lvl4(app.RunFlags.Hostname, "waiting for others to finish", len(files))
			time.Sleep(time.Second)
		} else {
			break
		}
	}
	dbg.Lvl2(app.RunFlags.Hostname, "thinks everybody's here")

	switch app.RunFlags.Mode {
	case "client":
		log.Panic("No client mode")
	case "server":
		RunServer(conf)
	}

}
Code Example #29
File: node.go Project: mlncn/cothority
// Run starts the node for the given hostname: it listens, then connects
// to its parent with exponential backoff
func (hc *HostConfig) Run(stamper bool, signType sign.Type, hostname string) error {
	dbg.Lvl3(hc.Hosts, "going to connect everything for", hostname)
	node := hc.Hosts[hostname]

	node.Type = signType
	dbg.Lvl3("Listening on", node.Host)
	node.Host.Listen()

	var err error
	// exponential backoff for attempting to connect to parent
	startTime := time.Duration(200)
	maxTime := time.Duration(2000)
	for i := 0; i < 2000; i++ {
		dbg.Lvl3(hostname, "attempting to connect to parent")
		// the host should connect with the parent
		err = node.Connect(0)
		if err == nil {
			// log.Infoln("hostconfig: connected to parent:")
			break
		}

		time.Sleep(startTime * time.Millisecond)
		startTime *= 2
		if startTime > maxTime {
			startTime = maxTime
		}
	}
	if err != nil {
		dbg.Fatal(hostname, "failed to connect to parent")
		//return errors.New("failed to connect")
	} else {
		dbg.Lvl3(hostname, "successfully connected to parent")
	}

	if !stamper {
		// This will call the dispatcher in collectiveSigning for every request
		dbg.Lvl4("Starting to listen for incoming stamp-requests on", hostname)
		node.Listen()
	}

	return nil
}
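The connect loop is a capped exponential backoff: the delay doubles from 200ms up to a 2s ceiling between attempts. The same idea extracted into a generic, runnable helper (retry and its signature are illustrative, not project API):

package main

import (
	"errors"
	"fmt"
	"time"
)

// retry calls f until it succeeds or attempts run out, doubling the delay
// between tries from 200ms up to a 2s ceiling.
func retry(attempts int, f func() error) error {
	delay, max := 200*time.Millisecond, 2*time.Second
	var err error
	for i := 0; i < attempts; i++ {
		if err = f(); err == nil {
			return nil
		}
		time.Sleep(delay)
		if delay *= 2; delay > max {
			delay = max
		}
	}
	return err
}

func main() {
	calls := 0
	err := retry(5, func() error {
		calls++
		if calls < 3 {
			return errors.New("parent not up yet")
		}
		return nil
	})
	fmt.Println("calls:", calls, "err:", err) // calls: 3 err: <nil>
}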
Code Example #30
File: tcphost.go Project: mlncn/cothority
func (h *TCPHost) ConnectTo(parent string) error {
	// If we have already set up this connection, don't do anything
	h.PeerLock.Lock()
	if h.Ready[parent] {
		log.Println("ConnectTo: node already ready")
		h.PeerLock.Unlock()
		return nil
	}
	h.PeerLock.Unlock()

	// connect to the parent
	conn, err := net.Dial("tcp4", parent)
	if err != nil {
		dbg.Lvl3("tcphost:", h.Name(), "failed to connect to parent:", err)
		return err
	}
	tp := NewTCPConnFromNet(conn)

	mname := StringMarshaler(h.Name())
	err = tp.PutData(&mname)
	if err != nil {
		log.Errorln("Putting data error:", err)
		return err
	}
	tp.SetName(parent)

	// give parent the public key
	err = tp.PutData(h.Pubkey)
	if err != nil {
		log.Errorln("failed to send public key")
		return err
	}

	// get and set the parents public key
	suite := h.suite
	pubkey := suite.Point()
	err = tp.GetData(pubkey)
	if err != nil {
		log.Errorln("failed to establish connection: getting pubkey:", err)
		tp.Close()
		return err
	}
	tp.SetPubKey(pubkey)

	h.PeerLock.Lock()
	h.Ready[tp.Name()] = true
	h.peers[parent] = tp
	// h.PendingPeers[parent] = true
	h.PeerLock.Unlock()
	dbg.Lvl4("Connected to parent:", parent)

	go func() {
		for {
			data := h.pool.Get().(BinaryUnmarshaler)
			err := tp.GetData(data)

			h.msgchan <- NetworkMessg{Data: data, From: tp.Name(), Err: err}
		}
	}()

	return nil
}