Example #1
func (d *Localhost) Start(args ...string) error {
	if err := os.Chdir(d.RunDir); err != nil {
		return err
	}
	dbg.Lvl4("Localhost: chdir into", d.RunDir)
	ex := d.RunDir + "/" + d.App
	dbg.Lvl4("Localhost: in Start() => hosts", d.Hosts)
	d.running = true
	dbg.Lvl1("Starting", len(d.Hosts), "applications of", ex)
	for index, host := range d.Hosts {
		dbg.Lvl3("Starting", index, "=", host)
		amroot := fmt.Sprintf("-amroot=%s", strconv.FormatBool(index == 0))
		cmdArgs := []string{"-hostname", host, "-mode", "server", "-logger",
			"localhost:" + monitor.SinkPort, amroot}
		cmdArgs = append(args, cmdArgs...)
		dbg.Lvl3("CmdArgs are", cmdArgs)
		cmd := exec.Command(ex, cmdArgs...)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		// Register with the wait-group before spawning the goroutine so that
		// Wait() cannot run before Add().
		d.wg_run.Add(1)
		go func(i int, h string) {
			dbg.Lvl3("Localhost: will start host", h)
			err := cmd.Run()
			if err != nil {
				dbg.Lvl3("Error running localhost", h, ":", err)
			}
			d.wg_run.Done()
			dbg.Lvl3("host (index", i, ")", h, "done")
		}(index, host)
	}
	return nil
}
Example #2
// StatusReturn adds up the status-messages of all children and sends the
// result to the parent
func (sn *Node) StatusReturn(view int, sm *SigningMessage) error {
	sn.PeerStatusRcvd += 1
	sn.PeerStatus.Responders += sm.SRm.Responders
	sn.PeerStatus.Peers += sm.SRm.Peers

	// Wait for other children before propagating the message
	if sn.PeerStatusRcvd < len(sn.Children(view)) {
		dbg.Lvl3(sn.Name(), "Waiting for other children")
		return nil
	}

	var err error
	if sn.IsRoot(view) {
		// Add the root-node
		sn.PeerStatus.Peers += 1
		dbg.Lvl3("We got", sn.PeerStatus.Responders, "responses from", sn.PeerStatus.Peers, "peers.")
	} else {
		dbg.Lvl4(sn.Name(), "puts up statusReturn for", sn.PeerStatus)
		ctx := context.TODO()
		sm.SRm = &sn.PeerStatus
		err = sn.PutUp(ctx, view, sm)
	}
	dbg.Lvl3("Deleting round", sm.RoundNbr, sn.Rounds)
	delete(sn.Rounds, sm.RoundNbr)
	return err
}
Example #3
// SigScanner scans a file containing PGP signatures and returns the blocks found.
func SigScanner(filename string) ([]string, error) {
	var blocks []string
	head := "-----BEGIN PGP SIGNATURE-----"
	dbg.Lvl3("Reading file", filename)

	file, err := os.Open(filename)
	if err != nil {
		dbg.Lvl1("Couldn't open file", filename, err)
		return nil, err
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	var block []string
	for scanner.Scan() {
		text := scanner.Text()
		dbg.Lvl3("Decoding", text)
		// end of the first part
		if text == head {
			dbg.Lvl3("Found header")
			if len(block) > 0 {
				blocks = append(blocks, strings.Join(block, "\n"))
				block = make([]string, 0)
			}
		}
		block = append(block, text)
	}
	blocks = append(blocks, strings.Join(block, "\n"))
	return blocks, nil
}
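
A minimal usage sketch for SigScanner; the file name "sigs.asc" and the wrapper listBlocks are hypothetical, not part of the original code.

func listBlocks() {
	// "sigs.asc" is a hypothetical file with armored PGP signatures.
	blocks, err := SigScanner("sigs.asc")
	if err != nil {
		dbg.Fatal("Couldn't scan signature file:", err)
	}
	dbg.Lvl1("Found", len(blocks), "signature block(s)")
	for i, b := range blocks {
		dbg.Lvl3("Block", i, "is", len(b), "characters long")
	}
}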
Example #4
// LoadEdgeList takes a byte slice representing an edge list and loads the graph.
func (g *Graph) LoadEdgeList(edgelist []byte) {
	dbg.Lvl3(g.Names)
	fields := bytes.Fields(edgelist)
	// create name map from string to index
	names := make(map[string]int)
	for i, n := range g.Names {
		names[n] = i
	}

	// read fields in groups of three: from, to, edgeweight
	for i := 0; i < len(fields)-2; i += 3 {
		from := string(fields[i])
		to := string(fields[i+1])
		weight, err := strconv.ParseFloat(string(fields[i+2]), 64)
		if err != nil {
			dbg.Lvl3(err)
			continue
		}
		fi, ok := names[from]
		if !ok {
			dbg.Lvl3("from not ok:", from)
			continue
		}

		ti, ok := names[to]
		if !ok {
			dbg.Lvl3("to not ok:", to)
			continue
		}

		g.Weights[fi][ti] = weight
	}
}
Example #5
func (hc *HostConfig) String() string {
	b := bytes.NewBuffer([]byte{})

	// write the hosts
	b.WriteString("{\"hosts\": [")
	for i, sn := range hc.SNodes {
		if i != 0 {
			b.WriteString(", ")
		}
		b.WriteString("\"" + sn.Name() + "\"")
	}
	b.WriteString("],")

	// write the tree structure
	b.WriteString("\"tree\": ")
	if len(hc.SNodes) != 0 {
		root := hc.SNodes[0]
		writeHC(b, hc, root)
	} else {
		b.WriteString("{}")
	}
	b.WriteString("}\n")

	// format the resulting JSON for readability
	bformatted := bytes.NewBuffer([]byte{})
	err := json.Indent(bformatted, b.Bytes(), "", "\t")
	if err != nil {
		dbg.Lvl3(b.String())
		dbg.Lvl3("ERROR:", err)
	}

	return bformatted.String()
}
Example #6
func (sn *Node) CloseAll(view int) error {
	dbg.Lvl2(sn.Name(), "received CloseAll on", view)

	// At the leaves
	if len(sn.Children(view)) == 0 {
		dbg.Lvl3(sn.Name(), "in CloseAll is root leaf")
	} else {
		dbg.Lvl3(sn.Name(), "in CloseAll is calling", len(sn.Children(view)), "children")

		// Inform all children of announcement
		messgs := make([]coconet.BinaryMarshaler, sn.NChildren(view))
		for i := range messgs {
			sm := SigningMessage{
				Suite:   sn.Suite().String(),
				Type:    CloseAll,
				ViewNbr: view,
				//LastSeenVote: int(atomic.LoadInt64(&sn.LastSeenVote)),
			}
			messgs[i] = &sm
		}
		ctx := context.TODO()
		if err := sn.PutDown(ctx, view, messgs); err != nil {
			return err
		}
	}
	dbg.Lvl3("Closing down shop", sn.Isclosed)
	sn.Close()
	return nil
}
Example #7
// NewStampListener creates a new stamp listener one port above the
// address given in nameP.
func NewStampListener(nameP string) *StampListener {
	// listen for client requests at one port higher
	// than the signing node
	var nameL string
	h, p, err := net.SplitHostPort(nameP)
	if err == nil {
		i, err := strconv.Atoi(p)
		if err != nil {
			dbg.Fatal(err)
		}
		nameL = net.JoinHostPort(h, strconv.Itoa(i+1))
	} else {
		dbg.Fatal("Couldn't split host into name and port:", err)
	}
	sl, ok := SLList[nameL]
	if !ok {
		sl = &StampListener{}
		dbg.Lvl3("Creating new StampListener for", nameL)
		sl.Queue = make([][]MustReplyMessage, 2)
		sl.Queue[READING] = make([]MustReplyMessage, 0)
		sl.Queue[PROCESSING] = make([]MustReplyMessage, 0)
		sl.Clients = make(map[string]coconet.Conn)
		sl.waitClose = make(chan string)
		sl.NameL = nameL

		SLList[sl.NameL] = sl
		sl.ListenRequests()
	} else {
		dbg.Lvl3("Taking cached StampListener")
	}
	return sl
}
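
A short sketch of how NewStampListener might be used; the address localhost:2000 and the wrapper startListener are assumptions for illustration only.

func startListener() {
	// The signing node is assumed to listen on localhost:2000, so the
	// stamp listener accepts client requests on localhost:2001.
	sl := NewStampListener("localhost:2000")
	dbg.Lvl2("Stamp listener ready on", sl.NameL)

	// A second call with the same address returns the cached instance
	// from SLList instead of creating a new one.
	same := NewStampListener("localhost:2000")
	dbg.Lvl3("Got cached listener:", sl == same)
}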
Example #8
func PrintTreeNode(t *Tree) {
	dbg.Lvl3(t.Name)

	for _, c := range t.Children {
		dbg.Lvl3("\t", c.Name)
	}
}
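
A small sketch exercising PrintTreeNode on a hand-built tree; it assumes Children is a []*Tree, which this excerpt does not show, and the wrapper printSmallTree is hypothetical.

func printSmallTree() {
	// Hypothetical two-level tree; adjust if the Children type differs.
	root := &Tree{
		Name: "root",
		Children: []*Tree{
			{Name: "child-1"},
			{Name: "child-2"},
		},
	}
	PrintTreeNode(root)
}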
Example #9
// TestStamp runs two conodes and tests if the value returned is OK.
func TestStamp(t *testing.T) {
	dbg.TestOutput(testing.Verbose(), 4)
	peer1, peer2 := createPeers()
	go peer1.LoopRounds(conode.RoundStamperListenerType, 4)
	go peer2.LoopRounds(conode.RoundStamperListenerType, 4)
	time.Sleep(2 * time.Second)

	s, err := conode.NewStamp("testdata/config.toml")
	if err != nil {
		t.Fatal("Couldn't open config-file:", err)
	}

	for _, port := range []int{7000, 7010} {
		stamper := "localhost:" + strconv.Itoa(port)
		dbg.Lvl2("Contacting stamper", stamper)
		tsm, err := s.GetStamp([]byte("test"), stamper)
		dbg.Lvl3("Evaluating results of", stamper)
		if err != nil {
			t.Fatal("Couldn't get stamp from server:", err)
		}

		if !tsm.Srep.AggPublic.Equal(s.X0) {
			t.Fatal("Not correct aggregate public key")
		}
	}

	dbg.Lvl2("Closing peer1")
	peer1.Close()
	dbg.Lvl2("Closing peer2")
	peer2.Close()
	dbg.Lvl3("Done with test")
}
Example #10
func (d *Deterlab) Start(args ...string) error {
	// setup port forwarding for viewing log server
	d.started = true
	// Remote tunneling : the sink port is used both for the sink and for the
	// proxy => the proxy redirects packets to the same port the sink is
	// listening.
	// -n => stdin from /dev/null, -N => no remote command, -T => no tty, -f => go to background
	cmd := []string{"-nNTf", "-o", "StrictHostKeyChecking=no", "-o", "ExitOnForwardFailure=yes", "-R", d.ProxyRedirectionPort + ":" + d.ProxyRedirectionAddress + ":" + monitor.SinkPort, fmt.Sprintf("%s@%s", d.Login, d.Host)}
	exCmd := exec.Command("ssh", cmd...)
	if err := exCmd.Start(); err != nil {
		dbg.Fatal("Failed to start the ssh port forwarding:", err)
	}
	if err := exCmd.Wait(); err != nil {
		dbg.Fatal("ssh port forwarding exited in failure:", err)
	}
	dbg.Lvl3("Setup remote port forwarding", cmd)
	go func() {
		err := cliutils.SshRunStdout(d.Login, d.Host, "cd remote; GOMAXPROCS=8 ./users")
		if err != nil {
			dbg.Lvl3(err)
		}
		d.sshDeter <- "finished"
	}()

	return nil
}
Example #11
// Filter filters out a series of values, dropping those above the configured percentile.
func (df *DataFilter) Filter(measure string, values []float64) []float64 {
	// do we have a filter for this measure ?
	if _, ok := df.percentiles[measure]; !ok {
		return values
	}
	// Compute the percentile value
	max, err := stats.PercentileNearestRank(values, df.percentiles[measure])
	if err != nil {
		dbg.Lvl2("Monitor: Error filtering data:", err)
		return values
	}

	// Find the index from where to filter
	maxIndex := -1
	for i, v := range values {
		if v > max {
			maxIndex = i
		}
	}
	// check if we found something to filter out
	if maxIndex == -1 {
		dbg.Lvl3("Filtering: nothing to filter for", measure)
		return values
	}
	// return the values below the percentile
	dbg.Lvl3("Filtering: filters out", measure, ":", maxIndex, "/", len(values))
	return values[:maxIndex]
}
Example #12
// Challenge is initiated by the root and propagated by all other nodes.
func (sn *Node) Challenge(sm *SigningMessage) error {
	view := sm.ViewNbr
	RoundNbr := sm.RoundNbr
	dbg.Lvl3("Challenge for round", RoundNbr)
	// update max seen round
	sn.roundmu.Lock()
	sn.LastSeenRound = max(sn.LastSeenRound, RoundNbr)
	sn.roundmu.Unlock()

	children := sn.Children(view)

	challs := make([]*SigningMessage, len(children))
	i := 0
	for child := range children {
		challs[i] = &SigningMessage{
			Suite:    sn.Suite().String(),
			ViewNbr:  view,
			RoundNbr: RoundNbr,
			Type:     Challenge,
			To:       child,
			Chm: &ChallengeMessage{
				Message: make([]byte, 0),
			}}
		i++
	}

	round := sn.Rounds[RoundNbr]
	if round == nil {
		dbg.Lvl3("No Round Interface created for this round. Children:",
			len(children))
	} else {
		err := round.Challenge(sm, challs)
		if err != nil {
			return err
		}
	}

	// if we are a leaf, send the respond up
	if len(children) == 0 {
		sn.Respond(&SigningMessage{
			Suite:    sn.Suite().String(),
			Type:     Response,
			ViewNbr:  view,
			RoundNbr: RoundNbr,
		})
	} else {
		// otherwise continue to pass down challenge
		for _, out := range challs {
			if out.To != "" {
				conn := children[out.To]
				conn.PutData(out)
			} else {
				dbg.Error("Out.To == nil with children", children)
			}
		}
	}
	// dbg.Lvl4(sn.Name(), "Done handling challenge message")
	return nil
}
Example #13
// EnableMeasure enables or disables measuring.
func EnableMeasure(b bool) {
	if b {
		dbg.Lvl3("Monitor: Measure enabled")
	} else {
		dbg.Lvl3("Monitor: Measure disabled")
	}
	enabled = b
}
Example #14
// ComputeSharedSecret exchanges the dealers between the peers and
// computes the shared secret at the end.
func (p *Peer) ComputeSharedSecret() *poly.SharedSecret {
	// Construct the dealer
	dealerKey := cliutils.KeyPair(p.suite)
	dealer := new(poly.Deal).ConstructDeal(&dealerKey, &p.key, p.info.T, p.info.R, p.pubKeys)
	// Construct the receiver
	receiver := poly.NewReceiver(p.suite, p.info, &p.key)
	// add already its own dealer
	_, err := receiver.AddDeal(p.Id, dealer)
	if err != nil {
		dbg.Fatal(p.String(), "could not add its own dealer >< ABORT")
	}

	// Send the dealer struct TO every one
	err = p.SendToAll(dealer)
	dbg.Lvl3(p.Name, "sent its dealer to every peers. (err =", err, ")")
	// Receive the dealer struct FROM every one
	// wait with a chan to get ALL dealers
	dealChan := make(chan *poly.Deal)
	for _, rp := range p.remote {
		go func(rp RemotePeer) {
			d := new(poly.Deal).UnmarshalInit(p.info.T, p.info.R, p.info.N, p.suite)
			err := p.suite.Read(rp.Conn, d)
			if err != nil {
				dbg.Fatal(p.Name, "received a strange dealer from", rp.String(), ":", err)
			}
			dealChan <- d
		}(rp)
	}

	// wait to get all dealers
	dbg.Lvl3(p.Name, "wait to receive every other peer's dealer...")
	n := 0
	for {
		// get the dealer and add it
		d := <-dealChan
		dbg.Lvl3(p.Name, "collected one more dealer (count =", n, ")")
		// TODO: get the response back to the dealer
		_, err := receiver.AddDeal(p.Id, d)
		if err != nil {
			dbg.Fatal(p.Name, "has error when adding the dealer:", err)
		}
		n += 1
		// we get enough dealers to compute the shared secret
		if n == p.info.T-1 {
			dbg.Lvl3(p.Name, "received every Dealers")
			break
		}
	}

	sh, err := receiver.ProduceSharedSecret()
	if err != nil {
		dbg.Fatal(p.Name, "could not produce shared secret. Abort. (err", err, ")")
	}
	dbg.Lvl3(p.Name, "produced shared secret !")
	return sh
}
Example #15
// SignatureBroadcast broadcasts the final signature to give to the clients;
// it contains the global response and the global challenge.
func (sn *Node) SignatureBroadcast(sm *SigningMessage) error {
	view := sm.ViewNbr
	RoundNbr := sm.RoundNbr
	dbg.Lvl3(sn.Name(), "received SignatureBroadcast on", view)
	sn.PeerStatusRcvd = 0

	ri := sn.Rounds[RoundNbr]
	if ri == nil {
		return fmt.Errorf("No round created for this round number (signature broadcast)")
	}
	out := make([]*SigningMessage, sn.NChildren(view))
	for i := range out {
		out[i] = &SigningMessage{
			Suite:    sn.Suite().String(),
			Type:     SignatureBroadcast,
			ViewNbr:  view,
			RoundNbr: RoundNbr,
			SBm: &SignatureBroadcastMessage{
				R0_hat: sn.suite.Secret().One(),
				C:      sn.suite.Secret().One(),
				X0_hat: sn.suite.Point().Null(),
				V0_hat: sn.suite.Point().Null(),
			},
		}
	}

	err := ri.SignatureBroadcast(sm, out)
	if err != nil {
		return err
	}

	if len(sn.Children(view)) > 0 {
		dbg.Lvl3(sn.Name(), "in SignatureBroadcast is calling", len(sn.Children(view)), "children")
		ctx := context.TODO()
		msgs := make([]coconet.BinaryMarshaler, len(out))
		for i := range msgs {
			msgs[i] = out[i]
			// Why oh why do we have to do this?
			out[i].SBm.X0_hat = sn.suite.Point().Add(out[i].SBm.X0_hat, sn.suite.Point().Null())
		}
		if err := sn.PutDown(ctx, view, msgs); err != nil {
			return err
		}
	} else {
		dbg.Lvl3(sn.Name(), "sending StatusReturn")
		return sn.StatusReturn(view, &SigningMessage{
			Suite:    sn.Suite().String(),
			Type:     StatusReturn,
			ViewNbr:  view,
			RoundNbr: RoundNbr,
			SRm:      &StatusReturnMessage{},
		})
	}
	return nil
}
Example #16
func TrimLastIncompleteLevel(root *Tree, hosts []string, depths []int, bf int) (*Tree, []string) {
	treed := Depth(root)

	var sj, j int
	n := len(hosts)

	expectedNNodes := 1
	// If there is any incomplete tree level, it must be the last one (depth
	// treed); otherwise the tree is completely filled for its depth. We check
	// that this is the case and, if the last level is incomplete, remove its
	// nodes and hosts.
	lastLevel := treed
	for d := 1; d <= treed; d++ {
		nNodes := 0
		sj = j
		for ; j < n && depths[j] == d; j++ {
			nNodes++
		}

		if nNodes != expectedNNodes {
			lastLevel = d
			break
		}
		expectedNNodes *= bf
	}

	if lastLevel != treed {
		panic("Incomplete level is not last tree level" + strconv.Itoa(lastLevel) + " " + strconv.Itoa(treed))
	}

	bhMap := make(map[string]bool)
	badHosts := hosts[sj:]
	for _, bh := range badHosts {
		bhMap[bh] = true
	}
	newRoot := &Tree{Name: root.Name}
	TrimTree(newRoot, root, bhMap)

	d := Depth(newRoot)
	if len(badHosts) != 0 && d != treed-1 {
		dbg.Lvl3(d, "!=", treed-1)
		panic("TrimTree returned a wrong result")
	} else if len(badHosts) == 0 && d != treed {
		dbg.Lvl3(d, "!=", treed)
		panic("TrimTree returned a wrong result")
	}

	// dbg.Lvl3("			Trimmed", n-sj, "nodes")
	return newRoot, hosts[:sj]

}
Example #17
// Listen starts listening for incoming connections on this address.
// It needs the stats struct pointer to update when measures come in.
// It returns an error if something went wrong during the connection setup.
func (m *Monitor) Listen() error {
	ln, err := net.Listen("tcp", Sink+":"+SinkPort)
	if err != nil {
		return fmt.Errorf("Error while monitor is binding address: %v", err)
	}
	m.listener = ln
	dbg.Lvl2("Monitor listening for stats on", Sink, ":", SinkPort)
	finished := false
	go func() {
		for {
			if finished {
				break
			}
			conn, err := ln.Accept()
			if err != nil {
				operr, ok := err.(*net.OpError)
				// We can't accept anymore; the listener has been closed
				if ok && operr.Op == "accept" {
					break
				}
				dbg.Lvl2("Error while monitor was accepting a connection:", err)
				continue
			}
			dbg.Lvl3("Monitor: new connection from", conn.RemoteAddr().String())
			m.mutexConn.Lock()
			m.conns[conn.RemoteAddr().String()] = conn
			go m.handleConnection(conn)
			m.mutexConn.Unlock()
		}
	}()
	for !finished {
		select {
		// new stats
		case measure := <-m.measures:
			m.update(measure)
		// end of a peer conn
		case peer := <-m.done:
			dbg.Lvl3("Connections left:", len(m.conns))
			m.mutexConn.Lock()
			delete(m.conns, peer)
			m.mutexConn.Unlock()
			// end of monitoring,
			if len(m.conns) == 0 {
				m.listener.Close()
				finished = true
				break
			}
		}
	}
	dbg.Lvl2("Monitor finished waiting !")
	m.conns = make(map[string]net.Conn)
	return nil
}
Example #18
// Ready only sends a ready-string to the sink, connecting first if needed.
func Ready(addr string) error {
	if encoder == nil {
		dbg.Lvl3("Connecting to sink", addr)
		err := ConnectSink(addr)
		if err != nil {
			return err
		}
	}
	dbg.Lvl3("Sending ready-signal")
	send(Measure{Name: "ready"})
	return nil
}
Example #19
// Configure sets up the directories and variables needed for a localhost run.
func (d *Localhost) Configure() {
	pwd, _ := os.Getwd()
	d.AppDir = pwd + "/../app"
	d.RunDir = pwd + "/platform/localhost"
	d.LocalDir = pwd
	d.Debug = dbg.DebugVisible
	d.running = false
	if d.App == "" {
		dbg.Fatal("No app defined in simulation")
	}
	dbg.Lvl3(fmt.Sprintf("Localhost dirs: AppDir %s, RunDir %s", d.AppDir, d.RunDir))
	dbg.Lvl3("Localhost configured ...")
}
Example #20
// TimeStamp is called when the client asks for val to be timestamped.
// It blocks until it gets a coll_stamp reply back.
func (c *Client) TimeStamp(val []byte, TSServerName string) error {
	c.Mux.Lock()
	if c.Error != nil {
		c.Mux.Unlock()
		return c.Error
	}
	c.reqno++
	myReqno := c.reqno
	c.doneChan[c.reqno] = make(chan error, 1) // new done channel for new req
	c.Mux.Unlock()
	// send request to TSServer
	err := c.PutToServer(TSServerName,
		&conode.TimeStampMessage{
			Type:  conode.StampRequestType,
			ReqNo: myReqno,
			Sreq:  &conode.StampRequest{Val: val}})
	if err != nil {
		if err != coconet.ErrNotEstablished {
			dbg.Lvl3(c.Name(), "error timestamping to", TSServerName, ":", err)
		}
		// pass back up all errors from putting to server
		return err
	}
	dbg.Lvl4("Client Sent timestamp request to", TSServerName)

	// get channel associated with request
	c.Mux.Lock()
	myChan := c.doneChan[myReqno]
	c.Mux.Unlock()

	// wait until ProcessStampSignature signals that reply was received
	select {
	case err = <-myChan:
		//log.Println("-------------client received  response from" + TSServerName)
		break
	case <-time.After(10 * sign.ROUND_TIME):
		dbg.Lvl3("client timeouted on waiting for response from" + TSServerName)
		break
		// err = ErrClientToTSTimeout
	}
	if err != nil {
		dbg.Lvl3(c.Name(), "error received from DoneChan:", err)
		return err
	}

	// delete the channel as it is no longer needed
	c.Mux.Lock()
	delete(c.doneChan, myReqno)
	c.Mux.Unlock()
	return err
}
Example #21
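// ApprovalCheck verifies the signatures of the given commit ID against the
// policy file and returns whether the approval threshold is reached.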
func ApprovalCheck(PolicyFile, SignaturesFile, Id string) (bool, error) {
	var (
		commit     SignedCommit               // Commit to be verified
		developers openpgp.EntityList         // List of all developers whose public keys are in the policy file
		approvers  map[string]*openpgp.Entity // Map of developers who provided a valid signature. Indexed by public key id (openpgp.PrimaryKey.KeyIdString)
		err        error
	)

	commit.Policy.Threshold, commit.Policy.DevPubKeys, commit.Policy.CothKey, err = PolicyScanner(PolicyFile)
	checkFileError(err, PolicyFile)
	commit.Signatures, err = SigScanner(SignaturesFile)
	checkFileError(err, SignaturesFile)
	commit.CommitID = Id
	// commit.CommitID, err = CommitScanner(CommitIdFile)
	// checkFileError(err, CommitIdFile)
	// commit.Approval = false

	approvers = make(map[string]*openpgp.Entity)

	// Creating openpgp entitylist from list of public keys
	developers = make(openpgp.EntityList, 0)
	for _, pubkey := range commit.Policy.DevPubKeys {
		keybuf, err := openpgp.ReadArmoredKeyRing(strings.NewReader(pubkey))
		if err != nil {
			dbg.Error("Could not decode armored public key", err)
		}
		for _, entity := range keybuf {
			developers = append(developers, entity)
		}
	}

	// Verifying every signature in the list and counting valid ones
	for _, signature := range commit.Signatures {
		result, err := openpgp.CheckArmoredDetachedSignature(developers, bytes.NewBufferString(commit.CommitID), strings.NewReader(signature))
		if err != nil {
			dbg.Lvl1("The signature is invalid or cannot be verified due to", err)
		} else {
			if approvers[result.PrimaryKey.KeyIdString()] == nil { // We need to check that this is a unique signature
				approvers[result.PrimaryKey.KeyIdString()] = result
				dbg.Lvl3("Approver: %+v", result.Identities)
			}
		}
	}

	dbg.Lvl3("Is release approved? ", len(approvers) >= commit.Policy.Threshold)
	// commit.Approval = (len(approvers) >= commit.Policy.Threshold)

	return len(approvers) >= commit.Policy.Threshold, err
	// return commit, err
}
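
A hedged usage sketch for ApprovalCheck; the file names, commit ID and the wrapper checkRelease are made up for illustration.

func checkRelease() {
	// Hypothetical policy file, signatures file and commit ID.
	approved, err := ApprovalCheck("example_policy.toml", "example_signatures.txt",
		"0123456789abcdef0123456789abcdef01234567")
	if err != nil {
		dbg.Lvl1("ApprovalCheck reported an error:", err)
	}
	if approved {
		dbg.Lvl1("Release is approved by enough developers")
	} else {
		dbg.Lvl1("Release is NOT approved")
	}
}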
Example #22
// Build builds the application for the local machine.
func (d *Localhost) Build(build string) error {
	src, _ := filepath.Rel(d.LocalDir, d.AppDir+"/"+d.App)
	dst := d.RunDir + "/" + d.App
	start := time.Now()
	// build for the local machine
	res, err := cliutils.Build(src, dst, runtime.GOARCH, runtime.GOOS)
	if err != nil {
		dbg.Fatal("Error while building for localhost (src", src, ", dst", dst, ":", res)
	}
	dbg.Lvl3("Localhost: Build src", src, ", dst", dst)
	dbg.Lvl3("Localhost: Results of localhost build:", res)
	dbg.Lvl2("Localhost: build finished in", time.Since(start))
	return err
}
Example #23
// WaitSYNs waits until every peer has SYN'd with this one.
func (p *Peer) WaitSYNs() {
	for {
		s := <-p.synChan
		dbg.Lvl3(p.Name, "synChan received Syn id", s.Id)
		_, ok := p.remote[s.Id]
		if !ok {
			dbg.Fatal(p.Name, "received syn'd notification of an unknown peer... ABORT")
		}
		if len(p.remote) == p.info.N-1 {
			dbg.Lvl3(p.Name, "is SYN'd with every one")
			break
		}
	}
}
Example #24
// NewPeer returns a peer that can be used to set up
// connections.
func NewPeer(address string, conf *app.ConfigConode) *Peer {
	suite := app.GetSuite(conf.Suite)

	var err error
	// make sure address has a port or insert default one
	address, err = cliutils.VerifyPort(address, DefaultPort)
	if err != nil {
		dbg.Fatal(err)
	}

	// For backward-compatibility reasons, convert the base64-encoded keys into
	// hex-encoded keys.
	convertTree(suite, conf.Tree)
	// Add our private key to the tree (compatibility issues again with graphs/
	// lib)
	addPrivateKey(suite, address, conf)
	// load the configuration
	dbg.Lvl3("loading configuration")
	var hc *graphs.HostConfig
	opts := graphs.ConfigOptions{ConnType: "tcp", Host: address, Suite: suite}
	hc, err = graphs.LoadConfig(conf.Hosts, conf.Tree, suite, opts)
	if err != nil {
		dbg.Fatal(err)
	}

	// Listen to stamp-requests on port 2001
	node := hc.Hosts[address]
	peer := &Peer{
		conf:      conf,
		Node:      node,
		RLock:     sync.Mutex{},
		CloseChan: make(chan bool, 5),
		Hostname:  address,
	}

	// Start the cothority-listener on port 2000
	err = hc.Run(true, sign.MerkleTree, address)
	if err != nil {
		dbg.Fatal(err)
	}

	go func() {
		err := peer.Node.Listen()
		dbg.Lvl3("Node.listen quits with status", err)
		peer.CloseChan <- true
		peer.Close()
	}()
	return peer
}
Example #25
// ConnectSink connects to the given endpoint and initialises a json
// encoder. It can be the address of a proxy or a monitoring process.
// Returns an error if it could not connect to the endpoint.
func ConnectSink(addr string) error {
	if encoder != nil {
		return nil
	}
	dbg.Lvl3("Connecting to:", addr)
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return err
	}
	dbg.Lvl3("Connected to sink:", addr)
	sink = addr
	connection = conn
	encoder = json.NewEncoder(conn)
	return nil
}
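
A sketch of the expected call order inside the monitor package, assuming the sink listens on localhost at SinkPort; the wrapper connectAndAnnounce is hypothetical.

func connectAndAnnounce() {
	// Assumes the sink listens on localhost at SinkPort.
	addr := "localhost:" + SinkPort
	if err := ConnectSink(addr); err != nil {
		dbg.Fatal("Couldn't connect to sink:", err)
	}
	// Ready reuses the existing connection since the encoder is already set.
	if err := Ready(addr); err != nil {
		dbg.Fatal("Couldn't send ready-signal:", err)
	}
}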
Example #26
func RunServer(flags *app.Flags, conf *app.ConfigColl) {
	hostname := flags.Hostname
	if hostname == conf.Hosts[0] {
		dbg.Lvlf3("Tree is %+v", conf.Tree)
	}
	dbg.Lvl3(hostname, "Starting to run")

	app.RunFlags.StartedUp(len(conf.Hosts))
	peer := conode.NewPeer(hostname, conf.ConfigConode)

	if app.RunFlags.AmRoot {
		for {
			setupRound := sign.NewRoundSetup(peer.Node)
			peer.StartAnnouncementWithWait(setupRound, 5*time.Second)
			counted := <-setupRound.Counted
			dbg.Lvl1("Number of peers counted:", counted)
			if counted == len(conf.Hosts) {
				dbg.Lvl1("All hosts replied")
				break
			}
			time.Sleep(time.Second)
		}
	}

	RegisterRoundMeasure(peer.Node.LastRound())
	peer.LoopRounds(RoundMeasureType, conf.Rounds)
	dbg.Lvlf3("Done - flags are %+v", app.RunFlags)
	monitor.End()
}
Example #27
// NewRoundException creates a new RoundException based on RoundCosi
func NewRoundException(node *Node) *RoundException {
	dbg.Lvl3("Making new RoundException", node.Name())
	round := &RoundException{}
	round.RoundCosi = NewRoundCosi(node)
	round.Type = RoundExceptionType
	return round
}
Example #28
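// NewRoundStamper creates a new RoundStamper based on RoundCosi.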
func NewRoundStamper(node *sign.Node) *RoundStamper {
	dbg.Lvl3("Making new RoundStamper", node.Name())
	round := &RoundStamper{}
	round.RoundCosi = sign.NewRoundCosi(node)
	round.Type = RoundStamperType
	return round
}
Example #29
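// setup_deter builds the mapping from the physical machines to the app
// servers running on them.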
func setup_deter() {
	vpmap := make(map[string]string)
	for i := range deter.Virt {
		vpmap[deter.Virt[i]] = deter.Phys[i]
	}

	deter.Phys = deter.Phys[:]
	deter.Virt = deter.Virt[:]

	hostnames := deter.Hostnames
	dbg.Lvl4("hostnames:", hostnames)

	rootname = hostnames[0]

	// mapping from physical node name to the app servers that are running there
	// essentially a reverse mapping of vpmap except ports are also used
	physToServer = make(map[string][]string)
	for _, virt := range hostnames {
		v, _, _ := net.SplitHostPort(virt)
		p := vpmap[v]
		ss := physToServer[p]
		ss = append(ss, virt)
		physToServer[p] = ss
	}
	dbg.Lvl3("PhysToServer is", physToServer)

}
Example #30
// NewPeer returns a new peer with its id and the number of peers in the Schnorr signature algorithm.
// TODO verification of string addr:port
func NewPeer(id int, name string, suite abstract.Suite, p poly.Threshold, isRoot bool) *Peer {

	if id >= p.N {
		log.Fatal("Error in NewPeer: given", id, "as id whereas polyinfo.N =", p.N)
	}
	// Setup of the private / public pair
	key := cliutils.KeyPair(suite)
	// setup of the public list of key
	pubKeys := make([]abstract.Point, p.N)
	pubKeys[id] = key.Public
	dbg.Lvl3(name, "(id", id, ") has created its private/public key: public =>", key.Public)

	return &Peer{
		Id:      id,
		remote:  make(map[int]RemotePeer),
		root:    isRoot,
		Name:    name,
		info:    p,
		suite:   suite,
		key:     key,
		pubKeys: pubKeys,
		schnorr: new(poly.Schnorr),
		synChan: make(chan Syn),
		ackChan: make(chan Ack),
	}
}