Example #1
// Test closing and opening of Host on same address
func TestHostClose(t *testing.T) {
	defer log.AfterTest(t)
	time.Sleep(time.Second)
	h1 := sda.NewLocalHost(2000)
	h2 := sda.NewLocalHost(2001)
	h1.ListenAndBind()
	_, err := h2.Connect(h1.ServerIdentity)
	if err != nil {
		t.Fatal("Couldn't Connect()", err)
	}
	err = h1.Close()
	if err != nil {
		t.Fatal("Couldn't close:", err)
	}
	err = h2.Close()
	if err != nil {
		t.Fatal("Couldn't close:", err)
	}
	log.Lvl3("Finished first connection, starting 2nd")
	h3 := sda.NewLocalHost(2002)
	h3.ListenAndBind()
	c, err := h2.Connect(h3.ServerIdentity)
	if err != nil {
		t.Fatal(h2, "Couldn't Connect() to", h3, ":", err)
	}
	log.Lvl3("Closing h3")
	err = h3.Close()
	if err != nil {
		// close the underlying connection manually, then fail the test
		c.Close()
		t.Fatal("Couldn't Close()", h3)
	}
}
Example #2
// handleContactNodes receives a contact message from another node.
// If it comes from the root, it contacts all later nodes in the tree;
// otherwise it replies with a Done.
func (b *Broadcast) handleContactNodes(msg struct {
	*sda.TreeNode
	ContactNodes
}) {
	log.Lvl3(b.Info(), "Received message from", msg.TreeNode.String())
	if msg.TreeNode.ID == b.Root().ID {
		b.repliesLeft = len(b.Tree().List()) - b.tnIndex - 1
		if b.repliesLeft == 0 {
			log.Lvl3("Won't contact anybody - finishing")
			b.SendTo(b.Root(), &Done{})
			return
		}
		log.Lvl3(b.Info(), "Contacting nodes:", b.repliesLeft)
		// Connect to all nodes that are later in the TreeNodeList, but only if
		// the message comes from root
		for _, tn := range b.Tree().List()[b.tnIndex+1:] {
			log.Lvl3("Connecting to", tn.String())
			err := b.SendTo(tn, &ContactNodes{})
			if err != nil {
				log.Error(b.Info(), "couldn't send to", tn.String(), err)
				return
			}
		}
	} else {
		// Tell the caller we're done
		log.Lvl3("Sending back to", msg.TreeNode.ServerIdentity.String())
		b.SendTo(msg.TreeNode, &Done{})
	}
}
Example #3
// Start will execute one cothority binary for each configured
// server
func (d *Localhost) Start(args ...string) error {
	if err := os.Chdir(d.runDir); err != nil {
		return err
	}
	log.Lvl4("Localhost: chdir into", d.runDir)
	ex := d.runDir + "/" + d.Simulation
	d.running = true
	log.Lvl1("Starting", d.servers, "applications of", ex)
	for index := 0; index < d.servers; index++ {
		d.wgRun.Add(1)
		log.Lvl3("Starting", index)
		host := "localhost" + strconv.Itoa(index)
		cmdArgs := []string{"-address", host, "-monitor",
			"localhost:" + strconv.Itoa(d.monitorPort),
			"-simul", d.Simulation,
			"-debug", strconv.Itoa(log.DebugVisible()),
		}
		// prepend the user-supplied arguments
		cmdArgs = append(args, cmdArgs...)
		log.Lvl3("CmdArgs are", cmdArgs)
		cmd := exec.Command(ex, cmdArgs...)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		go func(i int, h string) {
			log.Lvl3("Localhost: will start host", h)
			err := cmd.Run()
			if err != nil {
				log.Error("Error running localhost", h, ":", err)
				d.errChan <- err
			}
			d.wgRun.Done()
			log.Lvl3("host (index", i, ")", h, "done")
		}(index, host)
	}
	return nil
}
Example #4
// Wait for all processes to finish
func (d *Localhost) Wait() error {
	log.Lvl3("Waiting for processes to finish")

	var err error
	go func() {
		d.wgRun.Wait()
		log.Lvl3("WaitGroup is 0")
		// write to error channel when done:
		d.errChan <- nil
	}()

	// if one of the hosts fails, stop waiting and return the error:
	e := <-d.errChan
	log.Lvl3("Finished waiting for hosts:", e)
	if e != nil {
		if err := d.Cleanup(); err != nil {
			log.Error("Couldn't cleanup running instances:", err)
		}
		err = e
	}

	log.Lvl2("Processes finished")
	return err
}
Example #5
// Filter filters a series of values, dropping those above the
// configured percentile for the given measure
func (df *DataFilter) Filter(measure string, values []float64) []float64 {
	// do we have a filter for this measure?
	if _, ok := df.percentiles[measure]; !ok {
		return values
	}
	// Compute the percentile value
	max, err := stats.PercentileNearestRank(values, df.percentiles[measure])
	if err != nil {
		log.Lvl2("Monitor: Error filtering data(", values, "):", err)
		return values
	}

	// Find the index from where to filter
	maxIndex := -1
	for i, v := range values {
		if v > max {
			maxIndex = i
		}
	}
	// check if we found something to filter out
	if maxIndex == -1 {
		log.Lvl3("Filtering: nothing to filter for", measure)
		return values
	}
	// return the values below the percentile
	log.Lvl3("Filtering: filters out", measure, ":", maxIndex, "/", len(values))
	return values[:maxIndex]
}
Example #6
// handleCommit receives commit messages and signals the end once it
// has received enough of them.
func (p *Protocol) handleCommit(com *Commit) {
	if p.state != stateCommit {
		//	log.Lvl3(p.Name(), "STORE handle commit packet")
		p.tempCommitMsg = append(p.tempCommitMsg, com)
		return
	}
	// finish after threshold of Commit msgs
	p.commitMsgCount++
	log.Lvl4(p.Name(), "----------------\nWe got", p.commitMsgCount,
		"COMMIT msgs and threshold is", p.threshold)
	if p.IsRoot() {
		log.Lvl4("Leader got ", p.commitMsgCount)
	}
	if p.commitMsgCount >= p.threshold {
		p.state = stateFinished
		// reset counter
		p.commitMsgCount = 0
		log.Lvl3(p.Name(), "Threshold reached: We are done... CONSENSUS")
		if p.IsRoot() && p.onDoneCB != nil {
			log.Lvl3(p.Name(), "We are root and threshold reached: return to the simulation.")
			p.onDoneCB()
			p.finish()
		}
		return
	}
}
Example #7
// listen starts listening for messages coming from any host that tries to
// contact this host. If 'wait' is true, it will try to connect to itself before
// returning.
func (h *Host) listen(wait bool) {
	log.Lvl3(h.ServerIdentity.First(), "starts to listen")
	fn := func(c network.SecureConn) {
		log.Lvl3(h.workingAddress, "Accepted Connection from", c.Remote())
		// register the connection once we know it's ok
		h.registerConnection(c)
		h.handleConn(c)
	}
	go func() {
		log.Lvl4("Host listens on:", h.workingAddress)
		err := h.host.Listen(fn)
		if err != nil {
			log.Fatal("Couldn't listen on", h.workingAddress, ":", err)
		}
	}()
	if wait {
		for {
			log.Lvl4(h.ServerIdentity.First(), "checking if listener is up")
			_, err := h.Connect(h.ServerIdentity)
			if err == nil {
				log.Lvl4(h.ServerIdentity.First(), "managed to connect to itself")
				break
			}
			time.Sleep(network.WaitRetry)
		}
	}
}
Example #8
// CreateRoster creates a Roster with the host-names in 'addresses'.
// It creates 's.Hosts' entries, starting from 'port' and cycling through
// 'addresses'
func (s *SimulationBFTree) CreateRoster(sc *SimulationConfig, addresses []string, port int) {
	start := time.Now()
	nbrAddr := len(addresses)
	if sc.PrivateKeys == nil {
		sc.PrivateKeys = make(map[string]abstract.Scalar)
	}
	hosts := s.Hosts
	if s.SingleHost {
		// If we want to work with a single host, we only make one
		// host per server
		log.Fatal("Not supported yet")
		hosts = nbrAddr
		if hosts > s.Hosts {
			hosts = s.Hosts
		}
	}
	localhosts := false
	listeners := make([]net.Listener, hosts)
	if strings.Contains(addresses[0], "localhost") {
		localhosts = true
	}
	entities := make([]*network.ServerIdentity, hosts)
	log.Lvl3("Doing", hosts, "hosts")
	key := config.NewKeyPair(network.Suite)
	for c := 0; c < hosts; c++ {
		key.Secret.Add(key.Secret,
			key.Suite.Scalar().One())
		key.Public.Add(key.Public,
			key.Suite.Point().Base())
		address := addresses[c%nbrAddr] + ":"
		if localhosts {
			// If we have localhosts, we have to search for an empty port
			var err error
			listeners[c], err = net.Listen("tcp", ":0")
			if err != nil {
				log.Fatal("Couldn't search for empty port:", err)
			}
			_, p, _ := net.SplitHostPort(listeners[c].Addr().String())
			address += p
			log.Lvl4("Found free port", address)
		} else {
			address += strconv.Itoa(port + c/nbrAddr)
		}
		entities[c] = network.NewServerIdentity(key.Public, address)
		sc.PrivateKeys[entities[c].Addresses[0]] = key.Secret
	}
	// And close all our listeners
	if localhosts {
		for _, l := range listeners {
			err := l.Close()
			if err != nil {
				log.Fatal("Couldn't close port:", l, err)
			}
		}
	}

	sc.Roster = NewRoster(entities)
	log.Lvl3("Creating entity List took: " + time.Now().Sub(start).String())
}
Example #9
// EnableMeasure will actually allow the sending of the measures if given
// true. Otherwise no measures are sent at all.
func EnableMeasure(b bool) {
	if b {
		log.Lvl3("Monitor: Measure enabled")
	} else {
		log.Lvl3("Monitor: Measure disabled")
	}
	enabled = b
}
Example #10
func TestHostClose2(t *testing.T) {
	defer log.AfterTest(t)
	local := sda.NewLocalTest()
	defer local.CloseAll()

	_, _, tree := local.GenTree(2, false, true, true)
	log.Lvl3(tree.Dump())
	time.Sleep(time.Millisecond * 100)
	log.Lvl3("Done")
}
Example #11
// CreateTree creates the tree as defined in SimulationBFTree and stores the
// result in 'sc'
func (s *SimulationBFTree) CreateTree(sc *SimulationConfig) error {
	log.Lvl3("CreateTree strarted")
	start := time.Now()
	if sc.Roster == nil {
		return errors.New("Empty Roster")
	}
	sc.Tree = sc.Roster.GenerateBigNaryTree(s.BF, s.Hosts)
	log.Lvl3("Creating tree took: " + time.Now().Sub(start).String())
	return nil
}
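CreateRoster (Example #8) and CreateTree operate on the same SimulationConfig and are typically run back-to-back. A minimal sketch of that sequence; the host count, branching factor, addresses and port are illustrative assumptions:

// Hypothetical setup: build the roster of entities first, then the tree
// on top of it.
s := &SimulationBFTree{Hosts: 4, BF: 2}
sc := &SimulationConfig{}
s.CreateRoster(sc, []string{"localhost"}, 2000)
if err := s.CreateTree(sc); err != nil {
	log.Fatal("Couldn't create tree:", err)
}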
Example #12
// RunTest runs a single test - it takes a run-configuration that will be
// copied to the deterlab-server
func RunTest(rc platform.RunConfig) (*monitor.Stats, error) {
	done := make(chan struct{})
	CheckHosts(rc)
	rc.Delete("simulation")
	rs := monitor.NewStats(rc.Map(), "hosts", "bf")
	mon := monitor.NewMonitor(rs)

	if err := deployP.Deploy(rc); err != nil {
		log.Error(err)
		return rs, err
	}

	mon.SinkPort = monitorPort
	if err := deployP.Cleanup(); err != nil {
		log.Error(err)
		return rs, err
	}
	go func() {
		if err := mon.Listen(); err != nil {
			log.Fatal("Could not monitor.Listen():", err)
		}
	}()
	// Start monitor before so ssh tunnel can connect to the monitor
	// in case of deterlab.
	err := deployP.Start()
	if err != nil {
		log.Error(err)
		return rs, err
	}

	go func() {
		if err := deployP.Wait(); err != nil {
			log.Lvl3("Test failed:", err)
			if err := deployP.Cleanup(); err != nil {
				log.Lvl3("Couldn't cleanup platform:", err)
			}
			done <- struct{}{}
			return
		}
		log.Lvl3("Test complete:", rs)
		done <- struct{}{}
	}()

	timeOut := getRunWait(rc)
	// can timeout the command if it takes too long
	select {
	case <-done:
		mon.Stop()
		return rs, nil
	case <-time.After(time.Second * time.Duration(timeOut)):
		mon.Stop()
		return rs, errors.New("Simulation timeout")
	}
}
Example #13
// FuncPrepareClose sends a `PrepareClose`-message down the tree.
func (p *ProtocolCloseAll) FuncPrepareClose(pc PrepareCloseMsg) {
	log.Lvl3(pc.ServerIdentity.Addresses, "sent PrepClose to", p.ServerIdentity().Addresses)
	if !p.IsLeaf() {
		for _, c := range p.Children() {
			err := p.SendTo(c, &PrepareClose{})
			log.Lvl3(p.ServerIdentity().Addresses, "sends to", c.ServerIdentity.Addresses, "(err=", err, ")")
		}
	} else {
		p.FuncClose(nil)
	}
}
Example #14
// Cleanup kills all running cothority-binaries
func (d *Localhost) Cleanup() error {
	log.Lvl3("Cleaning up")
	ex := d.runDir + "/" + d.Simulation
	err := exec.Command("pkill", "-f", ex).Run()
	if err != nil {
		log.Lvl3("Error stopping localhost", err)
	}

	// Wait for eventual connections to clean up
	time.Sleep(time.Second)
	return nil
}
Example #15
// Configure various internal variables
func (d *Localhost) Configure(pc *Config) {
	pwd, _ := os.Getwd()
	d.runDir = pwd + "/platform/localhost"
	d.localDir = pwd
	d.debug = pc.Debug
	d.running = false
	d.monitorPort = pc.MonitorPort
	d.errChan = make(chan error)
	if d.Simulation == "" {
		log.Fatal("No simulation defined in simulation")
	}
	log.Lvlf3("Localhost dirs: RunDir %s", d.runDir)
	log.Lvl3("Localhost configured ...")
}
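Configure, Start (Example #3), Wait (Example #4) and Cleanup (Example #14) are meant to be chained by a driver. A minimal sketch of that flow; the Config values and the Simulation name are illustrative assumptions:

// Hypothetical driver for a localhost run: configure, start all
// binaries, then wait for them to finish.
d := &Localhost{Simulation: "example"}
d.Configure(&Config{Debug: 1, MonitorPort: 10000})
if err := d.Start(); err != nil {
	log.Fatal("Couldn't start the simulation:", err)
}
if err := d.Wait(); err != nil {
	log.Error("Simulation failed:", err)
}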
Example #16
// HandleReply handles the replies going up the tree, each holding a counter
// to verify the number of nodes.
func (p *ProtocolExampleHandlers) HandleReply(reply []StructReply) error {
	children := 1
	for _, c := range reply {
		children += c.ChildrenCount
	}
	log.Lvl3(p.ServerIdentity().Addresses, "is done with total of", children)
	if !p.IsRoot() {
		log.Lvl3("Sending to parent")
		return p.SendTo(p.Parent(), &Reply{children})
	}
	log.Lvl3("Root-node is done - nbr of children found:", children)
	p.ChildCount <- children
	return nil
}
Example #17
// listen will select on the different channels
func (nt *Ntree) listen() {
	for {
		select {
		// Dispatch the block through the whole tree
		case msg := <-nt.announceChan:
			log.Lvl3(nt.Name(), "Received Block announcement")
			nt.block = msg.BlockAnnounce.Block
			// verify the block
			go byzcoin.VerifyBlock(nt.block, "", "", nt.verifyBlockChan)
			if nt.IsLeaf() {
				nt.startBlockSignature()
				continue
			}
			for _, tn := range nt.Children() {
				err := nt.SendTo(tn, &msg.BlockAnnounce)
				if err != nil {
					log.Error(nt.Name(),
						"couldn't send to", tn.Name(),
						err)
				}
			}
			// generate your own signature / exception and pass that up to the
			// root
		case msg := <-nt.blockSignatureChan:
			nt.handleBlockSignature(&msg.NaiveBlockSignature)
			// Dispatch the signature + exception made before through the whole
			// tree
		case msg := <-nt.roundSignatureRequestChan:
			log.Lvl3(nt.Name(), " Signature Request Received")
			go nt.verifySignatureRequest(&msg.RoundSignatureRequest)

			if nt.IsLeaf() {
				nt.startSignatureResponse()
				continue
			}

			for _, tn := range nt.Children() {
				err := nt.SendTo(tn, &msg.RoundSignatureRequest)
				if err != nil {
					log.Error(nt.Name(), "couldn't sent to",
						tn.Name(), err)
				}
			}
			// Decide if we want to sign this or not
		case msg := <-nt.roundSignatureResponseChan:
			nt.handleRoundSignatureResponse(&msg.RoundSignatureResponse)
		}
	}
}
Example #18
// ConnectSink connects to the given endpoint and initialises a json
// encoder. It can be the address of a proxy or a monitoring process.
// Returns an error if it could not connect to the endpoint.
func ConnectSink(addr string) error {
	if encoder != nil {
		return nil
	}
	log.Lvl3("Connecting to:", addr)
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return err
	}
	log.Lvl3("Connected to sink:", addr)
	sink = addr
	connection = conn
	encoder = json.NewEncoder(conn)
	return nil
}
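A short usage sketch, assuming ConnectSink and EnableMeasure (Example #9) both live in the monitor package and that a monitor is already listening on the given address:

// Hypothetical client setup: connect to the sink before sending any
// measures, then enable measuring.
if err := monitor.ConnectSink("localhost:10000"); err != nil {
	log.Fatal("Couldn't connect to sink:", err)
}
monitor.EnableMeasure(true)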
Example #19
// Dispatch processes the incoming messages and handles timeouts
func (p *Propagate) Dispatch() error {
	process := true
	log.Lvl4(p.ServerIdentity())
	for process {
		p.Lock()
		timeout := time.Millisecond * time.Duration(p.sd.Msec)
		p.Unlock()
		select {
		case msg := <-p.ChannelSD:
			log.Lvl3(p.ServerIdentity(), "Got data from", msg.ServerIdentity)
			if p.onData != nil {
				_, netMsg, err := network.UnmarshalRegistered(msg.Data)
				if err == nil {
					p.onData(netMsg)
				}
			}
			if !p.IsRoot() {
				log.Lvl3(p.ServerIdentity(), "Sending to parent")
				p.SendToParent(&PropagateReply{})
			}
			if p.IsLeaf() {
				process = false
			} else {
				log.Lvl3(p.ServerIdentity(), "Sending to children")
				p.SendToChildren(&msg.PropagateSendData)
			}
		case <-p.ChannelReply:
			p.received++
			log.Lvl4(p.ServerIdentity(), "received:", p.received, p.subtree)
			if !p.IsRoot() {
				p.SendToParent(&PropagateReply{})
			}
			if p.received == p.subtree {
				process = false
			}
		case <-time.After(timeout):
			log.Fatal("Timeout")
			process = false
		}
	}
	if p.IsRoot() {
		if p.onDoneCb != nil {
			p.onDoneCb(p.received + 1)
		}
	}
	p.Done()
	return nil
}
Example #20
// NewDataFilter returns a new data filter initialized with the right values
// taken from the run config. If absent, it falls back to default values.
// The expected key is:
// filter_measurementname = perc => filters out all values above the given
// percentile for that measurement
func NewDataFilter(config map[string]string) DataFilter {
	df := DataFilter{
		percentiles: make(map[string]float64),
	}
	reg, err := regexp.Compile("filter_(\\w+)")
	if err != nil {
		log.Lvl1("DataFilter: Error compiling regexp:", err)
		return df
	}
	// analyse each entry
	for k, v := range config {
		if measure := reg.FindString(k); measure == "" {
			continue
		} else {
			// by how much must this value be filtered?
			perc, err := strconv.ParseFloat(v, 64)
			if err != nil {
				log.Lvl1("DataFilter: Cannot parse value for filter measure:", measure)
				continue
			}
			measure = strings.Replace(measure, "filter_", "", -1)
			df.percentiles[measure] = perc
		}
	}
	log.Lvl3("Filtering:", df.percentiles)
	return df
}
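A usage sketch combining NewDataFilter with the Filter method from Example #5; the config entry and the data are made up for illustration:

// Hypothetical run config: filter the "round" measure at the 95th
// percentile, then drop the values above it.
df := NewDataFilter(map[string]string{"filter_round": "95"})
values := []float64{0.10, 0.12, 0.11, 0.13, 9.99}
filtered := df.Filter("round", values)
log.Lvl3("Kept", len(filtered), "of", len(values), "values")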
Example #21
// HandleMessageAggregate receives an aggregated slice of messages and
// signals their arrival on IncomingHandlers.
func (p *ProtocolHandlers) HandleMessageAggregate(msg []struct {
	*sda.TreeNode
	NodeTestAggMsg
}) {
	log.Lvl3("Received message")
	IncomingHandlers <- p.TreeNodeInstance
}
Example #22
func TestHashChunk(t *testing.T) {
	tmpfileIO, err := ioutil.TempFile("", "hash_test.bin")
	if err != nil {
		t.Fatal(err)
	}
	tmpfileIO.Close()
	tmpfile := tmpfileIO.Name()
	defer os.Remove(tmpfile)
	str := make([]byte, 1234)
	err = ioutil.WriteFile(tmpfile, str, 0777)
	if err != nil {
		t.Fatal("Couldn't write file")
	}

	for _, i := range []int{16, 128, 1024} {
		log.Lvl3("Reading", i, "bytes")
		hash, err := crypto.HashFileChunk(ed25519.NewAES128SHA256Ed25519(false).Hash(),
			tmpfile, i)
		if err != nil {
			t.Fatal("Couldn't hash", tmpfile, err)
		}
		if len(hash) != 32 {
			t.Fatal("Length of sha256 should be 32")
		}
	}
}
Example #23
// Start will contact everyone and make the connections
func (b *Broadcast) Start() error {
	n := len(b.Tree().List())
	b.repliesLeft = n * (n - 1) / 2
	log.Lvl3(b.Name(), "Sending announce to", b.repliesLeft, "nodes")
	return b.SendTo(b.Root(), &ContactNodes{})
}
Example #24
// Start the last phase: send up the final signature
func (nt *Ntree) startSignatureResponse() {
	log.Lvl3(nt.Name(), "Start Signature Response phase")
	nt.computeSignatureResponse()
	if err := nt.SendTo(nt.Parent(), nt.tempSignatureResponse); err != nil {
		log.Error(err)
	}
}
Example #25
// handleConnection decodes the data received on the connection and
// aggregates it into its stats
func (m *Monitor) handleConnection(conn net.Conn) {
	dec := json.NewDecoder(conn)
	nerr := 0
	for {
		measure := &SingleMeasure{}
		if err := dec.Decode(measure); err != nil {
			// if end of connection
			if err == io.EOF || strings.Contains(err.Error(), "closed") {
				break
			}
			// otherwise log it
			log.Lvl2("Error: monitor decoding from", conn.RemoteAddr().String(), ":", err)
			nerr++
			if nerr > 1 {
				log.Lvl2("Monitor: too many errors from", conn.RemoteAddr().String(), ": Abort.")
				break
			}
		}

		log.Lvlf3("Monitor: received a Measure from %s: %+v", conn.RemoteAddr().String(), measure)
		// Special case where the measurement is indicating a FINISHED step
		switch strings.ToLower(measure.Name) {
		case "end":
			log.Lvl3("Finishing monitor")
			m.done <- conn.RemoteAddr().String()
		default:
			m.measures <- measure
		}
	}
}
Example #26
// dispatchMsgReader pops messages from the dispatch-queue and hands them
// to the protocol until the instance is closing.
func (n *TreeNodeInstance) dispatchMsgReader() {
	for {
		n.msgDispatchQueueMutex.Lock()
		if n.closing {
			log.Lvl3("Closing reader")
			n.msgDispatchQueueMutex.Unlock()
			return
		}
		if len(n.msgDispatchQueue) > 0 {
			log.Lvl4(n.Info(), "Read message and dispatching it",
				len(n.msgDispatchQueue))
			msg := n.msgDispatchQueue[0]
			n.msgDispatchQueue = n.msgDispatchQueue[1:]
			n.msgDispatchQueueMutex.Unlock()
			err := n.dispatchMsgToProtocol(msg)
			if err != nil {
				log.Error("Error while dispatching message:", err)
			}
		} else {
			n.msgDispatchQueueMutex.Unlock()
			log.Lvl4(n.Info(), "Waiting for message")
			<-n.msgDispatchQueueWait
		}
	}
}
Example #27
// The core of the file: reads any input from the connection and outputs it
// into the server connection
func proxyConnection(conn net.Conn, done chan bool) {
	dec := json.NewDecoder(conn)
	nerr := 0
	for {
		m := SingleMeasure{}
		// Receive data
		if err := dec.Decode(&m); err != nil {
			if err == io.EOF {
				break
			}
			log.Lvl1("Error receiving data from", conn.RemoteAddr().String(), ":", err)
			nerr++
			if nerr > 1 {
				log.Lvl1("Too many errors from", conn.RemoteAddr().String(), ": Abort connection")
				break
			}
		}
		log.Lvl3("Proxy received", m)

		// Proxy data back to monitor
		if err := serverEnc.Encode(m); err != nil {
			log.Lvl2("Error proxying data :", err)
			break
		}
		if m.Name == "end" {
			// the end
			log.Lvl2("Proxy detected end of measurement. Closing connection.")
			break
		}
	}
	if err := conn.Close(); err != nil {
		log.Error("Couldn't close connection:", err)
	}
	done <- true
}
Example #28
// RegisterHandler takes a function whose single argument is a struct with two
// elements: a TreeNode and a message. Every incoming message of that type
// will be passed to this handler.
// This function also handles
// - registration of the message-type
// - aggregation or not of messages: if the argument is a slice of such
//   structs, the messages will be aggregated, else they will come one-by-one
func (n *TreeNodeInstance) RegisterHandler(c interface{}) error {
	flags := uint32(0)
	cr := reflect.TypeOf(c)
	// Check that we have the correct handler-type
	if cr.Kind() != reflect.Func {
		return errors.New("Input is not a function")
	}
	cr = cr.In(0)
	if cr.Kind() == reflect.Slice {
		flags += AggregateMessages
		cr = cr.Elem()
	}
	if cr.Kind() != reflect.Struct {
		return errors.New("Input is not a function taking a struct")
	}
	if cr.NumField() != 2 {
		return errors.New("Input's struct doesn't have 2 elements")
	}
	if cr.Field(0).Type != reflect.TypeOf(&TreeNode{}) {
		return errors.New("Input's struct doesn't have a TreeNode as first element")
	}
	// Automatic registration of the message to the network library.
	typ := network.RegisterMessageUUID(network.RTypeToMessageTypeID(
		cr.Field(1).Type),
		cr.Field(1).Type)
	n.handlers[typ] = c
	n.messageTypeFlags[typ] = flags
	log.Lvl3("Registered handler", typ, "with flags", flags)
	return nil
}
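For reference, the handler shape RegisterHandler expects is the one used in Examples #2 and #21. A minimal sketch with a hypothetical protocol and message type:

// Hypothetical protocol embedding a TreeNodeInstance, as the other
// examples do.
type PingProtocol struct {
	*sda.TreeNodeInstance
}

type Ping struct{}

// The single argument is a struct pairing the sending *sda.TreeNode
// with the message itself, which is exactly what RegisterHandler
// checks for via reflection.
func (p *PingProtocol) handlePing(msg struct {
	*sda.TreeNode
	Ping
}) {
	log.Lvl3("Got a Ping from", msg.TreeNode.String())
}

Registering it, typically in the protocol's constructor, is then a single call: err := p.RegisterHandler(p.handlePing).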
Example #29
// startBlockSignature will send the first signature up the tree.
func (nt *Ntree) startBlockSignature() {
	log.Lvl3(nt.Name(), "Starting Block Signature Phase")
	nt.computeBlockSignature()
	if err := nt.SendTo(nt.Parent(), nt.tempBlockSig); err != nil {
		log.Error(err)
	}
}
Example #30
// Dispatch is an infinite loop to handle messages from channels
func (p *ProtocolExampleChannels) Dispatch() error {
	for {
		select {
		case announcement := <-p.ChannelAnnounce:
			if !p.IsLeaf() {
				// If we have children, send the same message to all of them
				for _, c := range p.Children() {
					err := p.SendTo(c, &announcement.Announce)
					if err != nil {
						log.Error(p.Info(),
							"failed to send to",
							c.Name(), err)
					}
				}
			} else {
				// If we're the leaf, start to reply
				err := p.SendTo(p.Parent(), &Reply{1})
				if err != nil {
					log.Error(p.Info(), "failed to send reply to",
						p.Parent().Name(), err)
				}
				return nil
			}
		case reply := <-p.ChannelReply:
			children := 1
			for _, c := range reply {
				children += c.ChildrenCount
			}
			log.Lvl3(p.ServerIdentity().Addresses, "is done with total of", children)
			if !p.IsRoot() {
				log.Lvl3("Sending to parent")
				err := p.SendTo(p.Parent(), &Reply{children})
				if err != nil {
					log.Error(p.Info(), "failed to reply to",
						p.Parent().Name(), err)
				}
			} else {
				log.Lvl3("Root-node is done - nbr of children found:", children)
				p.ChildCount <- children
			}
			return nil
		}
	}
}
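The two channels Dispatch selects on have to be registered beforehand. A sketch of a constructor doing that; RegisterChannel (as the channel-based counterpart of RegisterHandler from Example #28), the ProtocolInstance return type and the field layout of ProtocolExampleChannels are all assumptions:

// Hypothetical constructor: wire up the channels Dispatch selects on.
// RegisterChannel and ProtocolInstance are assumed API names.
func NewExampleChannels(n *sda.TreeNodeInstance) (sda.ProtocolInstance, error) {
	p := &ProtocolExampleChannels{
		TreeNodeInstance: n,
		ChildCount:       make(chan int),
	}
	if err := p.RegisterChannel(&p.ChannelAnnounce); err != nil {
		return nil, err
	}
	if err := p.RegisterChannel(&p.ChannelReply); err != nil {
		return nil, err
	}
	return p, nil
}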