Example #1
// TestBlockProposal ensures that a node blocks a proposal while it does not
// know who the current leader is, and accepts the proposal once it does.
func TestBlockProposal(t *testing.T) {
	n := newNode()
	r := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage())
	go n.run(r)
	defer n.Stop()

	errc := make(chan error, 1)
	go func() {
		errc <- n.Propose(context.TODO(), []byte("somedata"))
	}()

	testutil.WaitSchedule()
	select {
	case err := <-errc:
		t.Errorf("err = %v, want blocking", err)
	default:
	}

	n.Campaign(context.TODO())
	testutil.WaitSchedule()
	select {
	case err := <-errc:
		if err != nil {
			t.Errorf("err = %v, want %v", err, nil)
		}
	default:
		t.Errorf("blocking proposal, want unblocking")
	}
}
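Propose blocks until the node knows of a leader, so application code that cannot wait indefinitely usually passes a context with a deadline instead of context.TODO(). A minimal sketch, assuming an already started Node n and the standard context, time, and log packages; the one-second timeout is an arbitrary choice:

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := n.Propose(ctx, []byte("somedata")); err != nil {
		// If no leader emerges before the deadline, err is the context's
		// error (e.g. context.DeadlineExceeded) rather than a raft error.
		log.Printf("proposal not accepted: %v", err)
	}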
Example #2
// TestNodePropose ensures that node.Propose sends the given proposal to the underlying raft.
func TestNodePropose(t *testing.T) {
	msgs := []raftpb.Message{}
	appendStep := func(r *raft, m raftpb.Message) {
		msgs = append(msgs, m)
	}

	n := newNode()
	s := NewMemoryStorage()
	r := newTestRaft(1, []uint64{1}, 10, 1, s)
	go n.run(r)
	n.Campaign(context.TODO())
	for {
		rd := <-n.Ready()
		s.Append(rd.Entries)
		// once this raft becomes leader, redirect its step function to appendStep
		if rd.SoftState.Lead == r.id {
			r.step = appendStep
			n.Advance()
			break
		}
		n.Advance()
	}
	n.Propose(context.TODO(), []byte("somedata"))
	n.Stop()

	if len(msgs) != 1 {
		t.Fatalf("len(msgs) = %d, want %d", len(msgs), 1)
	}
	if msgs[0].Type != raftpb.MsgProp {
		t.Errorf("msg type = %d, want %d", msgs[0].Type, raftpb.MsgProp)
	}
	if !reflect.DeepEqual(msgs[0].Entries[0].Data, []byte("somedata")) {
		t.Errorf("data = %v, want %v", msgs[0].Entries[0].Data, []byte("somedata"))
	}
}
Example #3
// TestMultiNodeProposeConfig ensures that multiNode.ProposeConfChange
// sends the given configuration proposal to the underlying raft.
func TestMultiNodeProposeConfig(t *testing.T) {
	mn := newMultiNode(1)
	go mn.run()
	s := NewMemoryStorage()
	mn.CreateGroup(1, newTestConfig(1, nil, 10, 1, s), []Peer{{ID: 1}})
	mn.Campaign(context.TODO(), 1)
	proposed := false
	var lastIndex uint64
	var ccdata []byte
	for {
		rds := <-mn.Ready()
		rd := rds[1]
		s.Append(rd.Entries)
		// once this raft becomes leader, propose a configuration change
		if !proposed && rd.SoftState.Lead == mn.id {
			cc := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 1}
			var err error
			ccdata, err = cc.Marshal()
			if err != nil {
				t.Fatal(err)
			}
			mn.ProposeConfChange(context.TODO(), 1, cc)
			proposed = true
		}
		mn.Advance(rds)

		var err error
		lastIndex, err = s.LastIndex()
		if err != nil {
			t.Fatal(err)
		}
		if lastIndex >= 3 {
			break
		}
	}
	mn.Stop()

	entries, err := s.Entries(lastIndex, lastIndex+1, noLimit)
	if err != nil {
		t.Fatal(err)
	}
	if len(entries) != 1 {
		t.Fatalf("len(entries) = %d, want %d", len(entries), 1)
	}
	if entries[0].Type != raftpb.EntryConfChange {
		t.Fatalf("type = %v, want %v", entries[0].Type, raftpb.EntryConfChange)
	}
	if !bytes.Equal(entries[0].Data, ccdata) {
		t.Errorf("data = %v, want %v", entries[0].Data, ccdata)
	}
}
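TestMultiNodeProposeConfig only checks that the marshaled ConfChange reaches the log. In a running application the committed entry still has to be handed back to raft; a rough sketch of that step, assuming the MultiNode API of this era and a committed raftpb.EntryConfChange entry (called entry below) read from Ready:

	var cc raftpb.ConfChange
	if err := cc.Unmarshal(entry.Data); err != nil {
		log.Fatal(err)
	}
	// Feeding the committed change back lets raft update its view of group 1.
	mn.ApplyConfChange(1, cc)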
Example #4
func BenchmarkProposal3Nodes(b *testing.B) {
	peers := []raft.Peer{{1, nil}, {2, nil}, {3, nil}}
	nt := newRaftNetwork(1, 2, 3)

	nodes := make([]*node, 0)

	for i := 1; i <= 3; i++ {
		n := startNode(uint64(i), peers, nt.nodeNetwork(uint64(i)))
		nodes = append(nodes, n)
	}
	// get ready and warm up
	time.Sleep(50 * time.Millisecond)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		nodes[0].Propose(context.TODO(), []byte("somedata"))
	}

	for _, n := range nodes {
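		// Note: this only inspects each node's final commit index (b.N
		// proposals plus the three initial ConfChange entries and the
		// leader's election no-op); lagging nodes are skipped, not awaited.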
		if n.state.Commit != uint64(b.N+4) {
			continue
		}
	}
	b.StopTimer()

	for _, n := range nodes {
		n.stop()
	}
}
Example #5
func (b *bee) createGroup() error {
	c := b.colony()
	if c.IsNil() || c.ID == Nil {
		return fmt.Errorf("%v is in no colony", b)
	}
	cfg := raft.GroupConfig{
		ID:             c.ID,
		Name:           b.String(),
		StateMachine:   b,
		Peers:          b.peers(),
		DataDir:        b.statePath(),
		SnapCount:      1024,
		FsyncTick:      b.hive.config.RaftFsyncTick,
		ElectionTicks:  b.hive.config.RaftElectTicks,
		HeartbeatTicks: b.hive.config.RaftHBTicks,
		MaxInFlights:   b.hive.config.RaftInFlights,
		MaxMsgSize:     b.hive.config.RaftMaxMsgSize,
	}
	if err := b.hive.node.CreateGroup(context.TODO(), cfg); err != nil {
		return err
	}

	if err := b.raftBarrier(); err != nil {
		return err
	}

	b.enableEmit()
	glog.V(2).Infof("%v started its raft node", b)
	return nil
}
Example #6
func TestBasicProgress(t *testing.T) {
	peers := []raft.Peer{{1, nil}, {2, nil}, {3, nil}, {4, nil}, {5, nil}}
	nt := newRaftNetwork(1, 2, 3, 4, 5)

	nodes := make([]*node, 0)

	for i := 1; i <= 5; i++ {
		n := startNode(uint64(i), peers, nt.nodeNetwork(uint64(i)))
		nodes = append(nodes, n)
	}

	time.Sleep(10 * time.Millisecond)

	for i := 0; i < 10000; i++ {
		nodes[0].Propose(context.TODO(), []byte("somedata"))
	}

	time.Sleep(500 * time.Millisecond)
	for _, n := range nodes {
		n.stop()
		if n.state.Commit != 10006 {
			t.Errorf("commit = %d, want = 10006", n.state.Commit)
		}
	}
}
Example #7
// TestNodeStep ensures that node.Step sends msgProp to propc chan
// and other kinds of messages to recvc chan.
func TestNodeStep(t *testing.T) {
	for i, msgn := range raftpb.MessageType_name {
		n := &node{
			propc: make(chan raftpb.Message, 1),
			recvc: make(chan raftpb.Message, 1),
		}
		msgt := raftpb.MessageType(i)
		n.Step(context.TODO(), raftpb.Message{Type: msgt})
		// Proposal goes to propc chan. Others go to recvc chan.
		if msgt == raftpb.MsgProp {
			select {
			case <-n.propc:
			default:
				t.Errorf("%d: cannot receive %s on propc chan", msgt, msgn)
			}
		} else {
			if msgt == raftpb.MsgBeat || msgt == raftpb.MsgHup || msgt == raftpb.MsgUnreachable || msgt == raftpb.MsgSnapStatus {
				select {
				case <-n.recvc:
					t.Errorf("%d: step should ignore %s", msgt, msgn)
				default:
				}
			} else {
				select {
				case <-n.recvc:
				default:
					t.Errorf("%d: cannot receive %s on recvc chan", msgt, msgn)
				}
			}
		}
	}
}
Example #8
func (n *node) start() {
	n.stopc = make(chan struct{})
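	// time.Tick never stops its underlying Ticker; that is fine here because
	// this test node is expected to live for the remainder of the process.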
	ticker := time.Tick(5 * time.Millisecond)

	go func() {
		for {
			select {
			case <-ticker:
				n.Tick()
			case rd := <-n.Ready():
				if !raft.IsEmptyHardState(rd.HardState) {
					n.state = rd.HardState
					n.storage.SetHardState(n.state)
				}
				n.storage.Append(rd.Entries)
				time.Sleep(time.Millisecond)
				// TODO: make send async, more like real world...
				for _, m := range rd.Messages {
					n.iface.send(m)
				}
				n.Advance()
			case m := <-n.iface.recv():
				n.Step(context.TODO(), m)
			case <-n.stopc:
				n.Stop()
				log.Printf("raft.%d: stop", n.id)
				n.Node = nil
				close(n.stopc)
				return
			case p := <-n.pausec:
				recvms := make([]raftpb.Message, 0)
				for p {
					select {
					case m := <-n.iface.recv():
						recvms = append(recvms, m)
					case p = <-n.pausec:
					}
				}
				// step all pending messages
				for _, m := range recvms {
					n.Step(context.TODO(), m)
				}
			}
		}
	}()
}
Example #9
// TestMultiNodePropose ensures that node.Propose sends the given proposal to the underlying raft.
func TestMultiNodePropose(t *testing.T) {
	mn := newMultiNode(1)
	go mn.run()
	s := NewMemoryStorage()
	mn.CreateGroup(1, newTestConfig(1, nil, 10, 1, s), []Peer{{ID: 1}})
	mn.Campaign(context.TODO(), 1)
	proposed := false
	for {
		rds := <-mn.Ready()
		rd := rds[1]
		s.Append(rd.Entries)
		// Once we are the leader, propose a command.
		if !proposed && rd.SoftState.Lead == mn.id {
			mn.Propose(context.TODO(), 1, []byte("somedata"))
			proposed = true
		}
		mn.Advance(rds)

		// Exit when we have three entries: one ConfChange, one no-op for the election,
		// and our proposed command.
		lastIndex, err := s.LastIndex()
		if err != nil {
			t.Fatal(err)
		}
		if lastIndex >= 3 {
			break
		}
	}
	mn.Stop()

	lastIndex, err := s.LastIndex()
	if err != nil {
		t.Fatal(err)
	}
	entries, err := s.Entries(lastIndex, lastIndex+1, noLimit)
	if err != nil {
		t.Fatal(err)
	}
	if len(entries) != 1 {
		t.Fatalf("len(entries) = %d, want %d", len(entries), 1)
	}
	if !bytes.Equal(entries[0].Data, []byte("somedata")) {
		t.Errorf("entries[0].Data = %v, want %v", entries[0].Data, []byte("somedata"))
	}
}
Example #10
// TestProposeUnknownGroup ensures that we gracefully handle proposals
// for groups we don't know about (which can happen on a former leader
// that has been removed from the group).
//
// It is analogous to TestBlockProposal from node_test.go but in
// MultiNode we cannot block proposals based on individual group
// leader status.
func TestProposeUnknownGroup(t *testing.T) {
	mn := newMultiNode(1)
	go mn.run()
	defer mn.Stop()

	// A nil error from Propose() doesn't mean much. In this case the
	// proposal will be dropped on the floor because we don't know
	// anything about group 42. This is a very crude test that mainly
	// guarantees that we don't panic in this case.
	if err := mn.Propose(context.TODO(), 42, []byte("somedata")); err != nil {
		t.Errorf("err = %v, want nil", err)
	}
}
Example #11
func TestPause(t *testing.T) {
	peers := []raft.Peer{{1, nil}, {2, nil}, {3, nil}, {4, nil}, {5, nil}}
	nt := newRaftNetwork(1, 2, 3, 4, 5)

	nodes := make([]*node, 0)

	for i := 1; i <= 5; i++ {
		n := startNode(uint64(i), peers, nt.nodeNetwork(uint64(i)))
		nodes = append(nodes, n)
	}

	time.Sleep(50 * time.Millisecond)
	for i := 0; i < 300; i++ {
		nodes[0].Propose(context.TODO(), []byte("somedata"))
	}
	nodes[1].pause()
	for i := 0; i < 300; i++ {
		nodes[0].Propose(context.TODO(), []byte("somedata"))
	}
	nodes[2].pause()
	for i := 0; i < 300; i++ {
		nodes[0].Propose(context.TODO(), []byte("somedata"))
	}
	nodes[2].resume()
	for i := 0; i < 300; i++ {
		nodes[0].Propose(context.TODO(), []byte("somedata"))
	}
	nodes[1].resume()

	// give some time for nodes to catch up with the raft leader
	time.Sleep(300 * time.Millisecond)
	for _, n := range nodes {
		n.stop()
		if n.state.Commit != 1206 {
			t.Errorf("commit = %d, want = 1206", n.state.Commit)
		}
	}
}
Example #12
func (h *HelloHTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	name, ok := vars["name"]
	if !ok {
		http.Error(w, "no name", http.StatusBadRequest)
		return
	}

	res, err := beehive.Sync(context.TODO(), HelloHTTP{Name: name})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	fmt.Fprintf(w, "hello %s (%d)\n", name, res.(int))
}
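mux.Vars only yields the "name" variable when the handler is mounted on a gorilla/mux route that declares it. A minimal wiring sketch; the route path and listen address are assumptions:

	router := mux.NewRouter()
	router.Handle("/hello/{name}", &HelloHTTPHandler{})
	log.Fatal(http.ListenAndServe(":8080", router))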
Example #13
func (h *hive) handleCmd(cc cmdAndChannel) {
	glog.V(2).Infof("%v handles cmd %+v", h, cc.cmd)
	switch d := cc.cmd.Data.(type) {
	case cmdStop:
		// TODO(soheil): This has a race with Stop(). Use atomics here.
		h.status = hiveStopped
		h.stopListener()
		h.stopQees()
		h.node.Stop()
		cc.ch <- cmdResult{}

	case cmdPing:
		cc.ch <- cmdResult{}

	case cmdSync:
		err := h.raftBarrier()
		cc.ch <- cmdResult{Err: err}

	case cmdNewHiveID:
		r, err := h.node.ProposeRetry(hiveGroup, newHiveID{},
			h.config.RaftElectTimeout(), 10)
		cc.ch <- cmdResult{
			Data: r,
			Err:  err,
		}

	case cmdAddHive:
		err := h.node.AddNodeToGroup(context.TODO(), d.Hive.ID, hiveGroup,
			d.Hive.Addr)
		cc.ch <- cmdResult{
			Err: err,
		}

	case cmdLiveHives:
		cc.ch <- cmdResult{
			Data: h.registry.hives(),
		}

	default:
		cc.ch <- cmdResult{
			Err: ErrInvalidCmd,
		}
	}
}
Example #14
func defaultHTTPHandler(w http.ResponseWriter, r *http.Request, h bh.Hive) {
	w.Header().Set("Server", "Beehive-netctrl-HTTP-Server")

	vars := mux.Vars(r)

	submodule, ok := vars["submodule"]
	if !ok {
		/* This should not happen :) */
		http.Error(w, "Invalid request", http.StatusBadRequest)
		return
	}

	verb, ok := vars["verb"]
	if !ok {
		/* This should not happen :) */
		http.Error(w, "Invalid request", http.StatusBadRequest)
		return
	}

	creq := HTTPRequest{
		AppName: submodule,
		Verb:    verb,
	}

	// Read content data if available :)
	if r.ContentLength > 0 {
		data, err := ioutil.ReadAll(r.Body)
		if err == nil {
			creq.Data = data
		}
	}

	cres, err := h.Sync(context.TODO(), creq)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		fmt.Printf("defaultHTTPHandler: %v\n", err)
		return
	}
	if cres != nil {
		// WriteHeader must come before Write, and only on success.
		w.WriteHeader(http.StatusOK)
		w.Write(cres.(HTTPResponse).Data)
	}
}
Example #15
// Cancel and Stop should unblock Step()
func TestNodeStepUnblock(t *testing.T) {
	// a node without buffer to block step
	n := &node{
		propc: make(chan raftpb.Message),
		done:  make(chan struct{}),
	}

	ctx, cancel := context.WithCancel(context.Background())
	stopFunc := func() { close(n.done) }

	tests := []struct {
		unblock func()
		werr    error
	}{
		{stopFunc, ErrStopped},
		{cancel, context.Canceled},
	}

	for i, tt := range tests {
		errc := make(chan error, 1)
		go func() {
			err := n.Step(ctx, raftpb.Message{Type: raftpb.MsgProp})
			errc <- err
		}()
		tt.unblock()
		select {
		case err := <-errc:
			if err != tt.werr {
				t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
			}
			// clean up side effect
			if ctx.Err() != nil {
				ctx = context.TODO()
			}
			select {
			case <-n.done:
				n.done = make(chan struct{})
			default:
			}
		case <-time.After(time.Millisecond * 100):
			t.Errorf("#%d: failed to unblock step", i)
		}
	}
}
Example #16
func (httpReceiver *HTTPReceiver) ServeHTTP(
	responseWriter http.ResponseWriter,
	httpRequest *http.Request) {
	logger.Info.Printf("[HTTPReceiver] ServeHTTP %s %s \n",
		httpRequest.Method,
		httpRequest.URL)
	vars := mux.Vars(httpRequest)
	destinationBee, ok := vars["destinationBee"]
	if !ok {
		BadRequest(responseWriter, "No destinationBee")
		return
	}

	fibNumberStr, ok := vars["fibNumber"]
	if !ok {
		BadRequest(responseWriter, "No fibNumber")
		return
	}

	fibNumber, err := strconv.Atoi(fibNumberStr)
	if err != nil {
		BadRequest(responseWriter, "FibNumber must be number")
		return
	}

	message := MessageToBee{
		DestinationBee: destinationBee,
		FibNumber:      fibNumber,
	}
	logger.Trace.Printf("[HTTPReceiver] Message to bee %+v \n", message)

	beeRespond, err := beehive.Sync(context.TODO(), message)
	if err != nil {
		logger.Error.Printf("[HTTPReceiver] %s \n", err.Error())
		http.Error(responseWriter, err.Error(), http.StatusInternalServerError)
		return
	}

	fmt.Fprintf(responseWriter, "%d", beeRespond.(int))
	logger.Trace.Println("[HTTPReceiver] Done sending message to bee")
}
Example #17
func (h *hive) startRaftNode() {
	peers := make([]etcdraft.Peer, 0, 1)
	if len(h.meta.Peers) != 0 {
		h.registry.initHives(h.meta.Peers)
	} else {
		i := h.info()
		ni := raft.GroupNode{
			Group: hiveGroup,
			Node:  i.ID,
			Data:  i.Addr,
		}
		peers = append(peers, ni.Peer())
	}

	h.ticker = randtime.NewTicker(h.config.RaftTick, h.config.RaftTickDelta)

	ncfg := raft.Config{
		ID:     h.id,
		Name:   h.String(),
		Send:   h.sendRaft,
		Ticker: h.ticker.C,
	}
	h.node = raft.StartMultiNode(ncfg)

	gcfg := raft.GroupConfig{
		ID:             hiveGroup,
		Name:           h.String(),
		StateMachine:   h.registry,
		Peers:          peers,
		DataDir:        h.config.StatePath,
		SnapCount:      1024,
		FsyncTick:      h.config.RaftFsyncTick,
		ElectionTicks:  h.config.RaftElectTicks,
		HeartbeatTicks: h.config.RaftHBTicks,
		MaxInFlights:   h.config.RaftInFlights,
		MaxMsgSize:     h.config.RaftMaxMsgSize,
	}
	if err := h.node.CreateGroup(context.TODO(), gcfg); err != nil {
		glog.Fatalf("cannot create hive group: %v", err)
	}
}
Example #18
func Example_reply() {
	// Create the hello world application and make it persistent (replication factor 1).
	app := beehive.NewApp("hello-world", beehive.Persistent(1))
	// Register the handler for Hello messages.
	app.HandleFunc(HelloReply{}, beehive.RuntimeMap(RcvfReply), RcvfReply)

	// Start the default hive.
	go beehive.Start()
	defer beehive.Stop()

	name := "your name"
	for i := 0; i < 2; i++ {
		// Sync sends the Hello message and waits until it receives the reply.
		res, err := beehive.Sync(context.TODO(), HelloReply{Name: name})
		if err != nil {
			glog.Fatalf("error in sending Hello: %v", err)
		}
		cnt := res.(int)
		fmt.Printf("hello %s (%d)!\n", name, cnt)
	}
}
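RcvfReply itself is not shown in this example. Going only by the RcvContext methods that appear in the other examples (Dict, Get, Put, Reply), a handler that counts greetings per name and replies with the count might look roughly like this; the dictionary name and the exact beehive type names are assumptions:

	func RcvfReply(msg beehive.Msg, ctx beehive.RcvContext) error {
		hello := msg.Data().(HelloReply)
		d := ctx.Dict("hello-replies")
		cnt := 1
		if v, err := d.Get(hello.Name); err == nil {
			cnt = v.(int) + 1
		}
		d.Put(hello.Name, cnt)
		// Reply routes cnt back to the beehive.Sync call above.
		ctx.Reply(msg, cnt)
		return nil
	}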
Example #19
func (r Router) Rcv(msg bh.Msg, ctx bh.RcvContext) error {

	switch dm := msg.Data().(type) {
	case setup:
		return registerEndhosts(ctx)
	case nom.LinkAdded:
		link := InterAreaLink(dm)
		ctx.Emit(link)
		return r.GraphBuilderCentralized.Rcv(msg, ctx)
	case nom.LinkDeleted:
		return r.GraphBuilderCentralized.Rcv(msg, ctx)
	default:
		in := msg.Data().(nom.PacketIn)
		src := in.Packet.SrcMAC()
		dst := in.Packet.DstMAC()

		d := ctx.Dict(mac2port)

		if dst.IsLLDP() {
			return nil
		}

		// FIXME: Hardcoding the hardware address at the moment
		srck := src.Key()
		_, src_err := d.Get(srck)
		if src_err != nil {
			fmt.Printf("Router: Error retrieving hosts %v\n", src)
		}

		if dst.IsBroadcast() || dst.IsMulticast() {
			fmt.Printf("Router: Received Broadcast or Multicast from %v\n", src)
			return nil
		}

		sn := in.Node

		dstk := dst.Key()
		dst_port, dst_err := d.Get(dstk)
		if dst_err != nil {
			fmt.Printf("Router: Can't find dest node %v\n", dstk)
			res, query_err := ctx.Sync(context.TODO(), InterAreaQuery{Src: srck, Dst: dstk})
			if query_err != nil {
				fmt.Printf("Router: received error when querying! %v\n", query_err)
				return query_err
			}
			fmt.Printf("Router: received response successfully - %v\n", res)
			dst_port = res.(nom.UID)
		}
		dn, _ := nom.ParsePortUID(dst_port.(nom.UID))
		p := dst_port.(nom.UID)

		if sn != nom.UID(dn) {

			paths, shortest_len := discovery.ShortestPathCentralized(sn, nom.UID(dn), ctx)
			fmt.Printf("Router: Path between %v and %v returns %v, %v\n", sn, nom.UID(dn), paths, shortest_len)

			for _, path := range paths {
				if len(path) == shortest_len {
					p = path[0].From
					break
				}
			}
		}

		// Forward flow entry
		add_forward := nom.AddFlowEntry{
			Flow: nom.FlowEntry{
				Node: in.Node,
				Match: nom.Match{
					Fields: []nom.Field{
						nom.EthDst{
							Addr: dst,
							Mask: nom.MaskNoneMAC,
						},
					},
				},
				Actions: []nom.Action{
					nom.ActionForward{
						Ports: []nom.UID{p},
					},
				},
			},
		}
		ctx.Reply(msg, add_forward)

		// Reverse flow entry
		add_reverse := nom.AddFlowEntry{
			Flow: nom.FlowEntry{
				Node: in.Node,
				Match: nom.Match{
					Fields: []nom.Field{
						nom.EthDst{
							Addr: src,
							Mask: nom.MaskNoneMAC,
						},
					},
				},
				Actions: []nom.Action{
					nom.ActionForward{
						Ports: []nom.UID{in.InPort},
					},
				},
			},
		}
		ctx.Reply(msg, add_reverse)

		out := nom.PacketOut{
			Node:     in.Node,
			InPort:   in.InPort,
			BufferID: in.BufferID,
			Packet:   in.Packet,
			Actions: []nom.Action{
				nom.ActionForward{
					Ports: []nom.UID{p},
				},
			},
		}
		ctx.Reply(msg, out)
	}

	return nil

}
Example #20
func (r RouterM) Rcv(msg bh.Msg, ctx bh.RcvContext) error {

	switch dm := msg.Data().(type) {
	case area_setup:
		fmt.Printf("Adding to ctx: %v\n", ctx)
		d := ctx.Dict(cDict)
		d.Put("master", "master")
		return master_setup(ctx)
	case InterAreaLink:
		link := nom.Link(dm)
		nf, _ := nom.ParsePortUID(link.From)
		k := string(nf)
		nt, _ := nom.ParsePortUID(link.To)
		k2 := string(nt)

		d := ctx.Dict(node2area)
		nf_area, err := d.Get(k)
		nt_area, err2 := d.Get(k2)

		if err != nil || err2 != nil {
			fmt.Printf("Node does not belong to any existing areas! %v, %v, %v\n", err, err2, ctx)
			return nil
		}

		if nf_area != nt_area {
			link.From = nom.UID(nf_area.(string) + "$$" + strings.Replace(string(link.From), "$$", tmpD, 1))
			link.To = nom.UID(nt_area.(string) + "$$" + strings.Replace(string(link.To), "$$", tmpD, 1))
			fmt.Printf("Received Links from different areas, building graphs for %v, %v\n", link.From, link.To)
			ctx.Emit(nom.LinkAdded(link))
		}
		return nil
	case InterAreaQuery:

		query := msg.Data().(InterAreaQuery)
		srck := query.Src
		dstk := query.Dst
		d := ctx.Dict(mac2area)

		src_area, src_err := d.Get(srck)
		dst_area, dst_err := d.Get(dstk)

		if src_err != nil || dst_err != nil {
			fmt.Printf("Error retrieving area info: %v, %v\n", src_err, dst_err)
			return nil
		}

		paths, shortest_len := discovery.ShortestPathCentralized(nom.UID(src_area.(string)), nom.UID(dst_area.(string)), ctx)

		if shortest_len <= 0 {
			fmt.Printf("No route exists between area %v and area %v\n", src_area, dst_area)
			return nil
		}

		fmt.Printf("Path between area %v and area %v returned %v\n", src_area, dst_area, paths)

		for _, path := range paths {
			if len(path) == shortest_len {
				_, port := nom.ParsePortUID(path[0].From)
				port_conv := strings.Replace(string(port), tmpD, "$$", 1)
				fmt.Printf("Sending converted port: %v\n", port_conv)
				ctx.Reply(msg, nom.UID(port_conv))
				break
			}
		}

		return nil
	case setupM:
		m := msg.Data().(setupM)
		return registerEndhostsAll(ctx, m.area)
	case nom.LinkAdded:
		// link := InterAreaLink(dm)
		cd := ctx.Dict(cDict)
		_, cerr := cd.Get("master")
		if cerr != nil {
			// ctx.Emit(link)
			return r.GraphBuilderCentralized.Rcv(msg, ctx)
		} else {
			link := nom.Link(dm)
			nf, _ := nom.ParsePortUID(link.From)
			k := string(nf)
			nt, _ := nom.ParsePortUID(link.To)
			k2 := string(nt)

			d := ctx.Dict(node2area)
			nf_area, err := d.Get(k)
			nt_area, err2 := d.Get(k2)

			if err != nil || err2 != nil {
				fmt.Printf("Node does not belong to any existing areas! %v, %v, %v\n", err, err2, ctx)
				return nil
			}

			if nf_area != nt_area {
				link.From = nom.UID(nf_area.(string) + "$$" + strings.Replace(string(link.From), "$$", tmpD, 1))
				link.To = nom.UID(nt_area.(string) + "$$" + strings.Replace(string(link.To), "$$", tmpD, 1))
				fmt.Printf("Received Links from different areas, building graphs for %v, %v\n", link.From, link.To)
				InterAreaLinkAdded(link, ctx)
			}
		}
	case nom.LinkDeleted:
		return r.GraphBuilderCentralized.Rcv(msg, ctx)
	default:
		in := msg.Data().(nom.PacketIn)
		src := in.Packet.SrcMAC()
		dst := in.Packet.DstMAC()

		d := ctx.Dict(mac2port)

		if dst.IsLLDP() {
			return nil
		}

		// FIXME: Hardcoding the hardware address at the moment
		srck := src.Key()
		_, src_err := d.Get(srck)
		if src_err != nil {
			fmt.Printf("Router: Error retrieving hosts %v\n", src)
		}

		if dst.IsBroadcast() || dst.IsMulticast() {
			fmt.Printf("Router: Received Broadcast or Multicast from %v\n", src)
			return nil
		}

		sn := in.Node

		dstk := dst.Key()
		dst_port, dst_err := d.Get(dstk)
		if dst_err != nil {
			fmt.Printf("Router: Can't find dest node %v\n", dstk)
			res, query_err := ctx.Sync(context.TODO(), InterAreaQuery{Src: srck, Dst: dstk})
			if query_err != nil {
				fmt.Printf("Router: received error when querying! %v\n", query_err)
				return query_err
			}
			fmt.Printf("Router: received response successfully - %v\n", res)
			dst_port = res.(nom.UID)
		}
		dn, _ := nom.ParsePortUID(dst_port.(nom.UID))
		p := dst_port.(nom.UID)

		if sn != nom.UID(dn) {

			paths, shortest_len := discovery.ShortestPathCentralized(sn, nom.UID(dn), ctx)
			fmt.Printf("Router: Path between %v and %v returns %v, %v\n", sn, nom.UID(dn), paths, shortest_len)

			for _, path := range paths {
				if len(path) == shortest_len {
					p = path[0].From
					break
				}
			}
		}

		// Forward flow entry
		add_forward := nom.AddFlowEntry{
			Flow: nom.FlowEntry{
				Node: in.Node,
				Match: nom.Match{
					Fields: []nom.Field{
						nom.EthDst{
							Addr: dst,
							Mask: nom.MaskNoneMAC,
						},
					},
				},
				Actions: []nom.Action{
					nom.ActionForward{
						Ports: []nom.UID{p},
					},
				},
			},
		}
		ctx.Reply(msg, add_forward)

		// Reverse flow entry
		add_reverse := nom.AddFlowEntry{
			Flow: nom.FlowEntry{
				Node: in.Node,
				Match: nom.Match{
					Fields: []nom.Field{
						nom.EthDst{
							Addr: src,
							Mask: nom.MaskNoneMAC,
						},
					},
				},
				Actions: []nom.Action{
					nom.ActionForward{
						Ports: []nom.UID{in.InPort},
					},
				},
			},
		}
		ctx.Reply(msg, add_reverse)

		out := nom.PacketOut{
			Node:     in.Node,
			InPort:   in.InPort,
			BufferID: in.BufferID,
			Packet:   in.Packet,
			Actions: []nom.Action{
				nom.ActionForward{
					Ports: []nom.UID{p},
				},
			},
		}
		ctx.Reply(msg, out)
	}

	return nil

}
Example #21
func (r LoadBalancer) Rcv(msg bh.Msg, ctx bh.RcvContext) error {

	switch dm := msg.Data().(type) {
	case setup:
		return registerEndhosts2(ctx)
	case nom.LinkAdded:
		link := InterAreaLink(dm)
		ctx.Emit(link)
		return r.GraphBuilderCentralized.Rcv(msg, ctx)
	case nom.LinkDeleted:
		return r.GraphBuilderCentralized.Rcv(msg, ctx)
	default:
		in := msg.Data().(nom.PacketIn)
		src := in.Packet.SrcMAC()
		dst := in.Packet.DstMAC()

		d := ctx.Dict(mac2port)
		load_dict := ctx.Dict(load_on_nodes)

		if dst.IsLLDP() {
			return nil
		}

		// FIXME: Hardcoding the hardware address at the moment
		srck := src.Key()

		if dst.IsBroadcast() || dst.IsMulticast() {
			fmt.Printf("Load Balancer: Received Broadcast or Multicast from %v\n", src)
			return nil
		}

		sn := in.Node

		dstk := dst.Key()
		dst_port, dst_err := d.Get(dstk)
		if dst_err != nil {
			fmt.Printf("Load Balancer: Can't find dest node %v\n", dstk)
			res, query_err := ctx.Sync(context.TODO(), InterAreaQuery{Src: srck, Dst: dstk})
			if query_err != nil {
				fmt.Printf("Load Balancer: received error when querying! %v\n", query_err)
				return query_err
			}
			fmt.Printf("Load Balancer: received response successfully - %v\n", res)
			dst_port = res.(nom.UID)
		}
		dn, _ := nom.ParsePortUID(dst_port.(nom.UID))
		p := dst_port.(nom.UID)

		if sn != nom.UID(dn) {

			paths, _ := discovery.ShortestPathCentralized(sn, nom.UID(dn), ctx)
			opt_path := paths[0]
			min_load := calculate_load(ctx, paths[0])
			for _, path := range paths[1:] {

				load := calculate_load(ctx, path)
				if load < min_load {
					opt_path = path
					min_load = load
				}
			}

			fmt.Printf("Load Balancer: Routing flow from %v to %v - %v, %v\n", sn, nom.UID(dn), opt_path, len(opt_path))
			p = opt_path[0].From
		}

		// Forward flow entry
		add_forward := nom.AddFlowEntry{
			Flow: nom.FlowEntry{
				Node: in.Node,
				Match: nom.Match{
					Fields: []nom.Field{
						nom.EthDst{
							Addr: dst,
							Mask: nom.MaskNoneMAC,
						},
					},
				},
				Actions: []nom.Action{
					nom.ActionForward{
						Ports: []nom.UID{p},
					},
				},
			},
		}
		ctx.Reply(msg, add_forward)

		// Reverse flow entry
		add_reverse := nom.AddFlowEntry{
			Flow: nom.FlowEntry{
				Node: in.Node,
				Match: nom.Match{
					Fields: []nom.Field{
						nom.EthDst{
							Addr: src,
							Mask: nom.MaskNoneMAC,
						},
					},
				},
				Actions: []nom.Action{
					nom.ActionForward{
						Ports: []nom.UID{in.InPort},
					},
				},
			},
		}
		ctx.Reply(msg, add_reverse)

		// Updating the load on this node
		// FIXME: This is a naive approach, ideally should update when flowentry
		// is added/removed, but flowentry deleted is not implemented yet
		if load, err := load_dict.Get(string(in.Node)); err == nil {
			load_dict.Put(string(in.Node), load.(int)+1)
		} else {
			load_dict.Put(string(in.Node), 1)
		}

		out := nom.PacketOut{
			Node:     in.Node,
			InPort:   in.InPort,
			BufferID: in.BufferID,
			Packet:   in.Packet,
			Actions: []nom.Action{
				nom.ActionForward{
					Ports: []nom.UID{p},
				},
			},
		}
		ctx.Reply(msg, out)
	}

	return nil

}
Example #22
func (n *MultiNode) start() {
	glog.V(2).Infof("%v started", n)

	defer func() {
		n.node.Stop()
		close(n.done)
	}()

	readyc := n.node.Ready()
	groupc := n.groupc
	for {
		select {
		case <-n.ticker:
			n.node.Tick()

		case bt := <-n.recvc:
			ch := time.After(1 * time.Millisecond)
			n.handleBatch(bt)
		loopr:
			for {
				select {
				case bt := <-n.recvc:
					n.handleBatch(bt)
				case <-ch:
					break loopr
				default:
					break loopr
				}
			}

		case mm := <-n.propc:
			ch := time.After(1 * time.Millisecond)
			n.node.Step(context.TODO(), mm.group, mm.msg)
		loopp:
			for {
				select {
				case mm := <-n.propc:
					n.node.Step(context.TODO(), mm.group, mm.msg)
				case <-ch:
					break loopp
				default:
					break loopp
				}
			}

		case req := <-groupc:
			n.handleGroupRequest(req)

		case rd := <-readyc:
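			// Nil channels never fire, so parking groupc and readyc here
			// blocks further Ready and group requests until the application
			// returns rd via Advance (the advancec case below re-arms them).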
			groupc = nil
			readyc = nil
			n.applyc <- rd

		case rd := <-n.advancec:
			readyc = n.node.Ready()
			groupc = n.groupc
			n.node.Advance(rd)

		case <-n.stop:
			return
		}
	}
}