func (h *hive) stopQees() {
	glog.Infof("%v is stopping qees...", h)
	qs := make(map[*qee]bool)
	for _, mhs := range h.qees {
		for _, mh := range mhs {
			qs[mh.q] = true
		}
	}

	stopCh := make(chan cmdResult)
	for q := range qs {
		q.ctrlCh <- newCmdAndChannel(cmdStop{}, h.ID(), q.app.Name(), 0, stopCh)
		glog.V(3).Infof("waiting on a qee: %v", q)
		stopped := false
		tries := 5
		for !stopped {
			select {
			case res := <-stopCh:
				_, err := res.get()
				if err != nil {
					glog.Errorf("error in stopping a qee: %v", err)
				}
				stopped = true
			case <-time.After(1 * time.Second):
				if tries--; tries < 0 {
					glog.Infof("giving up on qee %v", q)
					stopped = true
					continue
				}
				glog.Infof("still waiting for a qee %v...", q)
			}
		}
	}
}

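// hiveIDFromPeers requests a fresh hive ID from the peers in paddrs and
// registers addr under that ID. Peers are tried in order, each with a
// one-second timeout; with no peers the bootstrap ID 1 is returned, and the
// call is fatal if every peer fails.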
func hiveIDFromPeers(addr string, paddrs []string) uint64 {
	if len(paddrs) == 0 {
		return 1
	}

	ch := make(chan uint64, len(paddrs))
	for _, paddr := range paddrs {
		glog.Infof("requesting hive ID from %v", paddr)
		go func(paddr string) {
			c, err := newRPCClient(paddr)
			if err != nil {
				glog.Error(err)
				return
			}
			defer c.stop()

			id, err := c.sendCmd(cmd{Data: cmdNewHiveID{}})
			if err != nil {
				glog.Error(err)
				return
			}
			if id == Nil {
				glog.Fatalf("invalid ID from peer")
			}

			_, err = c.sendCmd(cmd{
				Data: cmdAddHive{
					Hive: HiveInfo{
						ID:   id.(uint64),
						Addr: addr,
					},
				},
			})
			if err != nil {
				glog.Error(err)
				return
			}

			ch <- id.(uint64)
		}(paddr)

		select {
		case id := <-ch:
			return id
		case <-time.After(1 * time.Second):
			glog.Infof("cannot get id from %v", paddr)
			continue
		}
	}

	glog.Fatalf("cannot get a new hive ID from peers")
	return 1
}

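// Rcv handles a StatResult: it looks up the switch's row in the traffic
// matrix, schedules the next poll for that switch, records the flow's byte
// count, and emits a MatrixUpdate whenever the byte delta exceeds c.delta.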
func (c *Collector) Rcv(m beehive.Msg, ctx beehive.RcvContext) error {
	res := m.Data().(StatResult)
	glog.V(2).Infof("Stat results: %+v", res)
	matrix := ctx.Dict(matrixDict)
	key := res.Switch.Key()
	v, err := matrix.Get(key)
	if err != nil {
		return fmt.Errorf("No such switch in matrix: %+v", res)
	}

	c.poller.query <- StatQuery{res.Switch}

	sw := v.(SwitchStats)
	stat, ok := sw[res.Flow]
	sw[res.Flow] = res.Bytes
	glog.V(2).Infof("Previous stats: %+v, Now: %+v", stat, res.Bytes)
	if !ok || res.Bytes-stat > c.delta {
		glog.Infof("Found an elephant flow: %+v, %+v, %+v", res, stat,
			ctx.Hive().ID())
		ctx.Emit(MatrixUpdate(res))
	}

	matrix.Put(key, sw)
	return nil
}

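// doRead reads OpenFlow headers from the connection and dispatches them to
// the driver until stop is closed, the connection is closed, or a packet
// cannot be handled. done is closed on return.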
func (c *ofConn) doRead(done chan struct{}, stop chan struct{}) {
	defer close(done)

	pkts := make([]of.Header, c.readBufLen)
	for {
		select {
		case <-stop:
			return
		default:
		}

		n, err := c.ReadHeaders(pkts)
		if err != nil {
			if err == io.EOF {
				glog.Infof("connection %v closed", c.RemoteAddr())
			} else {
				glog.Errorf("cannot read from the connection %v: %v", c.RemoteAddr(),
					err)
			}
			return
		}

		for _, pkt := range pkts[:n] {
			if err := c.driver.handlePkt(pkt, c); err != nil {
				glog.Errorf("%s", err)
				return
			}
		}

		// Advance past the headers handed to the driver, and allocate a fresh
		// buffer once the current one is exhausted.
		pkts = pkts[n:]
		if len(pkts) == 0 {
			pkts = make([]of.Header, c.readBufLen)
		}
	}
}

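// createHive builds a hive hosting the Collector, Router, and Driver apps,
// emits the initial StatQuery and SwitchJoined messages for the switch
// ranges it owns, and starts the hive in a background goroutine that waits
// on joinCh once the hive stops.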
func createHive(addr string, paddrs []string, minDriver, maxDriver int,
	minCol, maxCol int, stickyCollector bool, lockRouter bool,
	joinCh chan bool) {

	h := beehive.NewHive(beehive.Addr(addr), beehive.PeerAddrs(paddrs...))

	cOps := []beehive.AppOption{}
	if stickyCollector {
		cOps = append(cOps, beehive.Sticky())
	}
	c := h.NewApp("Collector", cOps...)
	p := NewPoller(1 * time.Second)
	c.Detached(p)
	c.Handle(StatResult{}, &Collector{uint64(maxSpike * (1 - elephantProb)), p})
	c.Handle(SwitchJoined{}, &SwitchJoinHandler{p})

	r := h.NewApp("Router", beehive.Sticky())
	r.Handle(MatrixUpdate{}, &UpdateHandler{})

	d := h.NewApp("Driver", beehive.Sticky())
	driver := NewDriver(minDriver, maxDriver-minDriver)
	d.Handle(StatQuery{}, driver)
	d.Handle(FlowMod{}, driver)

	if lockRouter {
		h.Emit(MatrixUpdate{})
	}

	if maxDriver != minDriver {
		glog.Infof("Running driver from %d to %d", minDriver, maxDriver-1)
		d.Detached(driver)
		for i := minDriver; i < maxDriver; i++ {
			h.Emit(StatQuery{Switch(i)})
		}
	}

	if maxCol != minCol {
		glog.Infof("Running collector from %d to %d", minCol, maxCol-1)
		for i := minCol; i < maxCol; i++ {
			h.Emit(SwitchJoined{Switch(i)})
		}
	}

	h.RegisterMsg(SwitchStats{})
	go func() {
		h.Start()
		<-joinCh
	}()
}

func (h *UpdateHandler) Rcv(m beehive.Msg, ctx beehive.RcvContext) error {
	if m.NoReply() {
		return nil
	}

	u := m.Data().(MatrixUpdate)
	glog.Infof("Received matrix update: %+v", u)
	ctx.Emit(FlowMod{Switch: u.Switch})
	return nil
}

func (of *of10Driver) handlePacketIn(in of10.PacketIn, c *ofConn) error {
	inPort := in.InPort()
	// Ignore packet-ins on switch specific ports.
	if inPort > uint16(of10.PP_MAX) {
		glog.V(2).Infof("ignoring packet-in on %v", inPort)
		return nil
	}

	port, ok := of.ofPorts[inPort]
	if !ok {
		return fmt.Errorf("of10Driver: port not found %v", inPort)
	}

	if glog.V(2) {
		glog.Infof("packet received: %v", in)
	}

	nomIn := nom.PacketIn{
		Node:     c.node.UID(),
		InPort:   port.UID(),
		BufferID: nom.PacketBufferID(in.BufferId()),
	}
	nomIn.Packet = nom.Packet(in.Data())
	c.ctx.Emit(nomIn)

	//c.ctx.Emit(in)

	//buf := make([]byte, 32)
	//out := of10.NewPacketOutWithBuf(buf)
	//out.Init()
	//out.SetBufferId(in.BufferId())
	//out.SetInPort(in.InPort())

	//bcast := of10.NewActionOutput()
	//bcast.SetPort(uint16(of10.PP_FLOOD))
	//out.AddActions(bcast.ActionHeader)

	//if in.BufferId() == 0xFFFFFFFF {
	//for _, d := range in.Data() {
	//out.AddData(d)
	//}
	//} else {
	//out.SetBufferId(in.BufferId())
	//}

	//c.wCh <- out.Header

	//if err := c.WriteHeader(out.Header); err != nil {
	//return fmt.Errorf("Error in writing a packet out: %v", err)
	//}

	return nil
}

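// Stop stops the hive by issuing cmdStop on its control channel. It fails if
// the control channel is closed or the hive is already stopped.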
func (h *hive) Stop() error {
	glog.Infof("stopping %v", h)
	if h.ctrlCh == nil {
		return errors.New("control channel is closed")
	}

	if h.status == hiveStopped {
		return errors.New("hive is already stopped")
	}

	_, err := h.processCmd(cmdStop{})
	return err
}

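// snapshot saves the state machine and then, asynchronously, creates a raft
// snapshot at the last applied index, persists it, and compacts the
// in-memory log, retaining numberOfCatchUpEntries entries for slow followers.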
func (g *group) snapshot() {
	d, err := g.stateMachine.Save()
	if err != nil {
		glog.Fatalf("error in serializing the state machine: %v", err)
	}
	g.snapped = g.applied

	go func(snapi uint64) {
		snap, err := g.raftStorage.CreateSnapshot(snapi, &g.confState, d)
		if err != nil {
			// The snapshot is taken asynchronously with the progress of raft.
			// Raft might have already created a newer snapshot.
			if err == etcdraft.ErrSnapOutOfDate {
				return
			}
			glog.Fatalf("unexpected create snapshot error %v", err)
		}

		if err := g.diskStorage.SaveSnap(snap); err != nil {
			glog.Fatalf("save snapshot error: %v", err)
		}
		glog.Infof("%v saved snapshot at index %d", g, snap.Metadata.Index)

		// Keep some in-memory log entries for slow followers.
		compacti := uint64(1)
		if snapi > numberOfCatchUpEntries {
			compacti = snapi - numberOfCatchUpEntries
		}
		if err = g.raftStorage.Compact(compacti); err != nil {
			// The compaction is done asynchronously with the progress of raft.
			// The raft log might have already been compacted.
			if err == etcdraft.ErrCompacted {
				return
			}
			glog.Fatalf("unexpected compaction error %v", err)
		}
		glog.Infof("%v compacted raft log at %d", g, compacti)
	}(g.snapped)
}

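// listen opens the hive's TCP listener and multiplexes it with cmux: HTTP
// requests go to the embedded HTTP server and everything else to the RPC
// server.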
func (h *hive) listen() (err error) {
	h.listener, err = net.Listen("tcp", h.config.Addr)
	if err != nil {
		glog.Errorf("%v cannot listen: %v", h, err)
		return err
	}
	glog.Infof("%v is listening", h)

	m := cmux.New(h.listener)
	hl := m.Match(cmux.HTTP1Fast())
	rl := m.Match(cmux.Any())

	go func() {
		h.httpServer.Serve(hl)
		glog.Infof("%v closed http listener", h)
	}()

	rs := rpc.NewServer()
	if err := rs.RegisterName("rpcServer", newRPCServer(h)); err != nil {
		glog.Fatalf("cannot register rpc server: %v", err)
	}
	go func() {
		for {
			conn, err := rl.Accept()
			if err != nil {
				glog.Infof("%v closed rpc listener", h)
				return
			}
			go rs.ServeConn(conn)
		}
	}()

	go m.Serve()
	return nil
}

func (of *of12Driver) handlePacketIn(in of12.PacketIn, c *ofConn) error {
	m := in.Match()
	if m.Type() == uint16(of12.PMT_STANDARD) {
		glog.Warningf("standard matches are not supported")
		return nil
	}

	var inPort uint32
	hasInPort := false
	xm, _ := of12.ToOXMatch(in.Match())
	for _, f := range xm.Fields() {
		if of12.IsOxmInPort(f) {
			xp, _ := of12.ToOxmInPort(f)
			inPort = xp.InPort()
			hasInPort = true
		}
	}

	if !hasInPort {
		glog.V(2).Infof("packet in does not have an input port")
		return nil
	}

	// Ignore packet-ins on switch specific ports.
	if inPort > uint32(of12.PP_MAX) {
		glog.V(2).Infof("ignoring packet-in on %v", inPort)
		return nil
	}

	port, ok := of.ofPorts[inPort]
	if !ok {
		return fmt.Errorf("of12Driver: port not found %v", inPort)
	}

	if glog.V(2) {
		glog.Infof("packet received: %v", in)
	}

	nomIn := nom.PacketIn{
		Node:     c.node.UID(),
		InPort:   port.UID(),
		BufferID: nom.PacketBufferID(in.BufferId()),
	}
	nomIn.Packet = nom.Packet(in.Data())
	c.ctx.Emit(nomIn)
	return nil
}

func (l *ofListener) Start(ctx bh.RcvContext) {
	nl, err := net.Listen(l.proto, l.addr)
	if err != nil {
		glog.Errorf("Cannot start the OF listener: %v", err)
		return
	}

	glog.Infof("OF listener started on %s:%s", l.proto, l.addr)

	defer func() {
		glog.Infof("OF listener closed")
		nl.Close()
	}()

	for {
		c, err := nl.Accept()
		if err != nil {
			glog.Errorf("Error in OF accept: %v", err)
			return
		}

		l.startOFConn(c, ctx)
	}
}

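// handleMsgLeader handles a batch of messages on the leader bee. For
// transactional apps, batches of more than one message are wrapped in a
// two-level transaction: each message commits (or buffers) in L2, and the
// whole batch commits in L1 at the end.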
func (b *bee) handleMsgLeader(mhs []msgAndHandler) {
	usetx := b.app.transactional()
	if usetx && len(mhs) > 1 {
		b.stateL2 = state.NewTransactional(b.stateL1)
		b.stateL1.BeginTx()
	}

	for i := range mhs {
		if usetx {
			b.BeginTx()
		}

		mh := mhs[i]
		if glog.V(2) {
			glog.Infof("%v handles message %v", b, mh.msg)
		}
		b.callRcv(mh)

		if usetx {
			var err error
			if b.stateL2 == nil {
				err = b.CommitTx()
			} else if len(b.msgBufL1) == 0 && b.stateL2.HasEmptyTx() {
				// If there is no pending L1 message and there is no state change,
				// emit the buffered messages in L2 as a shortcut.
				b.throttle(b.msgBufL2)
				b.resetTx(b.stateL2, &b.msgBufL2)
			} else {
				err = b.commitTxL2()
			}

			if err != nil && err != state.ErrNoTx {
				glog.Errorf("%v cannot commit a transaction: %v", b, err)
			}
		}
	}

	if !usetx || b.stateL2 == nil {
		return
	}

	b.stateL2 = nil
	if err := b.CommitTx(); err != nil && err != state.ErrNoTx {
		glog.Errorf("%v cannot commit a transaction: %v", b, err)
	}
}

func (h *ProtoHandler) Start(ctx beehive.RcvContext) {
	defer close(h.done)
	glog.Infof("taskq is listening on: %s", h.lis.Addr())
	for {
		c, err := h.lis.Accept()
		if err != nil {
			if nerr, ok := err.(net.Error); ok && nerr.Temporary() {
				continue
			}
			return
		}
		// TODO(soheil): do we need to be graceful for connections?
		go ctx.StartDetached(NewConnHandler(c))
	}
}

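// handshake exchanges OpenFlow hello messages with the switch, negotiates
// the highest common version (1.0 or 1.2), and delegates the rest of the
// handshake to the version-specific driver.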
func (c *ofConn) handshake() (ofDriver, error) {
	hdr, err := c.ReadHeader()
	if err != nil {
		return nil, err
	}

	h, err := of.ToHello(hdr)
	if err != nil {
		return nil, err
	}

	glog.Infof("%v received hello from a switch with OFv%v", c.ctx, h.Version())

	version := of.OPENFLOW_1_0
	if h.Version() >= uint8(of.OPENFLOW_1_2) {
		version = of.OPENFLOW_1_2
	}
	h.SetVersion(uint8(version))

	if err = c.WriteHeader(h.Header); err != nil {
		return nil, err
	}
	c.Flush()

	glog.V(2).Infof("%v sent hello to the switch", c.ctx)

	var driver ofDriver
	switch version {
	case of.OPENFLOW_1_0:
		driver = &of10Driver{}
	case of.OPENFLOW_1_2:
		driver = &of12Driver{}
	}

	if err = driver.handshake(c); err != nil {
		return nil, err
	}

	if c.node.ID == nom.NodeID(0) {
		return nil, errors.New("ofConn: invalid node after handshake")
	}

	return driver, nil
}

func (s *SwitchJoinHandler) Rcv(m beehive.Msg, ctx beehive.RcvContext) error {
	if m.NoReply() {
		return nil
	}

	joined := m.Data().(SwitchJoined)
	matrix := ctx.Dict(matrixDict)
	key := joined.Switch.Key()
	_, err := matrix.Get(key)
	if err == nil {
		return fmt.Errorf("Switch already exists in matrix: %+v", joined)
	}

	sw := make(SwitchStats)
	matrix.Put(key, sw)
	s.poller.query <- StatQuery{joined.Switch}
	glog.Infof("Switch joined: %+v", joined)
	return nil
}

func (c *ofConn) Start(ctx bh.RcvContext) {
	defer func() {
		if c.driver != nil {
			c.driver.handleConnClose(c)
		}
		c.Close()

		// TODO(soheil): is there any better way to prevent deadlocks?
		glog.Infof("%v drains write queue for %v", ctx, c.RemoteAddr())
		go c.drainWCh()
	}()

	c.ctx = ctx
	c.wCh = make(chan bh.Msg, ctx.Hive().Config().DataChBufSize)

	var err error
	if c.driver, err = c.handshake(); err != nil {
		glog.Errorf("Error in OpenFlow handshake: %v", err)
		return
	}

	stop := make(chan struct{})

	wdone := make(chan struct{})
	go c.doWrite(wdone, stop)

	rdone := make(chan struct{})
	go c.doRead(rdone, stop)

	select {
	case <-rdone:
		close(stop)
	case <-wdone:
		close(stop)
	}

	<-rdone
	<-wdone
}

func (h *hive) stopListener() {
	glog.Infof("%v closes listener...", h)
	if h.listener != nil {
		h.listener.Close()
	}
}

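// Rcv runs one round of the locality optimizer: it aggregates each bee's
// message matrix into per-hive traffic counts, and migrates bees whose
// traffic with a remote hive is more than twice their local traffic once
// their score passes o.minScore.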
func (o optimizer) Rcv(msg Msg, ctx RcvContext) error {
	dict := ctx.Dict(dictOptimizer)
	stats := getOptimizerStats(dict)

	infos := make(map[uint64]BeeInfo)
	for id, os := range stats {
		infos[id] = BeeInfo{}
		for bid := range os.Matrix {
			infos[bid] = BeeInfo{}
		}
	}

	var err error
	for id := range infos {
		infos[id], err = beeInfoFromContext(ctx, id)
		if err != nil {
			delete(infos, id)
		}
	}

	// Build the bee-to-hive traffic matrix, skipping migrated, detached, and
	// sticky bees.
	bhmx := make(map[uint64]map[uint64]uint64)
	for b, os := range stats {
		if os.Migrated {
			continue
		}
		bi, ok := infos[b]
		if !ok || bi.Detached {
			continue
		}
		if app, ok := ctx.Hive().(*hive).app(bi.App); ok && app.sticky() {
			continue
		}
		for fromb, cnt := range os.Matrix {
			if stats[fromb].Migrated {
				continue
			}
			frombi, ok := infos[fromb]
			if !ok {
				continue
			}
			hmx, ok := bhmx[b]
			if !ok {
				hmx = make(map[uint64]uint64)
				bhmx[b] = hmx
			}
			hmx[frombi.Hive] += cnt
			if frombi.Detached {
				continue
			}
			hmx, ok = bhmx[fromb]
			if !ok {
				hmx = make(map[uint64]uint64)
				bhmx[fromb] = hmx
			}
			hmx[bi.Hive] += cnt
		}
	}

	sorted := make(beeHiveStat, 0, len(stats))
	for b, hmx := range bhmx {
		bi := infos[b]
		local := hmx[bi.Hive]
		max := uint64(0)
		maxh := uint64(0)
		for h, cnt := range hmx {
			if h == bi.Hive {
				continue
			}
			if max < cnt {
				max = cnt
				maxh = h
			}
		}
		// Only consider bees whose remote traffic clearly dominates their
		// local traffic.
		if max <= 2*local {
			continue
		}
		os := stats[b]
		if max == os.LastMax {
			continue
		}
		os.Score++
		os.LastMax = max
		k := formatBeeID(b)
		dict.Put(k, os)
		if os.Score <= o.minScore {
			continue
		}
		sorted = append(sorted, beeHiveCnt{
			Bee:  b,
			Hive: maxh,
			Cnt:  max,
		})
	}

	if len(sorted) == 0 {
		return nil
	}
	sort.Sort(sorted)

	// Skip bees whose current hive is already the destination of a migration
	// in this round.
	blacklist := make(map[uint64]struct{})
	for _, bhc := range sorted {
		bi, ok := infos[bhc.Bee]
		if !ok {
			continue
		}
		if _, ok := blacklist[bi.Hive]; ok {
			continue
		}
		blacklist[bhc.Hive] = struct{}{}
		glog.Infof("%v initiates migration of bee %v to hive %v", ctx, bhc.Bee,
			bhc.Hive)
		os := stats[bhc.Bee]
		ctx.SendToBee(cmdMigrate{Bee: bhc.Bee, To: bhc.Hive}, os.Collector)
		os.Migrated = true
		k := formatBeeID(bhc.Bee)
		dict.Put(k, os)
	}
	return nil
}

// OpenStorage creates or reloads the disk-backed storage in dir.
func OpenStorage(node uint64, dir string, stateMachine StateMachine) (
	raftStorage *etcdraft.MemoryStorage, diskStorage DiskStorage,
	lastSnapIdx, lastEntIdx uint64, exists bool, err error) {

	// TODO(soheil): maybe store and return a custom metadata.
	glog.V(2).Infof("opening raft storage on %s", dir)
	sp := path.Join(dir, "snap")
	wp := path.Join(dir, "wal")
	exists = exist(sp) && exist(wp) && wal.Exist(wp)

	s := snap.New(sp)
	raftStorage = etcdraft.NewMemoryStorage()
	var w *wal.WAL
	if !exists {
		mustMkdir(sp)
		mustMkdir(wp)
		w, err = createWAL(node, wp)
		diskStorage = &storage{w, s}
		return
	}

	ss, err := s.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		return
	}

	if ss != nil {
		if err = stateMachine.Restore(ss.Data); err != nil {
			err = fmt.Errorf("raft: cannot restore statemachine from snapshot: %v",
				err)
			return
		}
		if err = raftStorage.ApplySnapshot(*ss); err != nil {
			err = fmt.Errorf("raft: cannot apply snapshot: %v", err)
			return
		}
		lastSnapIdx = ss.Metadata.Index
		glog.Infof("raft: recovered statemachine from snapshot at index %d",
			lastSnapIdx)
	}

	var st raftpb.HardState
	var ents []raftpb.Entry
	w, st, ents, err = readWAL(node, wp, ss)
	if err != nil {
		return
	}

	raftStorage.SetHardState(st)
	raftStorage.Append(ents)

	if len(ents) != 0 {
		lastEntIdx = ents[len(ents)-1].Index
	} else if ss != nil {
		lastEntIdx = ss.Metadata.Index
	} else {
		lastEntIdx = 0
	}

	diskStorage = &storage{w, s}
	return
}

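// A minimal usage sketch for OpenStorage (hypothetical caller: "sm" stands
// for any StateMachine implementation, and the raft config wiring is an
// assumption, not part of this file):
//
//	rs, ds, snapi, enti, existed, err := OpenStorage(nodeID, dir, sm)
//	if err != nil {
//		glog.Fatalf("cannot open raft storage: %v", err)
//	}
//	cfg := &etcdraft.Config{ID: nodeID, Storage: rs}
//	_ = ds // hand the disk storage to the raft node's save loop
//	_, _, _ = snapi, enti, existed

// Rcv is a capacity-based variant of the optimizer: it counts the bees on
// each hive and, for every hive above the (currently hardcoded) capacity,
// migrates the bee that receives the fewest messages to a free hive.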
func (o optimizer) Rcv(msg Msg, ctx RcvContext) error {
	dict := ctx.Dict(dictOptimizer)
	stats := getOptimizerStats(dict)

	infos := make(map[uint64]BeeInfo)
	for id, os := range stats {
		infos[id] = BeeInfo{}
		for bid := range os.Matrix {
			infos[bid] = BeeInfo{}
		}
	}

	var err error
	for id := range infos {
		infos[id], err = beeInfoFromContext(ctx, id)
		if err != nil {
			delete(infos, id)
		}
	}

	// TODO: don't hardcode this cap.
	capacity := uint64(1)

	// Count the number of bees in each hive. beesPerHive maps a hive ID to
	// its number of bees.
	beesPerHive := make(map[uint64]uint64)

	// Initialize an entry for each hive.
	allHives := ctx.Hive().(*hive).registry.hives()
	for _, hi := range allHives {
		beesPerHive[hi.ID] = 0
	}

	for b := range stats {
		bi, ok := infos[b]
		if !ok {
			continue
		}
		// Create an entry in the map if there isn't one already.
		if _, ok = beesPerHive[bi.Hive]; !ok {
			beesPerHive[bi.Hive] = 0
		}
		// Increment the number of bees in this bee's hive.
		beesPerHive[bi.Hive]++
	}

	// Separate out the hives that do not exceed the cap.
	fullHives := make([]uint64, 0, len(beesPerHive))
	freeHives := make([]uint64, 0, len(beesPerHive))
	for hid, count := range beesPerHive {
		if count > capacity {
			fullHives = append(fullHives, hid)
		} else if count < capacity {
			freeHives = append(freeHives, hid)
		}
	}

	// No hives are over capacity.
	if len(fullHives) == 0 {
		return nil
	}
	// No free hives to migrate to; too bad.
	if len(freeHives) == 0 {
		return nil
	}

	// Migrate the quietest eligible bee of each full hive to the next free
	// hive.
	for _, hid := range fullHives {
		beesInHive := ctx.Hive().(*hive).registry.beesOfHive(hid)

		// Find the bee that has been receiving the fewest messages.
		minBid := uint64(0)
		minBidMsgs := uint64(math.MaxUint64)
		for _, bee := range beesInHive {
			bid := bee.ID
			bi, ok := infos[bid]
			// Don't migrate certain bees.
			if !ok || bi.Detached {
				continue
			}
			if app, ok := ctx.Hive().(*hive).app(bi.App); ok && app.sticky() {
				continue
			}
			os := stats[bid]
			if os.Migrated {
				continue
			}
			// Count the number of messages this bee has received.
			msgs := uint64(0)
			for frombid, count := range os.Matrix {
				if stats[frombid].Migrated {
					continue
				}
				if _, ok := infos[frombid]; !ok {
					continue
				}
				msgs += count
			}
			if msgs < minBidMsgs {
				minBid = bid
				minBidMsgs = msgs
			}
		}

		// No eligible bees found; too bad.
		if minBid == 0 {
			continue
		}

		os := stats[minBid]
		glog.Infof("%v initiates migration of bee %v to hive %v", ctx, minBid,
			freeHives[0])
		ctx.SendToBee(cmdMigrate{Bee: minBid, To: freeHives[0]}, os.Collector)
		os.Migrated = true
		k := formatBeeID(minBid)
		dict.Put(k, os)

		// Remove this free hive from the list of free hives.
		freeHives = freeHives[1:]
		// No free hives left; oh well.
		if len(freeHives) == 0 {
			return nil
		}
	}
	return nil
}

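// apply consumes a raft Ready on the apply path: it reports leadership
// changes to the state machine, restores from a newer incoming snapshot,
// applies committed normal and conf-change entries, and kicks off a
// snapshot once snapCount entries have been applied since the last one.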
func (g *group) apply(ready etcdraft.Ready) error {
	if ready.SoftState != nil {
		newLead := ready.SoftState.Lead
		if g.leader != newLead {
			g.stateMachine.ProcessStatusChange(LeaderChanged{
				Old:  g.leader,
				New:  newLead,
				Term: ready.HardState.Term,
			})
			g.leader = newLead
		}
	}

	// Recover from snapshot if it is more recent than the currently applied.
	if !etcdraft.IsEmptySnap(ready.Snapshot) &&
		ready.Snapshot.Metadata.Index > g.applied {

		if err := g.stateMachine.Restore(ready.Snapshot.Data); err != nil {
			glog.Fatalf("error in recovering the state machine: %v", err)
		}
		// FIXME(soheil): update the nodes and notify the application?
		g.applied = ready.Snapshot.Metadata.Index
		glog.Infof("%v recovered from incoming snapshot at index %d", g.node,
			g.applied)
	}

	es := ready.CommittedEntries
	if len(es) == 0 {
		return nil
	}

	firsti := es[0].Index
	if firsti > g.applied+1 {
		glog.Fatalf(
			"first index of committed entry [%d] should be <= applied [%d] + 1",
			firsti, g.applied)
	}

	if glog.V(3) {
		glog.Infof("%v receives raft update: committed=%s appended=%s", g,
			formatEntries(es), formatEntries(ready.Entries))
	}

	for _, e := range es {
		if e.Index <= g.applied {
			continue
		}

		switch e.Type {
		case raftpb.EntryNormal:
			if err := g.applyEntry(e); err != nil {
				return err
			}

		case raftpb.EntryConfChange:
			if err := g.applyConfChange(e); err != nil {
				return err
			}

		default:
			glog.Fatalf("unexpected entry type")
		}

		g.applied = e.Index
	}

	if g.applied-g.snapped > g.snapCount {
		glog.Infof("%v starts to snapshot (applied: %d, lastsnap: %d)", g,
			g.applied, g.snapped)
		g.snapshot()
	}
	return nil
}

func (d *of12Driver) handshake(c *ofConn) error {
	freq := of12.NewFeaturesRequest()
	if err := c.WriteHeader(freq.Header); err != nil {
		return err
	}
	c.Flush()

	glog.V(2).Info("Sent features request to the switch")

	hdr, err := c.ReadHeader()
	if err != nil {
		return err
	}

	v12, err := of12.ToHeader12(hdr)
	if err != nil {
		return err
	}

	frep, err := of12.ToFeaturesReply(v12)
	if err != nil {
		return err
	}

	glog.Infof("Handshake completed for switch %016x", frep.DatapathId())

	glog.Infof("Disabling packet buffers in the switch.")
	cfg := of12.NewSwitchSetConfig()
	cfg.SetMissSendLen(0xFFFF)
	c.WriteHeader(cfg.Header)

	nodeID := datapathIDToNodeID(frep.DatapathId())
	c.node = nom.Node{
		ID:      nodeID,
		MACAddr: datapathIDToMACAddr(frep.DatapathId()),
		Capabilities: []nom.NodeCapability{
			nom.CapDriverRole,
		},
	}

	nomDriver := nom.Driver{
		BeeID: c.ctx.ID(),
		Role:  nom.DriverRoleDefault,
	}
	c.ctx.Emit(nom.NodeConnected{
		Node:   c.node,
		Driver: nomDriver,
	})

	d.ofPorts = make(map[uint32]*nom.Port)
	d.nomPorts = make(map[nom.UID]uint32)
	for _, p := range frep.Ports() {
		if p.PortNo() > uint32(of12.PP_MAX) {
			continue
		}
		name := p.Name()
		port := nom.Port{
			ID:      portNoToPortID(p.PortNo()),
			Name:    string(name[:]),
			MACAddr: p.HwAddr(),
			Node:    c.NodeUID(),
		}
		d.ofPorts[p.PortNo()] = &port
		d.nomPorts[port.UID()] = p.PortNo()
		glog.Infof("%v added", port)
		c.ctx.Emit(nom.PortStatusChanged{
			Port:   port,
			Driver: nomDriver,
		})
	}
	return nil
}

func (d *of10Driver) handshake(c *ofConn) error {
	freq := of10.NewFeaturesRequest()
	if err := c.WriteHeader(freq.Header); err != nil {
		return err
	}
	c.Flush()

	glog.V(2).Infof("%v sent features request to the switch", c.ctx)

	hdr, err := c.ReadHeader()
	if err != nil {
		return err
	}

	v10, err := of10.ToHeader10(hdr)
	if err != nil {
		return err
	}

	frep, err := of10.ToFeaturesReply(v10)
	if err != nil {
		return err
	}

	glog.Infof("%v completes handshaking with switch %016x", c.ctx,
		frep.DatapathId())
	glog.Infof("%v disables packet buffers in switch %016x", c.ctx,
		frep.DatapathId())

	cfg := of10.NewSwitchSetConfig()
	cfg.SetMissSendLen(0xFFFF)
	c.WriteHeader(cfg.Header)

	nodeID := datapathIDToNodeID(frep.DatapathId())
	c.node = nom.Node{
		ID:           nodeID,
		MACAddr:      datapathIDToMACAddr(frep.DatapathId()),
		Capabilities: nil,
	}

	glog.Infof("%v is connected to %v", c.ctx, c.node)

	nomDriver := nom.Driver{
		BeeID: c.ctx.ID(),
		Role:  nom.DriverRoleDefault,
	}
	c.ctx.Emit(nom.NodeConnected{
		Node:   c.node,
		Driver: nomDriver,
	})

	d.ofPorts = make(map[uint16]*nom.Port)
	d.nomPorts = make(map[nom.UID]uint16)
	for _, p := range frep.Ports() {
		name := p.Name()
		port := nom.Port{
			ID:      portNoToPortID(uint32(p.PortNo())),
			Name:    string(name[:]),
			MACAddr: p.HwAddr(),
			Node:    c.NodeUID(),
		}
		d.ofPorts[p.PortNo()] = &port
		d.nomPorts[port.UID()] = p.PortNo()
		glog.Infof("%v added", port)
		if p.PortNo() <= uint16(of10.PP_MAX) {
			c.ctx.Emit(nom.PortStatusChanged{
				Port:   port,
				Driver: nomDriver,
			})
		}
	}
	return nil
}

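// Rcv implements the learning-switch logic: LLDP frames are dropped,
// broadcast and multicast are handed to the Hub, source MACs are learned in
// the "mac2port" dictionary, and for known destinations a flow entry is
// installed and the packet is forwarded out the learned port.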
func (h LearningSwitch) Rcv(msg bh.Msg, ctx bh.RcvContext) error {
	in := msg.Data().(nom.PacketIn)
	src := in.Packet.SrcMAC()
	dst := in.Packet.DstMAC()
	glog.V(2).Infof("received packet in from %v to %v", src, dst)
	if dst.IsLLDP() {
		// TODO(soheil): just drop LLDP.
		glog.Infof("dropped LLDP packet to %v", dst)
		return nil
	}

	if dst.IsBroadcast() || dst.IsMulticast() {
		return h.Hub.Rcv(msg, ctx)
	}

	d := ctx.Dict("mac2port")
	srck := src.Key()
	update := false
	if v, err := d.Get(srck); err == nil {
		p := v.(nom.UID)
		if p != in.InPort {
			update = true
			// TODO(soheil): maybe add support for multi ports.
			glog.Infof("%v is moved from port %v to port %v", src, p, in.InPort)
		}
	} else {
		update = true
	}
	if update {
		if err := d.Put(srck, in.InPort); err != nil {
			glog.Fatalf("cannot serialize port: %v", err)
		}
	}

	dstk := dst.Key()
	v, err := d.Get(dstk)
	if err != nil {
		return h.Hub.Rcv(msg, ctx)
	}
	p := v.(nom.UID)

	add := nom.AddFlowEntry{
		Flow: nom.FlowEntry{
			Node: in.Node,
			Match: nom.Match{
				Fields: []nom.Field{
					nom.EthDst{
						Addr: dst,
						Mask: nom.MaskNoneMAC,
					},
				},
			},
			Actions: []nom.Action{
				nom.ActionForward{
					Ports: []nom.UID{p},
				},
			},
		},
	}
	ctx.ReplyTo(msg, add)

	out := nom.PacketOut{
		Node:     in.Node,
		InPort:   in.InPort,
		BufferID: in.BufferID,
		Packet:   in.Packet,
		Actions: []nom.Action{
			nom.ActionForward{
				Ports: []nom.UID{p},
			},
		},
	}
	ctx.ReplyTo(msg, out)
	return nil
}

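// save persists a raft Ready on the save path: it stores any incoming
// snapshot, saves the hard state and entries to the WAL, appends the entries
// to the in-memory storage, applies committed conf changes to the node
// early, and then forwards the Ready to the apply path.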
func (g *group) save(rdsv readySaved) error {
	glog.V(3).Infof("%v saving state", g)

	if rdsv.ready.SoftState != nil && rdsv.ready.SoftState.Lead != 0 {
		g.node.notifyElection(g.id)
	}

	// Apply snapshot to storage if it is more updated than current snapped.
	if !etcdraft.IsEmptySnap(rdsv.ready.Snapshot) {
		if err := g.diskStorage.SaveSnap(rdsv.ready.Snapshot); err != nil {
			glog.Fatalf("err in save snapshot: %v", err)
		}
		g.raftStorage.ApplySnapshot(rdsv.ready.Snapshot)
		glog.Infof("%v saved incoming snapshot at index %d", g,
			rdsv.ready.Snapshot.Metadata.Index)
	}

	err := g.diskStorage.Save(rdsv.ready.HardState, rdsv.ready.Entries)
	if err != nil {
		glog.Fatalf("err in raft storage save: %v", err)
	}
	glog.V(3).Infof("%v saved state on disk", g)

	g.raftStorage.Append(rdsv.ready.Entries)
	glog.V(3).Infof("%v appended entries in storage", g)

	// Apply config changes in the node as soon as possible before applying
	// other entries in the state machine.
	for _, e := range rdsv.ready.CommittedEntries {
		if e.Type != raftpb.EntryConfChange {
			continue
		}
		if e.Index <= g.saved {
			continue
		}
		g.saved = e.Index

		var cc raftpb.ConfChange
		pbutil.MustUnmarshal(&cc, e.Data)
		if glog.V(2) {
			glog.Infof("%v applies conf change %s: %s", g, formatConfChange(cc),
				formatEntry(e))
		}

		if err := g.validConfChange(cc); err != nil {
			glog.Errorf("%v received an invalid conf change for node %v: %v",
				g, cc.NodeID, err)
			cc.NodeID = etcdraft.None
			g.node.node.ApplyConfChange(g.id, cc)
			continue
		}

		cch := make(chan struct{})
		go func() {
			g.confState = *g.node.node.ApplyConfChange(g.id, cc)
			close(cch)
		}()

		select {
		case <-g.node.done:
			return ErrStopped
		case <-cch:
		}
	}

	glog.V(3).Infof("%v successfully saved ready", g)

	rdsv.saved <- struct{}{}

	select {
	case g.applyc <- rdsv.ready:
	case <-g.node.done:
	}
	return nil
}