func FLowsFromSFlowSample(ft *FlowTable, sample *layers.SFlowFlowSample, probePath string) []*Flow { flows := []*Flow{} for _, rec := range sample.Records { /* FIX(safchain): just keeping the raw packet for now */ switch rec.(type) { case layers.SFlowRawPacketFlowRecord: /* We only support RawPacket from SFlow probe */ case layers.SFlowExtendedSwitchFlowRecord: logging.GetLogger().Debug("1st layer is not SFlowRawPacketFlowRecord type") continue default: logging.GetLogger().Critical("1st layer is not a SFlow supported type") continue } record := rec.(layers.SFlowRawPacketFlowRecord) packet := &record.Header key := (FlowKey{}).fillFromGoPacket(packet) flow, _ := ft.GetFlow(key.String()) flow.ProbeGraphPath = probePath flow.fillFromGoPacket(packet) flows = append(flows, flow) } return flows }
// connect dials the Docker daemon, subscribes to container start/die
// events and processes them until the probe is asked to quit.
//
// wg.Add(2) accounts for the background container-listing goroutine and
// for this function itself (released by the deferred wg.Done below).
func (probe *DockerProbe) connect() error {
	var err error

	logging.GetLogger().Debugf("Connecting to Docker daemon: %s", probe.url)
	probe.client, err = dockerclient.NewDockerClient(probe.url, nil)
	if err != nil {
		logging.GetLogger().Errorf("Failed to connect to Docker daemon: %s", err.Error())
		return err
	}

	// only container lifecycle events are of interest to the topology
	eventsOptions := &dockerclient.MonitorEventsOptions{
		Filters: &dockerclient.MonitorEventsFilters{
			Events: []string{"start", "die"},
		},
	}

	eventErrChan, err := probe.client.MonitorEvents(eventsOptions, nil)
	if err != nil {
		logging.GetLogger().Errorf("Unable to monitor Docker events: %s", err.Error())
		return err
	}

	probe.wg.Add(2)

	probe.quit = make(chan bool)

	probe.connected.Store(true)
	defer probe.connected.Store(false)

	// register the already-running containers in the background so event
	// handling can start immediately; bail out early if the probe leaves
	// the running state
	go func() {
		defer probe.wg.Done()

		containers, err := probe.client.ListContainers(false, false, "")
		if err != nil {
			logging.GetLogger().Errorf("Failed to list containers: %s", err.Error())
			return
		}

		for _, c := range containers {
			if atomic.LoadInt64(&probe.state) != RunningState {
				break
			}
			probe.registerContainer(c.Id)
		}
	}()

	defer probe.wg.Done()

	// event loop: exit on quit signal or on a monitoring error
	for {
		select {
		case <-probe.quit:
			return nil
		case e := <-eventErrChan:
			if e.Error != nil {
				logging.GetLogger().Errorf("Got error while waiting for Docker event: %s", e.Error.Error())
				return e.Error
			}
			probe.handleDockerEvent(&e.Event)
		}
	}
}
func (g GremlinBackend) SetMetadata(i interface{}, k string, v interface{}) bool { var e graphElement var elType string switch i.(type) { case *Node: e = i.(*Node).graphElement elType = "V" case *Edge: e = i.(*Edge).graphElement elType = "V" } properties, err := idToPropertiesString(e.ID) if err != nil { logging.GetLogger().Error("Error while retrieving a Node: %s", err.Error()) return false } encoder := gremlin.GremlinPropertiesEncoder{} encoder.EncodeKVPair(k, v) query := "g." + elType + "().has(" + properties + ").property(" + encoder.String() + ")" _, err = g.client.Query(query) if err != nil { logging.GetLogger().Error("Gremlin query error: %s, %s", query, err.Error()) return false } return true }
func (gfe *GraphFlowEnhancer) cacheUpdater() { logging.GetLogger().Debug("Start GraphFlowEnhancer cache updater") var mac string for { mac = <-gfe.cacheUpdaterChan logging.GetLogger().Debug("GraphFlowEnhancer request received: %s", mac) gfe.Graph.Lock() intfs := gfe.Graph.LookupNodes(graph.Metadatas{"MAC": mac}) if len(intfs) > 1 { logging.GetLogger().Info("GraphFlowEnhancer found more than one interface for the mac: %s", mac) continue } if len(intfs) == 1 { ancestors, ok := gfe.Graph.GetAncestorsTo(intfs[0], graph.Metadatas{"Type": "host"}) if ok { var path string for i := len(ancestors) - 1; i >= 0; i-- { if len(path) > 0 { path += "/" } name, _ := ancestors[i].Metadatas()["Name"] path += name.(string) } gfe.cache.Set(mac, path, cache.DefaultExpiration) } } gfe.Graph.Unlock() } }
func startTopologyClient(t *testing.T, g *graph.Graph, onReady func(*websocket.Conn), onChange func(*websocket.Conn)) error { // ready when got a first ping ws, err := connectToAgent(5, onReady) if err != nil { return err } for { _, m, err := ws.ReadMessage() if err != nil { break } err = processGraphMessage(g, m) if err != nil { return err } logging.GetLogger().Debug("%s", string(m)) logging.GetLogger().Debug("%s", g.String()) onChange(ws) } return nil }
func (g GremlinBackend) AddEdge(e *Edge) bool { properties, err := toPropertiesString(e.graphElement) if err != nil { logging.GetLogger().Error("Error while adding a new Edge: %s", err.Error()) return false } propsParent, err := idToPropertiesString(e.parent) if err != nil { logging.GetLogger().Error("Error while adding a new Edge: %s", err.Error()) return false } propsChild, err := idToPropertiesString(e.child) if err != nil { logging.GetLogger().Error("Error while adding a new Edge: %s", err.Error()) return false } query := "g.V().has(" + propsParent + ").next()" query += ".addEdge('linked', g.V().has(" + propsChild + ").next(), " + string(properties) + ")" _, err = g.client.Query(query) if err != nil { logging.GetLogger().Error("Error while adding a new Node: %s", err.Error()) return false } return true }
func (c *GremlinClient) Connect() { host := c.Addr + ":" + strconv.FormatInt(int64(c.Port), 10) conn, err := net.Dial("tcp", host) if err != nil { logging.GetLogger().Error("Connection to the WebSocket server failed: %s", err.Error()) return } endpoint := "ws://" + host u, err := url.Parse(endpoint) if err != nil { logging.GetLogger().Error("Unable to parse the WebSocket Endpoint %s: %s", endpoint, err.Error()) return } wsConn, _, err := websocket.NewClient(conn, u, http.Header{}, 0, 4096) if err != nil { logging.GetLogger().Error("Unable to create a WebSocket connection %s : %s", endpoint, err.Error()) return } logging.GetLogger().Info("Connected to gremlin server %s:%d", c.Addr, c.Port) c.wsConn = wsConn }
func (s *Server) handleUDPFlowPacket() { s.conn.SetDeadline(time.Now().Add(200 * time.Millisecond)) data := make([]byte, 4096) for s.running.Load() == true { n, _, err := s.conn.ReadFromUDP(data) if err != nil { if err.(net.Error).Timeout() == true { s.conn.SetDeadline(time.Now().Add(200 * time.Millisecond)) continue } if s.running.Load() == false { return } logging.GetLogger().Errorf("Error while reading: %s", err.Error()) return } f, err := flow.FromData(data[0:n]) if err != nil { logging.GetLogger().Errorf("Error while parsing flow: %s", err.Error()) } s.AnalyzeFlows([]*flow.Flow{f}) } }
func (g GremlinBackend) GetEdge(i Identifier) *Edge { properties, err := idToPropertiesString(i) if err != nil { logging.GetLogger().Error("Error while retrieving a Node: %s", err.Error()) return nil } query := "g.E().has(" + properties + ")" els, err := g.client.QueryElements(query) if err != nil { return nil } switch l := len(els); { case l == 0: return nil case l > 1: logging.GetLogger().Error("Found more than one edge for this ID: " + string(i)) return nil } edge := gremElementToEdge(els[0]) parent, child := g.GetEdgeNodes(edge) if parent == nil || child == nil { return nil } edge.parent = parent.ID edge.child = child.ID return edge }
func (u *NetNSTopoUpdater) start() { runtime.LockOSThread() defer runtime.UnlockOSThread() watcher, err := inotify.NewWatcher() if err != nil { logging.GetLogger().Error("Unable to create a new Watcher: %s", err.Error()) return } err = watcher.Watch(runBaseDir) if err != nil { logging.GetLogger().Error("Unable to Watch %s: %s", runBaseDir, err.Error()) return } u.initialize() for { select { case ev := <-watcher.Event: if ev.Mask&inotify.IN_CREATE > 0 { u.onNetNsCreated(ev.Name) } if ev.Mask&inotify.IN_DELETE > 0 { u.onNetNsDeleted(ev.Name) } case err := <-watcher.Error: logging.GetLogger().Error("Error while watching network namespace: %s", err.Error()) } } }
func FlowsFromSFlowSample(ft *Table, sample *layers.SFlowFlowSample, setter FlowProbePathSetter) []*Flow { flows := []*Flow{} for _, rec := range sample.Records { /* FIX(safchain): just keeping the raw packet for now */ switch rec.(type) { case layers.SFlowRawPacketFlowRecord: /* We only support RawPacket from SFlow probe */ case layers.SFlowExtendedSwitchFlowRecord: logging.GetLogger().Debug("1st layer is not SFlowRawPacketFlowRecord type") continue default: logging.GetLogger().Critical("1st layer is not a SFlow supported type") continue } record := rec.(layers.SFlowRawPacketFlowRecord) flow := FlowFromGoPacket(ft, &record.Header, setter) if flow != nil { flows = append(flows, flow) } } return flows }
// triggerResync pushes the whole local graph to the analyzer again: it
// first requests the deletion of everything belonging to this host, then
// re-sends every node and edge.
func (c *Forwarder) triggerResync() {
	logging.GetLogger().Infof("Start a resync of the graph")

	hostname, err := os.Hostname()
	if err != nil {
		logging.GetLogger().Errorf("Unable to retrieve the hostname: %s", err.Error())
		return
	}

	c.Graph.Lock()
	defer c.Graph.Unlock()

	// request for deletion of everything belonging to the host node
	root := c.Graph.GetNode(Identifier(hostname))
	if root == nil {
		return
	}
	c.Client.SendWSMessage(WSMessage{"SubGraphDeleted", root})

	// re-add all the nodes and edges
	nodes := c.Graph.GetNodes()
	for _, n := range nodes {
		c.Client.SendWSMessage(WSMessage{"NodeAdded", n})
	}

	edges := c.Graph.GetEdges()
	for _, e := range edges {
		c.Client.SendWSMessage(WSMessage{"EdgeAdded", e})
	}
}
// Start boots the agent: local websocket server, optional analyzer
// connection (graph forwarding + authentication), the topology and flow
// probe bundles and, when an analyzer is configured, the etcd-backed
// on-demand capture listener. Fatal setup errors exit the process.
func (a *Agent) Start() {
	var err error

	go a.WSServer.ListenAndServe()

	addr, port, err := config.GetAnalyzerClientAddr()
	if err != nil {
		logging.GetLogger().Errorf("Unable to parse analyzer client %s", err.Error())
		os.Exit(1)
	}

	// an empty address means no analyzer is configured
	if addr != "" {
		authOptions := &shttp.AuthenticationOpts{
			Username: config.GetConfig().GetString("agent.analyzer_username"),
			Password: config.GetConfig().GetString("agent.analyzer_password"),
		}
		authClient := shttp.NewAuthenticationClient(addr, port, authOptions)

		a.WSClient, err = shttp.NewWSAsyncClient(addr, port, "/ws", authClient)
		if err != nil {
			logging.GetLogger().Errorf("Unable to instantiate analyzer client %s", err.Error())
			os.Exit(1)
		}

		graph.NewForwarder(a.WSClient, a.Graph)
		a.WSClient.Connect()

		// send a first reset event to the analyzers
		a.Graph.DelSubGraph(a.Root)
	}

	a.TopologyProbeBundle = tprobes.NewTopologyProbeBundleFromConfig(a.Graph, a.Root)
	a.TopologyProbeBundle.Start()

	a.FlowProbeBundle = fprobes.NewFlowProbeBundleFromConfig(a.TopologyProbeBundle, a.Graph)
	a.FlowProbeBundle.Start()

	if addr != "" {
		a.EtcdClient, err = etcd.NewEtcdClientFromConfig()
		if err != nil {
			logging.GetLogger().Errorf("Unable to start etcd client %s", err.Error())
			os.Exit(1)
		}

		// watch capture resources in etcd and start/stop probes on demand
		captureHandler := &api.BasicApiHandler{
			ResourceHandler: &api.CaptureHandler{},
			EtcdKeyAPI:      a.EtcdClient.KeysApi,
		}

		l, err := fprobes.NewOnDemandProbeListener(a.FlowProbeBundle, a.Graph, captureHandler)
		if err != nil {
			logging.GetLogger().Errorf("Unable to start on-demand flow probe %s", err.Error())
			os.Exit(1)
		}
		a.OnDemandProbeListener = l
		a.OnDemandProbeListener.Start()
	}

	go a.HTTPServer.ListenAndServe()
}
func NewFlowProbeBundleFromConfig(tb *probes.TopologyProbeBundle, g *graph.Graph) *FlowProbeBundle { list := config.GetConfig().GetStringSlice("agent.flow.probes") logging.GetLogger().Infof("Flow probes: %v", list) gfe := mappings.NewGraphFlowEnhancer(g) var aclient *analyzer.Client addr, port, err := config.GetAnalyzerClientAddr() if err != nil { logging.GetLogger().Errorf("Unable to parse analyzer client: %s", err.Error()) return nil } if addr != "" { aclient, err = analyzer.NewClient(addr, port) if err != nil { logging.GetLogger().Errorf("Analyzer client error %s:%d : %s", addr, port, err.Error()) return nil } } probes := make(map[string]probe.Probe) for _, t := range list { if _, ok := probes[t]; ok { continue } switch t { case "ovssflow": ofe := mappings.NewOvsFlowEnhancer(g) pipeline := mappings.NewFlowMappingPipeline(gfe, ofe) o := NewOvsSFlowProbesHandler(tb, g, pipeline, aclient) if o != nil { probes[t] = o } case "pcap": pipeline := mappings.NewFlowMappingPipeline(gfe) o := NewPcapProbesHandler(tb, g, pipeline, aclient) if o != nil { probes[t] = o } default: logging.GetLogger().Errorf("unknown probe type %s", t) } } p := probe.NewProbeBundle(probes) return &FlowProbeBundle{ ProbeBundle: *p, Graph: g, } }
// processGraphMessage decodes a graph event received on the websocket
// and applies it to the server graph; all mutations happen under the
// graph lock. SyncRequest replies with the complete graph.
func (c *WSClient) processGraphMessage(m []byte) {
	c.server.Graph.Lock()
	defer c.server.Graph.Unlock()

	msg, err := UnmarshalWSMessage(m)
	if err != nil {
		logging.GetLogger().Errorf("Graph: Unable to parse the event %s: %s", msg, err.Error())
		return
	}
	g := c.server.Graph

	switch msg.Type {
	case "SyncRequest":
		// reply with the whole graph
		reply := WSMessage{
			Type: "SyncReply",
			Obj:  c.server.Graph,
		}
		c.send <- []byte(reply.String())
	case "SubGraphDeleted":
		n := msg.Obj.(*Node)

		logging.GetLogger().Debugf("Got SubGraphDeleted event from the node %s", n.ID)

		// delete the node and everything below it
		node := g.GetNode(n.ID)
		if node != nil {
			g.DelSubGraph(node)
		}
	case "NodeUpdated":
		n := msg.Obj.(*Node)
		node := g.GetNode(n.ID)
		if node != nil {
			g.SetMetadata(node, n.metadata)
		}
	case "NodeDeleted":
		g.DelNode(msg.Obj.(*Node))
	case "NodeAdded":
		n := msg.Obj.(*Node)
		// ignore duplicate additions
		if g.GetNode(n.ID) == nil {
			g.AddNode(n)
		}
	case "EdgeUpdated":
		e := msg.Obj.(*Edge)
		edge := g.GetEdge(e.ID)
		if edge != nil {
			g.SetMetadata(edge, e.metadata)
		}
	case "EdgeDeleted":
		g.DelEdge(msg.Obj.(*Edge))
	case "EdgeAdded":
		e := msg.Obj.(*Edge)
		// ignore duplicate additions
		if g.GetEdge(e.ID) == nil {
			g.AddEdge(e)
		}
	}
}
func (g GremlinBackend) SetMetadata(i interface{}, meta Metadata) bool { var e graphElement var elType string switch i.(type) { case *Node: e = i.(*Node).graphElement elType = "V" case *Edge: e = i.(*Edge).graphElement elType = "V" } properties, err := idToPropertiesString(e.ID) if err != nil { logging.GetLogger().Errorf("Error while retrieving a Node: %s", err.Error()) return false } query := "g." + elType + "().has(" + properties + ")" els, err := g.client.QueryElements(query) if err != nil || len(els) == 0 { return false } if len(els) > 1 { logging.GetLogger().Errorf("Found more than one node for this ID: " + string(e.ID)) return false } el := els[0] query = "g." + elType + "(" + string(el.ID) + ").properties().drop()" _, err = g.client.Query(query) if err != nil { logging.GetLogger().Errorf("Gremlin query error: %s, %s", query, err.Error()) return false } j := meta.String() query = "g." + elType + "(" + string(el.ID) + ")" query += `.sideEffect{v = it; ["_ID": "` + string(e.ID) + `"` query += `, "_host": "` + string(e.host) + `"` query += `,` + j[1:len(j)-1] + `]` query += `.each{v.get().property(it.key, it.value)}}` _, err = g.client.Query(query) if err != nil { logging.GetLogger().Errorf("Gremlin query error: %s, %s", query, err.Error()) return false } return true }
func (u *NetLinkTopoUpdater) onLinkAdded(index int) { logging.GetLogger().Debug("Link added: %d", index) link, err := netlink.LinkByIndex(index) if err != nil { logging.GetLogger().Error("Failed to find interface %d: %s", index, err.Error()) return } u.addLinkToTopology(link) }
func (o *OnDemandProbeListener) registerProbe(n *graph.Node, capture *api.Capture) { fprobe := o.probeFromType(n) if fprobe == nil { logging.GetLogger().Errorf("Failed to register flow probe, unknown type") return } if err := fprobe.RegisterProbe(n, capture); err != nil { logging.GetLogger().Debugf("Failed to register flow probe: %s", err.Error()) } }
func (c *Client) SendFlows(flows []*flow.Flow) { for _, flow := range flows { j, _ := json.Marshal(flow) logging.GetLogger().Debug("Sending to analyzer: %s", string(j)) err := c.SendFlow(flow) if err != nil { logging.GetLogger().Error("Unable to send flow: ", err.Error()) } } }
func (mapper *NeutronMapper) cacheUpdater() { logging.GetLogger().Debug("Start Neutron cache updater") var mac string for { mac = <-mapper.cacheUpdaterChan logging.GetLogger().Debug("Mac request received: %s", mac) attrs := mapper.retrieveAttributes(mac) mapper.cache.Set(mac, attrs, cache.DefaultExpiration) } }
func (o *OnDemandProbeListener) registerProbe(n *graph.Node, capture *api.Capture) { fprobe := o.probeFromType(n) if fprobe == nil { logging.GetLogger().Errorf("Failed to register flow probe, unknown type %v", n) return } if err := fprobe.RegisterProbe(n, capture); err != nil { logging.GetLogger().Debugf("Failed to register flow probe: %s", err.Error()) } o.Graph.AddMetadata(n, "State.FlowCapture", "ON") }
func (nu *NetNsNetLinkTopoUpdater) Start(path string) { name := getNetNSName(path) logging.GetLogger().Debugf("Starting NetLinkTopoUpdater for NetNS: %s", name) runtime.LockOSThread() defer runtime.UnlockOSThread() origns, err := netns.Get() if err != nil { logging.GetLogger().Errorf("Error while switching from root ns to %s: %s", name, err.Error()) return } defer origns.Close() time.Sleep(1 * time.Second) newns, err := netns.GetFromPath(path) if err != nil { logging.GetLogger().Errorf("Error while switching from root ns to %s: %s", name, err.Error()) return } defer newns.Close() err = netns.Set(newns) if err != nil { logging.GetLogger().Errorf("Error while switching from root ns to %s: %s", name, err.Error()) return } /* start a netlinks updater inside this namespace */ nu.Lock() nu.nlProbe = NewNetLinkProbe(nu.Graph, nu.Root) nu.Unlock() /* NOTE(safchain) don't Start just Run, need to keep it alive for the time life of the netns * and there is no need to have a new goroutine here */ nu.nlProbe.Run() nu.Lock() nu.nlProbe = nil nu.Unlock() logging.GetLogger().Debugf("NetLinkTopoUpdater stopped for NetNS: %s", name) netns.Set(origns) }
// EvalNodes evaluates every registered alert expression against the
// nodes matching the alert selector, and notifies the event listeners
// for each node where the expression evaluates to true.
func (a *AlertManager) EvalNodes() {
	a.alertsLock.RLock()
	defer a.alertsLock.RUnlock()

	for _, al := range a.alerts {
		nodes := a.Graph.LookupNodesFromKey(al.Select)
		for _, n := range nodes {
			w := eval.NewWorld()
			// expose each metadata entry as a constant inside the
			// expression evaluation world
			defConst := func(name string, val interface{}) {
				t, v := toTypeValue(val)
				w.DefineConst(name, t, v)
			}
			for k, v := range n.Metadata() {
				defConst(k, v)
			}
			fs := token.NewFileSet()
			// force a boolean result whatever the alert test expression is
			toEval := "(" + al.Test + ") == true"
			expr, err := w.Compile(fs, toEval)
			if err != nil {
				logging.GetLogger().Error("Can't compile expression : " + toEval)
				continue
			}
			ret, err := expr.Run()
			if err != nil {
				logging.GetLogger().Error("Can't evaluate expression : " + toEval)
				continue
			}

			if ret.String() == "true" {
				al.Count++

				msg := AlertMessage{
					UUID:       al.UUID,
					Type:       FIXED,
					Timestamp:  time.Now(),
					Count:      al.Count,
					Reason:     al.Action,
					ReasonData: n,
				}

				logging.GetLogger().Debugf("AlertMessage to WS : " + al.UUID + " " + msg.String())

				// fan the alert out to all registered listeners
				for _, l := range a.eventListeners {
					l.OnAlert(&msg)
				}
			}
		}
	}
}
func (u *NetNSProbe) Unregister(path string) { name := getNetNSName(path) logging.GetLogger().Debugf("Network Namespace deleted: %s", name) u.RLock() nu, ok := u.nsnlProbes[name] u.RUnlock() if !ok { return } nu.Stop() u.Graph.Lock() defer u.Graph.Unlock() children := nu.Graph.LookupChildren(nu.Root, graph.Metadata{}) for _, child := range children { u.Graph.DelNode(child) } u.Graph.DelNode(nu.Root) u.Lock() delete(u.nsnlProbes, name) u.Unlock() }
func (u *NetNSProbe) Register(path string, extraMetadata *graph.Metadata) { name := getNetNSName(path) u.RLock() _, ok := u.nsnlProbes[name] u.RUnlock() if ok { return } u.Graph.Lock() defer u.Graph.Unlock() logging.GetLogger().Debugf("Network Namespace added: %s", name) metadata := graph.Metadata{"Name": name, "Type": "netns"} if extraMetadata != nil { for k, v := range *extraMetadata { metadata[k] = v } } n := u.Graph.NewNode(graph.GenID(), metadata) u.Graph.Link(u.Root, n) nu := NewNetNsNetLinkTopoUpdater(u.Graph, n) go nu.Start(path) u.Lock() u.nsnlProbes[name] = nu u.Unlock() }
func (c *WSClient) writePump(wg *sync.WaitGroup, quit chan struct{}) { ticker := time.NewTicker(c.server.pingPeriod) defer func() { ticker.Stop() c.conn.Close() }() for { select { case message, ok := <-c.send: if !ok { c.write(websocket.CloseMessage, []byte{}) wg.Done() return } if err := c.write(websocket.TextMessage, message); err != nil { logging.GetLogger().Warningf("Error while writing to the websocket: %s", err.Error()) wg.Done() return } case <-ticker.C: if err := c.write(websocket.PingMessage, []byte{}); err != nil { wg.Done() return } case <-quit: wg.Done() return } } }
// Start boots the agent: resync the analyzers with a reset event,
// connect the graph forwarder when an analyzer is configured, then start
// the topology/flow probe bundles and the local servers.
func (a *Agent) Start() {
	var err error

	// send a first reset event to the analyzers
	a.Graph.DelSubGraph(a.Root)

	addr, port, err := config.GetAnalyzerClientAddr()
	if err != nil {
		logging.GetLogger().Errorf("Unable to parse analyzer client %s", err.Error())
		os.Exit(1)
	}

	// an empty address means no analyzer is configured
	if addr != "" {
		a.Gclient = graph.NewAsyncClient(addr, port, "/ws/graph")
		graph.NewForwarder(a.Gclient, a.Graph)

		a.Gclient.Connect()
	}

	a.TopologyProbeBundle = tprobes.NewTopologyProbeBundleFromConfig(a.Graph, a.Root)
	a.TopologyProbeBundle.Start()

	a.FlowProbeBundle = fprobes.NewFlowProbeBundleFromConfig(a.TopologyProbeBundle, a.Graph)
	a.FlowProbeBundle.Start()

	go a.TopologyServer.ListenAndServe()
	go a.GraphServer.ListenAndServe()
}
func toTypeValue(val interface{}) (eval.Type, eval.Value) { switch val := val.(type) { case bool: r := boolV(val) return eval.BoolType, &r case uint8: r := uint8V(val) return eval.Uint8Type, &r case uint32: r := uint32V(val) return eval.Uint32Type, &r case uint: r := uintV(val) return eval.Uint64Type, &r case int: r := intV(val) return eval.Int64Type, &r case float64: r := float64V(val) return eval.Float64Type, &r case string: r := stringV(val) return eval.StringType, &r } logging.GetLogger().Errorf("toValue(%T) not implemented", val) return nil, nil }
func FLowsFromSFlowSample(ft *FlowTable, sample *layers.SFlowFlowSample, probePath *string) []*Flow { flows := []*Flow{} for _, rec := range sample.Records { /* FIX(safchain): just keeping the raw packet for now */ record, ok := rec.(layers.SFlowRawPacketFlowRecord) if !ok { logging.GetLogger().Critical("1st layer is not SFlowRawPacketFlowRecord type") continue } packet := &record.Header key := (FlowKey{}).fillFromGoPacket(packet) flow, new := ft.GetFlow(key.String()) if new { flow.ProbeGraphPath = "" if probePath != nil { flow.ProbeGraphPath = *probePath } } flow.fillFromGoPacket(packet) flows = append(flows, flow) } return flows }
func NewOvsSFlowProbesHandlerFromConfig(tb *probes.TopologyProbeBundle, g *graph.Graph, p *mappings.FlowMappingPipeline, a *analyzer.Client) *OvsSFlowProbesHandler { probe := tb.GetProbe("ovsdb") if probe == nil { return nil } agent, err := sflow.NewSFlowAgentFromConfig(g) if err != nil { logging.GetLogger().Errorf("Unable to start an OVS SFlow probe handler: %s", err.Error()) return nil } agent.SetMappingPipeline(p) if a != nil { agent.SetAnalyzerClient(a) } expire := config.GetConfig().GetInt("cache.expire") cleanup := config.GetConfig().GetInt("cache.cleanup") o := NewOvsSFlowProbesHandler(probe.(*probes.OvsdbProbe), agent, expire, cleanup) agent.SetProbePathGetter(o) return o }