func startTopologyClient(t *testing.T, g *graph.Graph, onReady func(*websocket.Conn), onChange func(*websocket.Conn)) error {
	// considered ready once a first ping has been received
	ws, err := connectToAgent(5, onReady)
	if err != nil {
		return err
	}

	for {
		_, m, err := ws.ReadMessage()
		if err != nil {
			break
		}

		err = processGraphMessage(g, m)
		if err != nil {
			return err
		}

		logging.GetLogger().Debugf("%s", string(m))
		logging.GetLogger().Debugf("%s", g.String())

		onChange(ws)
	}

	return nil
}
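// FlowsFromSFlowSample builds flows from the raw packet records of an sFlow
// sample. Records of other types are skipped, unsupported ones being logged.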
func FlowsFromSFlowSample(ft *Table, sample *layers.SFlowFlowSample, setter FlowProbeNodeSetter) []*Flow {
	flows := []*Flow{}

	for _, rec := range sample.Records {
		/* FIX(safchain): just keeping the raw packet for now */
		switch rec.(type) {
		case layers.SFlowRawPacketFlowRecord:
			/* We only support RawPacket from SFlow probe */
		case layers.SFlowExtendedSwitchFlowRecord:
			logging.GetLogger().Debug("1st layer is not SFlowRawPacketFlowRecord type")
			continue
		default:
			logging.GetLogger().Critical("1st layer is not a SFlow supported type")
			continue
		}

		record := rec.(layers.SFlowRawPacketFlowRecord)

		goFlows := FlowsFromGoPacket(ft, &record.Header, int64(record.FrameLength), setter)
		if goFlows != nil {
			flows = append(flows, goFlows...)
		}
	}

	return flows
}
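// GetEdge looks up an edge by its identifier through the Gremlin backend and
// resolves its parent and child nodes; it returns nil if the edge is not
// found, is ambiguous, or if either endpoint cannot be resolved.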
func (g GremlinBackend) GetEdge(i Identifier, t *time.Time) *Edge {
	properties, err := idToPropertiesString(i)
	if err != nil {
		logging.GetLogger().Errorf("Error while retrieving an Edge: %s", err.Error())
		return nil
	}

	query := "g.E().has(" + properties + ")"

	els, err := g.client.QueryElements(query)
	if err != nil {
		return nil
	}

	switch l := len(els); {
	case l == 0:
		return nil
	case l > 1:
		logging.GetLogger().Error("Found more than one edge for this ID: " + string(i))
		return nil
	}

	edge := gremElementToEdge(els[0])

	parent, child := g.GetEdgeNodes(edge, t)
	if parent == nil || child == nil {
		return nil
	}

	edge.parent = parent.ID
	edge.child = child.ID

	return edge
}
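// SetStorageFromConfig configures the flow storage backend (ElasticSearch or
// OrientDB) from the "analyzer.storage" configuration key; an unknown or
// unreachable backend is fatal.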
func (s *Server) SetStorageFromConfig() {
	if t := config.GetConfig().GetString("analyzer.storage"); t != "" {
		var (
			err     error
			storage storage.Storage
		)

		switch t {
		case "elasticsearch":
			storage, err = elasticsearch.New()
			if err != nil {
				logging.GetLogger().Fatalf("Can't connect to ElasticSearch server: %v", err)
			}
		case "orientdb":
			storage, err = orientdb.New()
			if err != nil {
				logging.GetLogger().Fatalf("Can't connect to OrientDB server: %v", err)
			}
		default:
			logging.GetLogger().Fatalf("Storage type unknown: %s", t)
			os.Exit(1)
		}

		s.SetStorage(storage)
		logging.GetLogger().Infof("Using %s as storage", t)
	}
}
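// getCaptureCount evaluates the Gremlin query of a capture and updates its
// Count with the number of matching nodes on which a capture is allowed.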
func (c *CaptureApiHandler) getCaptureCount(r ApiResource) ApiResource {
	capture := r.(*Capture)

	tr := traversal.NewGremlinTraversalParser(strings.NewReader(capture.GremlinQuery), c.Graph)
	ts, err := tr.Parse()
	if err != nil {
		logging.GetLogger().Errorf("Gremlin expression error: %s", err.Error())
		return r
	}

	res, err := ts.Exec()
	if err != nil {
		logging.GetLogger().Errorf("Gremlin execution error: %s", err.Error())
		return r
	}

	for _, value := range res.Values() {
		switch value.(type) {
		case *graph.Node:
			n := value.(*graph.Node)
			if common.IsCaptureAllowed(n.Metadata()["Type"].(string)) {
				capture.Count = capture.Count + 1
			}
		case []*graph.Node:
			capture.Count = capture.Count + len(value.([]*graph.Node))
		default:
			capture.Count = 0
		}
	}

	return capture
}
func (o *OnDemandProbeServer) unregisterProbe(n *graph.Node) bool {
	if !o.isActive(n) {
		return false
	}

	o.Lock()
	c := o.captures[n.ID]
	o.Unlock()

	fprobe, err := o.getProbe(n, c)
	if fprobe == nil {
		if err != nil {
			logging.GetLogger().Error(err.Error())
		}
		return false
	}

	if err := fprobe.UnregisterProbe(n); err != nil {
		logging.GetLogger().Debugf("Failed to unregister flow probe: %s", err.Error())
	}

	o.Lock()
	o.fta.Release(o.activeProbes[n.ID])
	delete(o.activeProbes, n.ID)
	delete(o.captures, n.ID)
	o.Unlock()

	return true
}
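// lookupFlows sends a FlowSearchQuery to the flow table of the given agent
// over WebSocket and pushes the resulting FlowSet (empty on error or after a
// 10 second timeout) to the flowset channel.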
func (f *TableClient) lookupFlows(flowset chan *FlowSet, host string, flowSearchQuery *FlowSearchQuery) {
	obj, _ := proto.Marshal(flowSearchQuery)
	tq := TableQuery{
		Type: "FlowSearchQuery",
		Obj:  obj,
	}
	msg := shttp.NewWSMessage(Namespace, "TableQuery", tq)

	ch := make(chan *json.RawMessage)
	defer close(ch)

	f.replyChanMutex.Lock()
	f.replyChan[msg.UUID] = ch
	f.replyChanMutex.Unlock()

	defer func() {
		f.replyChanMutex.Lock()
		delete(f.replyChan, msg.UUID)
		f.replyChanMutex.Unlock()
	}()

	if !f.WSServer.SendWSMessageTo(msg, host) {
		logging.GetLogger().Errorf("Unable to send message to agent: %s", host)
		flowset <- NewFlowSet()
		return
	}

	select {
	case raw := <-ch:
		var reply TableReply
		if err := json.Unmarshal([]byte(*raw), &reply); err != nil {
			logging.GetLogger().Errorf("Error returned while reading TableReply from: %s", host)
			break
		}

		if reply.Status != http.StatusOK {
			logging.GetLogger().Errorf("Error %d TableReply from: %s", reply.Status, host)
			break
		}

		fs := NewFlowSet()
		context := MergeContext{Sorted: flowSearchQuery.Sort, Dedup: flowSearchQuery.Dedup}
		for _, b := range reply.Obj {
			var fsr FlowSearchReply
			if err := proto.Unmarshal(b, &fsr); err != nil {
				logging.GetLogger().Errorf("Unable to decode flow search reply from: %s", host)
				continue
			}
			fs.Merge(fsr.FlowSet, context)
		}
		flowset <- fs

		return
	case <-time.After(time.Second * 10):
		logging.GetLogger().Errorf("Timeout while reading TableReply from: %s", host)
	}

	flowset <- NewFlowSet()
}
func (g GremlinBackend) AddMetadata(i interface{}, k string, v interface{}) bool {
	var e graphElement
	var elType string

	switch i.(type) {
	case *Node:
		e = i.(*Node).graphElement
		elType = "V"
	case *Edge:
		e = i.(*Edge).graphElement
		// edges are addressed with g.E(), not g.V()
		elType = "E"
	}

	properties, err := idToPropertiesString(e.ID)
	if err != nil {
		logging.GetLogger().Errorf("Error while retrieving the element: %s", err.Error())
		return false
	}

	encoder := gremlin.GremlinPropertiesEncoder{}
	encoder.EncodeKVPair(k, v)

	query := "g." + elType + "().has(" + properties + ").property(" + encoder.String() + ")"

	_, err = g.client.Query(query)
	if err != nil {
		logging.GetLogger().Errorf("Gremlin query error: %s, %s", query, err.Error())
		return false
	}

	return true
}
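// nodeUpdater consumes node identifiers from nodeUpdaterChan and enriches
// nodes that carry a MAC address with their Neutron port attributes, caching
// ports that could not be found.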
func (mapper *NeutronMapper) nodeUpdater() {
	logging.GetLogger().Debugf("Starting Neutron updater")

	for nodeID := range mapper.nodeUpdaterChan {
		node := mapper.graph.GetNode(nodeID)
		if node == nil {
			continue
		}

		if _, ok := node.Metadata()["MAC"]; !ok {
			continue
		}

		portMd := retrievePortMetadata(node.Metadata())

		attrs, err := mapper.retrieveAttributes(portMd)
		if err != nil {
			if nerr, ok := err.(NeutronPortNotFound); ok {
				logging.GetLogger().Debugf("Caching not-found MAC %s", nerr.MAC)
				mapper.cache.Set(nerr.MAC, PortMetadata{}, cache.DefaultExpiration)
			} else {
				logging.GetLogger().Errorf("Failed to retrieve attributes for port %s/%s: %v", portMd.portID, portMd.mac, err)
			}
			continue
		}

		mapper.updateNode(node, attrs)
		mapper.cache.Set(node.Metadata()["MAC"].(string), portMd, cache.DefaultExpiration)
	}

	logging.GetLogger().Debugf("Stopping Neutron updater")
}
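// StoreFlows upserts the given flows into OrientDB and, when present, stores
// their last update metric as a separate document linked to the flow.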
func (c *OrientDBStorage) StoreFlows(flows []*flow.Flow) error {
	// TODO: use batch of operations
	for _, flow := range flows {
		flowDoc, err := c.client.Upsert(flowToDocument(flow), "UUID")
		if err != nil {
			logging.GetLogger().Errorf("Error while pushing flow %s: %s\n", flow.UUID, err.Error())
			return err
		}

		flowID, ok := flowDoc["@rid"]
		if !ok {
			logging.GetLogger().Errorf("No @rid attribute for flow '%s'", flow.UUID)
			// err is nil at this point, so return an explicit error (assumes the fmt package is imported)
			return fmt.Errorf("no @rid attribute for flow '%s'", flow.UUID)
		}

		if flow.LastUpdateMetric != nil {
			doc := metricToDocument(flow.LastUpdateMetric)
			doc["Flow"] = flowID
			if _, err := c.client.CreateDocument(doc); err != nil {
				logging.GetLogger().Errorf("Error while pushing metric %+v: %s\n", flow.LastUpdateMetric, err.Error())
				continue
			}
		}
	}

	return nil
}
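// archiveElement re-indexes a graph element under a new ElasticSearch id with
// a DeletedAt timestamp and removes the original document.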
func (b *ElasticSearchBackend) archiveElement(kind string, id Identifier) bool {
	resp, err := b.client.Get(kind, string(id))
	if err != nil || !resp.Found {
		return false
	}

	var obj map[string]interface{}
	if err := json.Unmarshal([]byte(*resp.Source), &obj); err != nil {
		return false
	}

	if _, err := b.client.Delete(kind, string(id)); err != nil {
		logging.GetLogger().Errorf("Error while deleting %s %s: %s", kind, id, err.Error())
		return false
	}

	obj["DeletedAt"] = time.Now().Unix()
	obj["CreatedAt"] = int64(obj["CreatedAt"].(float64))

	// Archive the element with a different ES id
	if err := b.client.Index(kind, string(GenID()), obj); err != nil {
		logging.GetLogger().Errorf("Error while archiving %s %s: %s", kind, id, err.Error())
		return false
	}

	return true
}
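// registerProbe allocates a flow table for the node and registers the
// matching flow probe on it; it returns false if a probe is already active or
// if the node type is not supported.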
func (o *OnDemandProbeListener) registerProbe(n *graph.Node, capture *api.Capture) bool {
	o.Lock()
	defer o.Unlock()

	if _, ok := o.activeProbes[n.ID]; ok {
		logging.GetLogger().Debugf("A probe already exists for %s", n.ID)
		return false
	}

	if _, ok := n.Metadata()["Type"]; !ok {
		logging.GetLogger().Infof("Do not register flow probe, type not supported %v", n)
		return false
	}

	fprobe := o.getProbe(n, capture)
	if fprobe == nil {
		logging.GetLogger().Errorf("Failed to register flow probe, unknown type %v", n)
		return false
	}

	ft := o.fta.Alloc(fprobe.AsyncFlowPipeline)

	if err := fprobe.RegisterProbe(n, capture, ft); err != nil {
		logging.GetLogger().Debugf("Failed to register flow probe: %s", err.Error())
		o.fta.Release(ft)
		return false
	}

	o.activeProbes[n.ID] = ft
	o.captures[n.ID] = capture

	logging.GetLogger().Debugf("New active probe on: %v", n)
	return true
}
func (o *OnDemandProbeClient) matchGremlinExpr(node *graph.Node, gremlin string) bool {
	tr := traversal.NewGremlinTraversalParser(strings.NewReader(gremlin), o.graph)
	ts, err := tr.Parse()
	if err != nil {
		logging.GetLogger().Errorf("Gremlin expression error: %s", err.Error())
		return false
	}

	res, err := ts.Exec()
	if err != nil {
		logging.GetLogger().Errorf("Gremlin execution error: %s", err.Error())
		return false
	}

	for _, value := range res.Values() {
		n, ok := value.(*graph.Node)
		if !ok {
			logging.GetLogger().Error("Gremlin expression doesn't return a node")
			return false
		}

		if node.ID == n.ID {
			return true
		}
	}

	return false
}
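// handleUDPFlowPacket reads flow packets from the UDP socket until the server
// is stopped, renewing the read deadline on timeouts and forwarding decoded
// flows to AnalyzeFlows.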
func (s *Server) handleUDPFlowPacket() {
	s.conn.SetDeadline(time.Now().Add(200 * time.Millisecond))
	data := make([]byte, 4096)

	for s.running.Load() == true {
		n, _, err := s.conn.ReadFromUDP(data)
		if err != nil {
			if err.(net.Error).Timeout() == true {
				s.conn.SetDeadline(time.Now().Add(200 * time.Millisecond))
				continue
			}
			if s.running.Load() == false {
				return
			}
			logging.GetLogger().Errorf("Error while reading: %s", err.Error())
			return
		}

		f, err := flow.FromData(data[0:n])
		if err != nil {
			logging.GetLogger().Errorf("Error while parsing flow: %s", err.Error())
			// skip the packet instead of analyzing a nil flow
			continue
		}

		s.AnalyzeFlows([]*flow.Flow{f})
	}
}
func (g GremlinBackend) AddEdge(e *Edge) bool {
	properties, err := toPropertiesString(e.graphElement)
	if err != nil {
		logging.GetLogger().Errorf("Error while adding a new Edge: %s", err.Error())
		return false
	}

	propsParent, err := idToPropertiesString(e.parent)
	if err != nil {
		logging.GetLogger().Errorf("Error while adding a new Edge: %s", err.Error())
		return false
	}

	propsChild, err := idToPropertiesString(e.child)
	if err != nil {
		logging.GetLogger().Errorf("Error while adding a new Edge: %s", err.Error())
		return false
	}

	query := "g.V().has(" + propsParent + ").next()"
	query += ".addEdge('linked', g.V().has(" + propsChild + ").next(), " + string(properties) + ")"

	_, err = g.client.Query(query)
	if err != nil {
		logging.GetLogger().Errorf("Error while adding a new Edge: %s", err.Error())
		return false
	}

	return true
}
func (s *Session) Eval(in string) error {
	logging.GetLogger().Debugf("eval >>> %q", in)

	for _, command := range commands {
		if command.name == "g" && strings.HasPrefix(in, command.name) {
			err := command.action(s, in)
			if err != nil {
				logging.GetLogger().Errorf("%s: %s", command.name, err)
			}
			return nil
		}

		arg := strings.TrimPrefix(in, ":"+command.name)
		if arg == in {
			continue
		}

		if arg == "" || strings.HasPrefix(arg, " ") {
			arg = strings.TrimSpace(arg)

			err := command.action(s, arg)
			if err != nil {
				if err == ErrQuit {
					return err
				}
				logging.GetLogger().Errorf("%s: %s", command.name, err)
			}
			return nil
		}
	}

	return nil
}
func (mapper *OpenContrailMapper) onVhostAdded(node *graph.Node, itf collection.Element) {
	phyItf, err := itf.GetField("physical_interface")
	if err != nil {
		return
	}

	mapper.vHost = node

	m := graph.Metadata{"Name": phyItf}
	nodes := mapper.graph.LookupChildren(mapper.root, m, graph.Metadata{"RelationType": "ownership"})
	switch {
	case len(nodes) == 0:
		logging.GetLogger().Errorf("Physical interface %s not found", phyItf)
		return
	case len(nodes) > 1:
		logging.GetLogger().Errorf("Multiple physical interfaces found: %v", nodes)
		return
	}

	mapper.linkToVhost(nodes[0])

	for _, n := range mapper.pendingLinks {
		mapper.linkToVhost(n)
	}
	mapper.pendingLinks = mapper.pendingLinks[:0]

	mapper.graph.AddMetadata(nodes[0], "MPLSUDPPort", mapper.mplsUDPPort)
}
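// injectPacket asks the given agent to inject a packet through a WebSocket
// message and forwards the agent reply, or an error string, to the status
// channel.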
func (pc *PacketInjectorClient) injectPacket(host string, pp *PacketParams, status chan *string) {
	msg := shttp.NewWSMessage(Namespace, "InjectPacket", pp)

	ch := make(chan *json.RawMessage)
	defer close(ch)

	pc.replyChanMutex.Lock()
	pc.replyChan[msg.UUID] = ch
	pc.replyChanMutex.Unlock()

	defer func() {
		pc.replyChanMutex.Lock()
		delete(pc.replyChan, msg.UUID)
		pc.replyChanMutex.Unlock()
	}()

	if !pc.WSServer.SendWSMessageTo(msg, host) {
		e := fmt.Sprintf("Unable to send message to agent: %s", host)
		logging.GetLogger().Errorf(e)
		status <- &e
		return
	}

	data := <-ch

	var reply string
	if err := json.Unmarshal([]byte(*data), &reply); err != nil {
		e := fmt.Sprintf("Error while reading reply from: %s", host)
		logging.GetLogger().Errorf(e)
		status <- &e
		return
	}

	status <- &reply
}
func NewFlowProbeBundleFromConfig(tb *probes.TopologyProbeBundle, g *graph.Graph, fta *flow.TableAllocator) *FlowProbeBundle {
	list := config.GetConfig().GetStringSlice("agent.flow.probes")
	logging.GetLogger().Infof("Flow probes: %v", list)

	gfe := mappings.NewGraphFlowEnhancer(g)

	var aclient *analyzer.Client
	addr, port, err := config.GetAnalyzerClientAddr()
	if err != nil {
		logging.GetLogger().Errorf("Unable to parse analyzer client: %s", err.Error())
		return nil
	}

	if addr != "" {
		aclient, err = analyzer.NewClient(addr, port)
		if err != nil {
			logging.GetLogger().Errorf("Analyzer client error %s:%d : %s", addr, port, err.Error())
			return nil
		}
	}

	probes := make(map[string]probe.Probe)
	for _, t := range list {
		if _, ok := probes[t]; ok {
			continue
		}

		switch t {
		case "ovssflow":
			o := NewOvsSFlowProbesHandler(tb, g)
			if o != nil {
				ofe := mappings.NewOvsFlowEnhancer(g)
				pipeline := mappings.NewFlowMappingPipeline(gfe, ofe)
				probes[t] = FlowProbe{fpi: o, pipeline: pipeline, client: aclient}
			}
		case "gopacket":
			o := NewGoPacketProbesHandler(g)
			if o != nil {
				pipeline := mappings.NewFlowMappingPipeline(gfe)
				gopacket := FlowProbe{fpi: o, pipeline: pipeline, client: aclient}
				probes["afpacket"] = gopacket
				probes["pcap"] = gopacket
			}
		default:
			logging.GetLogger().Errorf("unknown probe type %s", t)
		}
	}

	p := probe.NewProbeBundle(probes)

	return &FlowProbeBundle{
		ProbeBundle:        *p,
		Graph:              g,
		FlowTableAllocator: fta,
	}
}
func (g GremlinBackend) SetMetadata(i interface{}, meta Metadata) bool {
	var e graphElement
	var elType string

	switch i.(type) {
	case *Node:
		e = i.(*Node).graphElement
		elType = "V"
	case *Edge:
		e = i.(*Edge).graphElement
		// edges are addressed with g.E(), not g.V()
		elType = "E"
	}

	properties, err := idToPropertiesString(e.ID)
	if err != nil {
		logging.GetLogger().Errorf("Error while retrieving the element: %s", err.Error())
		return false
	}

	query := "g." + elType + "().has(" + properties + ")"

	els, err := g.client.QueryElements(query)
	if err != nil || len(els) == 0 {
		return false
	}

	if len(els) > 1 {
		logging.GetLogger().Errorf("Found more than one element for this ID: " + string(e.ID))
		return false
	}
	el := els[0]

	query = "g." + elType + "(" + string(el.ID) + ").properties().drop()"
	_, err = g.client.Query(query)
	if err != nil {
		logging.GetLogger().Errorf("Gremlin query error: %s, %s", query, err.Error())
		return false
	}

	j := meta.String()

	query = "g." + elType + "(" + string(el.ID) + ")"
	query += `.sideEffect{v = it; ["_ID": "` + string(e.ID) + `"`
	query += `, "_host": "` + string(e.host) + `"`
	query += `,` + j[1:len(j)-1] + `]`
	query += `.each{v.get().property(it.key, it.value)}}`

	_, err = g.client.Query(query)
	if err != nil {
		logging.GetLogger().Errorf("Gremlin query error: %s, %s", query, err.Error())
		return false
	}

	return true
}
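// serveMessages upgrades an authenticated HTTP request to a WebSocket
// connection, rejecting an X-Host-ID already used by another client, then
// runs the read/write pumps until the connection terminates.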
func (s *WSServer) serveMessages(w http.ResponseWriter, r *auth.AuthenticatedRequest) {
	// if an X-Host-ID header is specified, avoid having the same ID twice
	hostID := r.Header.Get("X-Host-ID")
	if hostID != "" {
		s.RLock()
		for c := range s.clients {
			if c.host == hostID {
				logging.GetLogger().Errorf("host_id error, connection from %s(%s) conflicts with another one", r.RemoteAddr, hostID)
				w.WriteHeader(http.StatusConflict)
				s.RUnlock()
				return
			}
		}
		s.RUnlock()
	}

	var upgrader = websocket.Upgrader{
		ReadBufferSize:  1024,
		WriteBufferSize: 1024,
	}

	conn, err := upgrader.Upgrade(w, &r.Request, nil)
	if err != nil {
		return
	}

	c := &WSClient{
		read:   make(chan []byte, maxMessages),
		send:   make(chan []byte, maxMessages),
		conn:   conn,
		server: s,
		host:   hostID,
		kind:   r.Header.Get("X-Client-Type"),
	}
	logging.GetLogger().Infof("New WebSocket Connection from %s : URI path %s", conn.RemoteAddr().String(), r.URL.Path)

	s.register <- c

	var wg sync.WaitGroup
	wg.Add(2)

	quit := make(chan struct{})

	go c.writePump(&wg, quit)
	go c.processMessages(&wg, quit)

	c.readPump()

	quit <- struct{}{}
	quit <- struct{}{}

	close(c.read)
	close(c.send)

	wg.Wait()
}
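// OnMessage applies graph events (sync requests, node and edge additions,
// updates and deletions) received over WebSocket to the server graph.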
func (s *GraphServer) OnMessage(c *shttp.WSClient, msg shttp.WSMessage) {
	if msg.Namespace != Namespace {
		return
	}

	s.Graph.Lock()
	defer s.Graph.Unlock()

	msgType, obj, err := UnmarshalWSMessage(msg)
	if err != nil {
		logging.GetLogger().Errorf("Graph: Unable to parse the event %v: %s", msg, err.Error())
		return
	}

	switch msgType {
	case "SyncRequest":
		reply := shttp.NewWSMessage(Namespace, "SyncReply", s.Graph.WithContext(obj.(GraphContext)))
		c.SendWSMessage(reply)
	case "SubGraphDeleted":
		n := obj.(*Node)

		logging.GetLogger().Debugf("Got SubGraphDeleted event from the node %s", n.ID)

		node := s.Graph.GetNode(n.ID)
		if node != nil {
			s.Graph.DelSubGraph(node)
		}
	case "NodeUpdated":
		n := obj.(*Node)
		node := s.Graph.GetNode(n.ID)
		if node != nil {
			s.Graph.SetMetadata(node, n.metadata)
		}
	case "NodeDeleted":
		s.Graph.DelNode(obj.(*Node))
	case "NodeAdded":
		n := obj.(*Node)
		if s.Graph.GetNode(n.ID) == nil {
			s.Graph.AddNode(n)
		}
	case "EdgeUpdated":
		e := obj.(*Edge)
		edge := s.Graph.GetEdge(e.ID)
		if edge != nil {
			s.Graph.SetMetadata(edge, e.metadata)
		}
	case "EdgeDeleted":
		s.Graph.DelEdge(obj.(*Edge))
	case "EdgeAdded":
		e := obj.(*Edge)
		if s.Graph.GetEdge(e.ID) == nil {
			s.Graph.AddEdge(e)
		}
	}
}
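// NewAgentAnalyzerServerConn creates the analyzer-side connection used to
// receive flows from agents: a TLS listener on port+1 when X509 certificates
// are configured, a plain UDP socket otherwise.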
func NewAgentAnalyzerServerConn(addr *net.UDPAddr) (a *AgentAnalyzerServerConn, err error) {
	a = &AgentAnalyzerServerConn{mode: UDP}
	certPEM := config.GetConfig().GetString("analyzer.X509_cert")
	keyPEM := config.GetConfig().GetString("analyzer.X509_key")
	clientCertPEM := config.GetConfig().GetString("agent.X509_cert")

	if len(certPEM) > 0 && len(keyPEM) > 0 {
		cert, err := tls.LoadX509KeyPair(certPEM, keyPEM)
		if err != nil {
			logging.GetLogger().Fatalf("Can't read X509 key pair set in config: cert '%s' key '%s'", certPEM, keyPEM)
		}

		rootPEM, err := ioutil.ReadFile(clientCertPEM)
		if err != nil {
			logging.GetLogger().Fatalf("Failed to open root certificate '%s': %s", clientCertPEM, err.Error())
		}

		roots := x509.NewCertPool()
		ok := roots.AppendCertsFromPEM([]byte(rootPEM))
		if !ok {
			logging.GetLogger().Fatal("Failed to parse root certificate " + clientCertPEM)
		}

		cfgTLS := &tls.Config{
			ClientCAs:                roots,
			ClientAuth:               tls.RequireAndVerifyClientCert,
			Certificates:             []tls.Certificate{cert},
			MinVersion:               tls.VersionTLS12,
			PreferServerCipherSuites: true,
			CipherSuites: []uint16{
				tls.TLS_RSA_WITH_AES_128_CBC_SHA,
				tls.TLS_RSA_WITH_AES_256_CBC_SHA,
				tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
				tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
				tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
				tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
				tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
				tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
			},
		}
		cfgTLS.BuildNameToCertificate()

		a.tlsListen, err = tls.Listen("tcp", fmt.Sprintf("%s:%d", common.IPToString(addr.IP), addr.Port+1), cfgTLS)
		if err != nil {
			return nil, err
		}
		a.mode = TLS
		logging.GetLogger().Info("Analyzer listens for agents on a TLS socket")
		return a, nil
	}

	a.udpConn, err = net.ListenUDP("udp", addr)
	logging.GetLogger().Info("Analyzer listens for agents on a UDP socket")
	return a, err
}
func (mapper *OpenContrailMapper) linkToVhost(node *graph.Node) {
	name := node.Metadata()["Name"].(string)

	if mapper.vHost != nil {
		md := graph.Metadata{"RelationType": "layer2"}
		if !mapper.graph.AreLinked(node, mapper.vHost, md) {
			logging.GetLogger().Debugf("Link %s to %s", name, mapper.vHost.Metadata()["Name"].(string))
			mapper.graph.Link(node, mapper.vHost, md)
		}
	} else {
		logging.GetLogger().Debugf("Add node %s to pending link list", name)
		mapper.pendingLinks = append(mapper.pendingLinks, node)
	}
}
func (a *AgentAnalyzerClientConn) Close() {
	if a.tlsConnClient != nil {
		err := a.tlsConnClient.Close()
		if err != nil {
			logging.GetLogger().Errorf("Close error %v", err)
		}
		return
	}

	err := a.udpConn.Close()
	if err != nil {
		logging.GetLogger().Errorf("Close error %v", err)
	}
}
func (a *AgentAnalyzerServerConn) SetDeadline(t time.Time) {
	switch a.mode {
	case TLS:
		err := a.tlsConn.SetReadDeadline(t)
		if err != nil {
			logging.GetLogger().Errorf("SetReadDeadline %v", err)
		}
	case UDP:
		err := a.udpConn.SetDeadline(t)
		if err != nil {
			logging.GetLogger().Errorf("SetDeadline %v", err)
		}
	}
}
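// nodeUpdater consumes node identifiers from nodeUpdaterChan and updates each
// node from the Contrail vrouter agent introspect data, linking interfaces to
// the vhost node.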
func (mapper *OpenContrailMapper) nodeUpdater() {
	body := func(nodeID graph.Identifier) {
		node := mapper.graph.GetNode(nodeID)
		if node == nil {
			return
		}

		name, ok := node.Metadata()["Name"]
		if !ok {
			return
		}

		itf, err := getInterfaceFromIntrospect(mapper.agentHost, mapper.agentPort, name.(string))
		if err != nil {
			logging.GetLogger().Debugf("%s\n", err)
			return
		}

		mapper.graph.Lock()
		defer mapper.graph.Unlock()

		// We get the node again to be sure to have the latest version.
		node = mapper.graph.GetNode(nodeID)
		if node == nil || node.Metadata()["Name"] != name {
			logging.GetLogger().Warningf("Node with name %s has changed", name)
			return
		}

		if node.Metadata()["Type"].(string) == "vhost" {
			mapper.onVhostAdded(node, itf)
		} else {
			logging.GetLogger().Debugf("Retrieve extIDs for %s", name.(string))
			extIDs, err := mapper.retrieveExtIDs(node.Metadata(), itf)
			if err != nil {
				return
			}
			mapper.updateNode(node, extIDs)
			mapper.linkToVhost(node)
		}
	}

	logging.GetLogger().Debugf("Starting OpenContrail updater (using the vrouter agent on %s:%d)", mapper.agentHost, mapper.agentPort)

	for nodeID := range mapper.nodeUpdaterChan {
		// The node update is launched in a goroutine because several
		// retries may be needed to get the interface from the Contrail
		// introspect API.
		go body(nodeID)
	}

	logging.GetLogger().Debugf("Stopping OpenContrail updater")
}
func (a *AgentAnalyzerServerConn) Close() {
	switch a.mode {
	case TLS:
		err := a.tlsConn.Close()
		if err != nil {
			logging.GetLogger().Errorf("Close error %v", err)
		}
	case UDP:
		err := a.udpConn.Close()
		if err != nil {
			logging.GetLogger().Errorf("Close error %v", err)
		}
	}
}
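// Register adds a network namespace, identified by its path, to the topology
// graph and starts a netlink topology updater for it; namespaces already
// registered only get their use count incremented.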
func (u *NetNSProbe) Register(path string, extraMetadata graph.Metadata) *graph.Node {
	ns, ok := u.pathToNetNS[path]
	if !ok {
		var s syscall.Stat_t

		fd, err := syscall.Open(path, syscall.O_RDONLY, 0)
		if err != nil {
			logging.GetLogger().Errorf("Error registering namespace %s: %s", path, err.Error())
			return nil
		}
		defer syscall.Close(fd)

		if err := syscall.Fstat(fd, &s); err != nil {
			logging.GetLogger().Errorf("Error reading namespace %s: %s", path, err.Error())
			return nil
		}

		ns = &NetNs{path: path, dev: s.Dev, ino: s.Ino}
		u.pathToNetNS[path] = ns
	}

	u.Lock()
	defer u.Unlock()

	nsString := ns.String()

	probe, ok := u.nsnlProbes[nsString]
	if ok {
		probe.useCount++
		logging.GetLogger().Debugf("Increasing counter for namespace %s to %d", nsString, probe.useCount)
		return probe.Root
	}

	u.Graph.Lock()
	defer u.Graph.Unlock()

	logging.GetLogger().Debugf("Network Namespace added: %s", nsString)

	metadata := graph.Metadata{"Name": getNetNSName(path), "Type": "netns", "Path": path}
	if extraMetadata != nil {
		for k, v := range extraMetadata {
			metadata[k] = v
		}
	}

	n := u.Graph.NewNode(graph.GenID(), metadata)
	u.Graph.Link(u.Root, n, graph.Metadata{"RelationType": "ownership"})

	nu := NewNetNsNetLinkTopoUpdater(u.Graph, n)
	go nu.Start(ns)

	u.nsnlProbes[nsString] = nu

	return n
}
func TestDockerShareNamespace(t *testing.T) {
	g := helper.NewGraph(t)

	agent := helper.StartAgentWithConfig(t, confTopology)
	defer agent.Stop()

	setupCmds := []helper.Cmd{
		{"docker run -d -t -i --name test-skydive-docker busybox", false},
		{"docker run -d -t -i --name test-skydive-docker2 --net=container:test-skydive-docker busybox", false},
	}

	tearDownCmds := []helper.Cmd{
		{"docker rm -f test-skydive-docker", false},
		{"docker rm -f test-skydive-docker2", false},
	}

	tr := traversal.NewGraphTraversal(g)

	testPassed := false
	onChange := func(ws *websocket.Conn) {
		g.Lock()
		defer g.Unlock()

		if !testPassed {
			tv := tr.V().Has("Type", "netns", "Manager", "docker")
			logging.GetLogger().Warningf("Looking for docker namespace: %+v (%d)\n", tv.Values(), len(tv.Values()))
			if len(tv.Values()) > 1 {
				t.Error("There should be only one namespace managed by Docker")
				ws.Close()
			} else if len(tv.Values()) == 1 {
				tv = tv.Out().Has("Type", "container", "Docker/ContainerName", traversal.Within("/test-skydive-docker", "/test-skydive-docker2"))
				logging.GetLogger().Warningf("Found namespace, looking for containers: %+v (%d)\n", tv.Values(), len(tv.Values()))
				if len(tv.Values()) == 2 {
					testPassed = true
					ws.Close()
				}
			}
		}
	}

	testTopology(t, g, setupCmds, onChange)
	if !testPassed {
		t.Error("test not executed or failed")
	}

	testCleanup(t, g, tearDownCmds, []string{"test-skydive-docker"})
}