func NewOvsSFlowProbesHandlerFromConfig(tb *probes.TopologyProbeBundle, g *graph.Graph, p *mappings.FlowMappingPipeline, a *analyzer.Client) *OvsSFlowProbesHandler { probe := tb.GetProbe("ovsdb") if probe == nil { return nil } agent, err := sflow.NewSFlowAgentFromConfig(g) if err != nil { logging.GetLogger().Errorf("Unable to start an OVS SFlow probe handler: %s", err.Error()) return nil } agent.SetMappingPipeline(p) if a != nil { agent.SetAnalyzerClient(a) } expire := config.GetConfig().GetInt("cache.expire") cleanup := config.GetConfig().GetInt("cache.cleanup") o := NewOvsSFlowProbesHandler(probe.(*probes.OvsdbProbe), agent, expire, cleanup) agent.SetProbePathGetter(o) return o }
func BackendFromConfig() (GraphBackend, error) { backend := config.GetConfig().Section("graph").Key("backend").String() if len(backend) == 0 { backend = "memory" } switch backend { case "memory": return NewMemoryBackend() case "gremlin": gremlin := config.GetConfig().Section("graph").Key("gremlin").Strings(":") if len(gremlin) != 2 { return nil, errors.New("Config file is misconfigured, gremlin host:ip error") } host := gremlin[0] port, err := strconv.Atoi(gremlin[1]) if err != nil { return nil, errors.New("Config file is misconfigured, gremlin host:ip error") } return NewGremlinBackend(host, port) default: return nil, errors.New("Config file is misconfigured, graph backend unknown: " + backend) } }
func init() { Analyzer.Flags().String("listen", "127.0.0.1:8082", "address and port for the analyzer API") config.GetConfig().BindPFlag("analyzer.listen", Analyzer.Flags().Lookup("listen")) Analyzer.Flags().Int("flowtable-expire", 600, "expiration time for flowtable entries") config.GetConfig().BindPFlag("analyzer.flowtable_expire", Analyzer.Flags().Lookup("flowtable-expire")) Analyzer.Flags().Int("flowtable-update", 60, "send updated flows to storage every time (second)") config.GetConfig().BindPFlag("analyzer.flowtable_update", Analyzer.Flags().Lookup("flowtable-update")) Analyzer.Flags().String("elasticsearch", "127.0.0.1:9200", "elasticsearch server") config.GetConfig().BindPFlag("storage.elasticsearch", Analyzer.Flags().Lookup("elasticsearch")) Analyzer.Flags().String("etcd", "http://127.0.0.1:2379", "etcd servers") config.GetConfig().BindPFlag("etcd.servers", Analyzer.Flags().Lookup("etcd")) Analyzer.Flags().Bool("embed-etcd", true, "embed etcd") config.GetConfig().BindPFlag("etcd.embedded", Analyzer.Flags().Lookup("embed-etcd")) Analyzer.Flags().Int("etcd-port", 2379, "embedded etcd port") config.GetConfig().BindPFlag("etcd.port", Analyzer.Flags().Lookup("etcd-port")) Analyzer.Flags().String("etcd-datadir", "/tmp/skydive-etcd", "embedded etcd data folder") config.GetConfig().BindPFlag("etcd.data_dir", Analyzer.Flags().Lookup("etcd-datadir")) Analyzer.Flags().String("graph-backend", "memory", "graph backend") config.GetConfig().BindPFlag("graph.backend", Analyzer.Flags().Lookup("graph-backend")) Analyzer.Flags().String("gremlin", "ws://127.0.0.1:8182", "gremlin server") config.GetConfig().BindPFlag("graph.gremlin", Analyzer.Flags().Lookup("gremlin")) }
// Start boots the agent: the websocket server, the optional analyzer
// connection, the topology and flow probe bundles and finally the HTTP API
// server. Any failure during startup is fatal and exits the process.
func (a *Agent) Start() {
	var err error

	go a.WSServer.ListenAndServe()

	addr, port, err := config.GetAnalyzerClientAddr()
	if err != nil {
		logging.GetLogger().Errorf("Unable to parse analyzer client %s", err.Error())
		os.Exit(1)
	}

	// An empty address means no analyzer is configured; the agent then runs
	// standalone and skips graph forwarding.
	if addr != "" {
		authOptions := &shttp.AuthenticationOpts{
			Username: config.GetConfig().GetString("agent.analyzer_username"),
			Password: config.GetConfig().GetString("agent.analyzer_password"),
		}
		authClient := shttp.NewAuthenticationClient(addr, port, authOptions)

		a.WSClient, err = shttp.NewWSAsyncClient(addr, port, "/ws", authClient)
		if err != nil {
			logging.GetLogger().Errorf("Unable to instantiate analyzer client %s", err.Error())
			os.Exit(1)
		}

		// Forward local graph updates to the analyzer over the websocket.
		graph.NewForwarder(a.WSClient, a.Graph)
		a.WSClient.Connect()

		// send a first reset event to the analyzers
		a.Graph.DelSubGraph(a.Root)
	}

	a.TopologyProbeBundle = tprobes.NewTopologyProbeBundleFromConfig(a.Graph, a.Root)
	a.TopologyProbeBundle.Start()

	a.FlowProbeBundle = fprobes.NewFlowProbeBundleFromConfig(a.TopologyProbeBundle, a.Graph)
	a.FlowProbeBundle.Start()

	// On-demand capture relies on etcd, which is only set up when an analyzer
	// address is configured.
	if addr != "" {
		a.EtcdClient, err = etcd.NewEtcdClientFromConfig()
		if err != nil {
			logging.GetLogger().Errorf("Unable to start etcd client %s", err.Error())
			os.Exit(1)
		}

		captureHandler := &api.BasicApiHandler{
			ResourceHandler: &api.CaptureHandler{},
			EtcdKeyAPI:      a.EtcdClient.KeysApi,
		}

		l, err := fprobes.NewOnDemandProbeListener(a.FlowProbeBundle, a.Graph, captureHandler)
		if err != nil {
			logging.GetLogger().Errorf("Unable to start on-demand flow probe %s", err.Error())
			os.Exit(1)
		}
		a.OnDemandProbeListener = l
		a.OnDemandProbeListener.Start()
	}

	go a.HTTPServer.ListenAndServe()
}
func NewNeutronMapperFromConfig(g *graph.Graph) (*NeutronMapper, error) { authURL := config.GetConfig().GetString("openstack.auth_url") username := config.GetConfig().GetString("openstack.username") password := config.GetConfig().GetString("openstack.password") tenantName := config.GetConfig().GetString("openstack.tenant_name") regionName := config.GetConfig().GetString("openstack.region_name") return NewNeutronMapper(g, authURL, username, password, tenantName, regionName) }
func NewGraphFlowEnhancer(g *graph.Graph) (*GraphFlowEnhancer, error) { mapper := &GraphFlowEnhancer{ Graph: g, } expire := config.GetConfig().GetInt("cache.expire") cleanup := config.GetConfig().GetInt("cache.cleanup") mapper.cache = cache.New(time.Duration(expire)*time.Second, time.Duration(cleanup)*time.Second) mapper.cacheUpdaterChan = make(chan string, 200) go mapper.cacheUpdater() return mapper, nil }
func init() { Agent.Flags().String("listen", "127.0.0.1:8081", "address and port for the agent API") config.GetConfig().BindPFlag("agent.listen", Agent.Flags().Lookup("listen")) Agent.Flags().String("ovsdb", "127.0.0.1:6400", "ovsdb connection") config.GetConfig().BindPFlag("ovs.ovsdb", Agent.Flags().Lookup("ovsdb")) Agent.Flags().String("sflow-listen", "127.0.0.1:6345", "listen parameter for the sflow agent") config.GetConfig().BindPFlag("sflow.listen", Agent.Flags().Lookup("sflow-listen")) Agent.Flags().Int("flowtable-expire", 300, "expiration time for flowtable entries") config.GetConfig().BindPFlag("agent.flowtable_expire", Agent.Flags().Lookup("flowtable-expire")) Agent.Flags().Int("flowtable-update", 30, "send updated flows to analyzer every time (second)") config.GetConfig().BindPFlag("agent.flowtable_update", Agent.Flags().Lookup("flowtable-update")) }
func initLogger() error { initSkydiveLogger() cfg := config.GetConfig() if cfg == nil { return nil } sec, err := cfg.GetSection("logging") if err != nil { return nil } for cfgPkg, cfgLvl := range sec.KeysHash() { pkg := strings.TrimSpace(cfgPkg) lvl := strings.TrimSpace(cfgLvl) if pkg == "default" { err = newLogger("default", lvl) } else { err = newLogger("github.com/redhat-cip/skydive/"+pkg, lvl) } if err != nil { return errors.New("Can't parse [logging] section line : \"" + pkg + " " + lvl + "\" " + err.Error()) } } return nil }
func BackendFromConfig() (GraphBackend, error) { backend := config.GetConfig().GetString("graph.backend") if len(backend) == 0 { backend = "memory" } switch backend { case "memory": return NewMemoryBackend() case "gremlin": addr, port, err := getGremlinAddrPort() if err != nil { return nil, err } return NewGremlinBackend(addr, port) case "titangraph": addr, port, err := getGremlinAddrPort() if err != nil { return nil, err } return NewTitangraphBackend(addr, port) default: return nil, errors.New("Config file is misconfigured, graph backend unknown: " + backend) } }
func NewNetLinkMapper() (*NetLinkMapper, error) { mapper := &NetLinkMapper{} expire, err := config.GetConfig().Section("cache").Key("expire").Int() if err != nil { return nil, err } cleanup, err := config.GetConfig().Section("cache").Key("cleanup").Int() if err != nil { return nil, err } mapper.cache = cache.New(time.Duration(expire)*time.Second, time.Duration(cleanup)*time.Second) mapper.cacheUpdaterChan = make(chan uint32) go mapper.cacheUpdater() return mapper, nil }
func NewSFlowProbeFromConfig(g *graph.Graph) (*SFlowProbe, error) { addr, port, err := config.GetHostPortAttributes("sflow", "listen") if err != nil { return nil, err } expire, err := config.GetConfig().Section("cache").Key("expire").Int() if err != nil { return nil, err } cleanup, err := config.GetConfig().Section("cache").Key("cleanup").Int() if err != nil { return nil, err } return NewSFlowProbe(addr, port, g, expire, cleanup) }
func NewFlowProbeBundleFromConfig(tb *probes.TopologyProbeBundle, g *graph.Graph) *FlowProbeBundle { list := config.GetConfig().GetStringSlice("agent.flow.probes") logging.GetLogger().Infof("Flow probes: %v", list) gfe := mappings.NewGraphFlowEnhancer(g) var aclient *analyzer.Client addr, port, err := config.GetAnalyzerClientAddr() if err != nil { logging.GetLogger().Errorf("Unable to parse analyzer client: %s", err.Error()) return nil } if addr != "" { aclient, err = analyzer.NewClient(addr, port) if err != nil { logging.GetLogger().Errorf("Analyzer client error %s:%d : %s", addr, port, err.Error()) return nil } } probes := make(map[string]probe.Probe) for _, t := range list { if _, ok := probes[t]; ok { continue } switch t { case "ovssflow": ofe := mappings.NewOvsFlowEnhancer(g) pipeline := mappings.NewFlowMappingPipeline(gfe, ofe) o := NewOvsSFlowProbesHandler(tb, g, pipeline, aclient) if o != nil { probes[t] = o } case "pcap": pipeline := mappings.NewFlowMappingPipeline(gfe) o := NewPcapProbesHandler(tb, g, pipeline, aclient) if o != nil { probes[t] = o } default: logging.GetLogger().Errorf("unknown probe type %s", t) } } p := probe.NewProbeBundle(probes) return &FlowProbeBundle{ ProbeBundle: *p, Graph: g, } }
func BackendFromConfig() (GraphBackend, error) { backend := config.GetConfig().GetString("graph.backend") if len(backend) == 0 { backend = "memory" } switch backend { case "memory": return NewMemoryBackend() case "gremlin": endpoint := config.GetConfig().GetString("graph.gremlin") return NewGremlinBackend(endpoint) case "titangraph": endpoint := config.GetConfig().GetString("graph.gremlin") return NewTitangraphBackend(endpoint) default: return nil, errors.New("Config file is misconfigured, graph backend unknown: " + backend) } }
// start runs the sFlow agent receive loop: it binds the UDP socket, creates
// the flow table with the configured expire/update callbacks and then
// alternates between table maintenance and datagram ingestion until the
// running flag is cleared.
func (sfa *SFlowAgent) start() error {
	addr := net.UDPAddr{
		Port: sfa.Port,
		IP:   net.ParseIP(sfa.Addr),
	}
	conn, err := net.ListenUDP("udp", &addr)
	if err != nil {
		logging.GetLogger().Errorf("Unable to listen on port %d: %s", sfa.Port, err.Error())
		return err
	}
	defer conn.Close()
	// NOTE(review): the deadline is set only once here; presumably
	// feedFlowTable refreshes it on each read so the loop can keep observing
	// flush requests and the running flag — confirm.
	conn.SetDeadline(time.Now().Add(1 * time.Second))

	sfa.wg.Add(1)
	defer sfa.wg.Done()

	sfa.running.Store(true)

	sfa.flowTable = flow.NewTable()
	defer sfa.flowTable.UnregisterAll()

	// Periodic expiration and update of flow entries; intervals come from the
	// agent configuration (seconds).
	cfgFlowtable_expire := config.GetConfig().GetInt("agent.flowtable_expire")
	sfa.flowTable.RegisterExpire(sfa.asyncFlowPipeline, time.Duration(cfgFlowtable_expire)*time.Second)

	cfgFlowtable_update := config.GetConfig().GetInt("agent.flowtable_update")
	sfa.flowTable.RegisterUpdated(sfa.asyncFlowPipeline, time.Duration(cfgFlowtable_update)*time.Second)

	for sfa.running.Load() == true {
		select {
		case now := <-sfa.flowTable.GetExpireTicker():
			sfa.flowTable.Expire(now)
		case now := <-sfa.flowTable.GetUpdatedTicker():
			sfa.flowTable.Updated(now)
		case <-sfa.flush:
			// Explicit flush request: expire everything now, then ack.
			sfa.flowTable.ExpireNow()
			sfa.flushDone <- true
		default:
			// No maintenance pending: read the next sFlow datagram.
			sfa.feedFlowTable(conn)
		}
	}

	return nil
}
func NewGraphFlowEnhancer(g *graph.Graph) (*GraphFlowEnhancer, error) { mapper := &GraphFlowEnhancer{ Graph: g, } expire, err := config.GetConfig().Section("cache").Key("expire").Int() if err != nil { return nil, err } cleanup, err := config.GetConfig().Section("cache").Key("cleanup").Int() if err != nil { return nil, err } mapper.cache = cache.New(time.Duration(expire)*time.Second, time.Duration(cleanup)*time.Second) mapper.cacheUpdaterChan = make(chan string, 200) go mapper.cacheUpdater() return mapper, nil }
func NewServerFromConfig(router *mux.Router) (*Server, error) { addr, port, err := config.GetHostPortAttributes("analyzer", "listen") if err != nil { logging.GetLogger().Errorf("Configuration error: %s", err.Error()) return nil, err } embedEtcd := config.GetConfig().GetBool("etcd.embedded") return NewServer(addr, port, router, embedEtcd) }
func (a *SFlowAgentAllocator) Alloc(uuid string, p flow.FlowProbePathSetter) (*SFlowAgent, error) { address := config.GetConfig().GetString("sflow.bind_address") if address == "" { address = "127.0.0.1" } min := config.GetConfig().GetInt("sflow.port_min") if min == 0 { min = 6345 } max := config.GetConfig().GetInt("sflow.port_max") if max == 0 { max = 6355 } a.Lock() defer a.Unlock() // check if there is an already allocated agent for this uuid for _, agent := range a.allocated { if uuid == agent.UUID { return agent, AgentAlreadyAllocated } } for i := min; i != max+1; i++ { if _, ok := a.allocated[i]; !ok { s := NewSFlowAgent(uuid, address, i, a.AnalyzerClient, a.FlowMappingPipeline) s.SetFlowProbePathSetter(p) a.allocated[i] = s s.Start() return s, nil } } return nil, errors.New("sflow port exhausted") }
func NewBasicAuthenticationBackendFromConfig() (*BasicAuthenticationBackend, error) { f := config.GetConfig().GetString("auth.basic.file") if _, err := os.Stat(f); err != nil { return nil, err } // TODO(safchain) add more providers h := auth.HtpasswdFileProvider(f) return &BasicAuthenticationBackend{ auth.NewBasicAuthenticator(basicAuthRealm, h), }, nil }
func NewAuthenticationBackendFromConfig() (AuthenticationBackend, error) { t := config.GetConfig().GetString("auth.type") switch t { case "basic": return NewBasicAuthenticationBackendFromConfig() case "keystone": return NewKeystoneAuthenticationBackendFromConfig(), nil default: return NewNoAuthenticationBackend(), nil } }
func NewServer(addr string, port int, router *mux.Router) (*Server, error) { backend, err := graph.BackendFromConfig() if err != nil { return nil, err } g, err := graph.NewGraph(backend) if err != nil { return nil, err } tserver := topology.NewServer(g, addr, port, router) tserver.RegisterStaticEndpoints() tserver.RegisterRpcEndpoints() alertmgr := graph.NewAlert(g, router) alertmgr.RegisterRpcEndpoints() gserver, err := graph.NewServerFromConfig(g, alertmgr, router) if err != nil { return nil, err } gfe, err := mappings.NewGraphFlowEnhancer(g) if err != nil { return nil, err } pipeline := mappings.NewFlowMappingPipeline([]mappings.FlowEnhancer{gfe}) flowtable := flow.NewFlowTable() server := &Server{ Addr: addr, Port: port, Router: router, TopoServer: tserver, GraphServer: gserver, FlowMappingPipeline: pipeline, FlowTable: flowtable, } server.RegisterStaticEndpoints() server.RegisterRpcEndpoints() cfgFlowtable_expire, err := config.GetConfig().Section("analyzer").Key("flowtable_expire").Int() if err != nil || cfgFlowtable_expire < 1 { logging.GetLogger().Error("Config flowTable_expire invalid value ", cfgFlowtable_expire, err.Error()) return nil, err } go flowtable.AsyncExpire(server.flowExpire, time.Duration(cfgFlowtable_expire)*time.Minute) return server, nil }
// NewNeutronMapper authenticates against the given OpenStack endpoint and
// returns a mapper that enriches graph nodes with Neutron metadata. Lookups
// are cached and node updates are processed asynchronously.
func NewNeutronMapper(g *graph.Graph, authURL string, username string, password string, tenantName string, regionName string) (*NeutronMapper, error) {
	mapper := &NeutronMapper{graph: g}

	opts := gophercloud.AuthOptions{
		IdentityEndpoint: authURL,
		Username:         username,
		Password:         password,
		TenantName:       tenantName,
		// Re-authenticate automatically when the token expires.
		AllowReauth: true,
	}

	provider, err := openstack.AuthenticatedClient(opts)
	if err != nil {
		return nil, err
	}

	/* TODO(safchain) add config param for the Availability */
	client, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{
		Name:         "neutron",
		Region:       regionName,
		Availability: gophercloud.AvailabilityPublic,
	})
	if err != nil {
		return nil, err
	}
	mapper.client = client

	// Cache timings come from cache.expire / cache.cleanup (seconds).
	expire := config.GetConfig().GetInt("cache.expire")
	cleanup := config.GetConfig().GetInt("cache.cleanup")
	mapper.cache = cache.New(time.Duration(expire)*time.Second, time.Duration(cleanup)*time.Second)
	// Buffered so graph events do not block while updates are processed.
	mapper.nodeUpdaterChan = make(chan graph.Identifier, 500)

	// Subscribe to graph events so node updates trigger Neutron lookups.
	g.AddEventListener(mapper)

	return mapper, nil
}
func getGremlinAddrPort() (string, int, error) { gremlin := strings.Split(config.GetConfig().GetString("graph.gremlin"), ":") if len(gremlin) != 2 { return "", 0, errors.New("Config file is misconfigured, gremlin host:ip error") } addr := gremlin[0] port, err := strconv.Atoi(gremlin[1]) if err != nil { return "", 0, errors.New("Config file is misconfigured, gremlin host:ip error") } return addr, port, nil }
func New() (*ElasticSearchStorage, error) { c := elastigo.NewConn() elasticonfig := strings.Split(config.GetConfig().GetString("storage.elasticsearch"), ":") if len(elasticonfig) != 2 { return nil, ErrBadConfig } c.Domain = elasticonfig[0] c.Port = elasticonfig[1] storage := &ElasticSearchStorage{connection: c} storage.started.Store(false) return storage, nil }
// SetStorageFromConfig installs the flow storage backend selected by
// analyzer.storage. An empty value leaves the server without storage; an
// unknown type or a failing backend terminates the process.
func (s *Server) SetStorageFromConfig() {
	if t := config.GetConfig().GetString("analyzer.storage"); t != "" {
		switch t {
		case "elasticsearch":
			storage, err := elasticseach.New()
			if err != nil {
				logging.GetLogger().Fatalf("Can't connect to ElasticSearch server: %v", err)
			}
			s.SetStorage(storage)
		default:
			logging.GetLogger().Fatalf("Storage type unknown: %s", t)
			// NOTE(review): Fatalf already exits the process, so this
			// os.Exit(1) is unreachable and could be removed.
			os.Exit(1)
		}
		logging.GetLogger().Infof("Using %s as storage", t)
	}
}
// NewNeutronMapper reads OpenStack credentials from the [openstack]
// configuration section, authenticates and returns a mapper that resolves
// Neutron metadata, caching results and refreshing the cache asynchronously.
func NewNeutronMapper() (*NeutronMapper, error) {
	mapper := &NeutronMapper{}

	authURL := config.GetConfig().Section("openstack").Key("auth_url").String()
	username := config.GetConfig().Section("openstack").Key("username").String()
	password := config.GetConfig().Section("openstack").Key("password").String()
	tenantName := config.GetConfig().Section("openstack").Key("tenant_name").String()
	regionName := config.GetConfig().Section("openstack").Key("region_name").String()

	opts := gophercloud.AuthOptions{
		IdentityEndpoint: authURL,
		Username:         username,
		Password:         password,
		TenantName:       tenantName,
	}

	provider, err := openstack.AuthenticatedClient(opts)
	if err != nil {
		return nil, err
	}

	/* TODO(safchain) add config param for the Availability */
	client, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{
		Name:         "neutron",
		Region:       regionName,
		Availability: gophercloud.AvailabilityPublic,
	})
	if err != nil {
		return nil, err
	}
	mapper.client = client

	// Cache timings (seconds) come from the [cache] section; missing or
	// non-integer values are reported as errors.
	expire, err := config.GetConfig().Section("cache").Key("expire").Int()
	if err != nil {
		return nil, err
	}
	cleanup, err := config.GetConfig().Section("cache").Key("cleanup").Int()
	if err != nil {
		return nil, err
	}
	mapper.cache = cache.New(time.Duration(expire)*time.Second, time.Duration(cleanup)*time.Second)
	// Unbuffered: producers rendezvous with the background cache updater.
	mapper.cacheUpdaterChan = make(chan string)

	go mapper.cacheUpdater()

	return mapper, nil
}
func initLogger() (err error) { initSkydiveLogger() cfg := config.GetConfig() for cfgPkg, cfgLvl := range cfg.GetStringMapString("logging") { pkg := strings.TrimSpace(cfgPkg) lvl := strings.TrimSpace(cfgLvl) if pkg == "default" { err = newLogger("default", lvl) } else { err = newLogger("github.com/redhat-cip/skydive/"+pkg, lvl) } if err != nil { return errors.New("Can't parse logging line : \"" + pkg + " " + lvl + "\" " + err.Error()) } } return }
func New() (*ElasticSearchStorage, error) { c := elastigo.NewConn() elasticonfig := strings.Split(config.GetConfig().GetString("storage.elasticsearch"), ":") if len(elasticonfig) != 2 { return nil, ErrBadConfig } c.Domain = elasticonfig[0] c.Port = elasticonfig[1] storage := &ElasticSearchStorage{connection: c} err := storage.initialize() if err != nil { return nil, err } return storage, nil }
func NewTopologyProbeBundleFromConfig(g *graph.Graph, n *graph.Node) *TopologyProbeBundle { list := config.GetConfig().GetStringSlice("agent.topology.probes") // FIX(safchain) once viper setdefault on nested key will be fixed move this // to config init if len(list) == 0 { list = []string{"netlink", "netns"} } logging.GetLogger().Infof("Topology probes: %v", list) probes := make(map[string]probe.Probe) for _, t := range list { if _, ok := probes[t]; ok { continue } switch t { case "netlink": probes[t] = NewNetLinkProbe(g, n) case "netns": probes[t] = NewNetNSProbe(g, n) case "ovsdb": probes[t] = NewOvsdbProbeFromConfig(g, n) case "docker": probes[t] = NewDockerProbeFromConfig(g, n) case "neutron": neutron, err := NewNeutronMapperFromConfig(g) if err != nil { logging.GetLogger().Errorf("Failed to initialize Neutron probe: %s", err.Error()) continue } probes[t] = neutron default: logging.GetLogger().Errorf("unknown probe type %s", t) } } p := probe.NewProbeBundle(probes) return &TopologyProbeBundle{*p} }
func init() { Analyzer.Flags().String("listen", "127.0.0.1:8082", "address and port for the analyzer API") config.GetConfig().BindPFlag("analyzer.listen", Analyzer.Flags().Lookup("listen")) Analyzer.Flags().Int("flowtable-expire", 10, "expiration time for flowtable entries") config.GetConfig().BindPFlag("analyzer.flowtable_expire", Analyzer.Flags().Lookup("flowtable-expire")) Analyzer.Flags().String("elasticsearch", "127.0.0.1:9200", "elasticsearch server") config.GetConfig().BindPFlag("storage.elasticsearch", Analyzer.Flags().Lookup("elasticsearch")) Analyzer.Flags().String("etcd", "http://127.0.0.1:2379", "etcd servers") config.GetConfig().BindPFlag("etcd.servers", Analyzer.Flags().Lookup("etcd")) Analyzer.Flags().Bool("embed-etcd", true, "embed etcd") config.GetConfig().BindPFlag("etcd.embedded", Analyzer.Flags().Lookup("embed-etcd")) Analyzer.Flags().Int("etcd-port", 2379, "embedded etcd port") config.GetConfig().BindPFlag("etcd.port", Analyzer.Flags().Lookup("etcd-port")) Analyzer.Flags().String("etcd-datadir", "/tmp/skydive-etcd", "embedded etcd data folder") config.GetConfig().BindPFlag("etcd.data_dir", Analyzer.Flags().Lookup("etcd-datadir")) }
func init() { Agent.Flags().String("listen", "127.0.0.1:8081", "address and port for the agent API") config.GetConfig().BindPFlag("agent.listen", Agent.Flags().Lookup("listen")) Agent.Flags().String("ovsdb", "127.0.0.1:6400", "ovsdb connection") config.GetConfig().BindPFlag("ovs.ovsdb", Agent.Flags().Lookup("ovsdb")) Agent.Flags().String("graph-backend", "memory", "graph backend") config.GetConfig().BindPFlag("graph.backend", Agent.Flags().Lookup("graph-backend")) Agent.Flags().String("gremlin", "127.0.0.1:8182", "gremlin server") config.GetConfig().BindPFlag("graph.gremlin", Agent.Flags().Lookup("gremlin")) Agent.Flags().String("sflow-listen", "127.0.0.1:6345", "listen parameter for the sflow agent") config.GetConfig().BindPFlag("sflow.listen", Agent.Flags().Lookup("sflow-listen")) Agent.Flags().Int("flowtable-expire", 10, "expiration time for flowtable entries") config.GetConfig().BindPFlag("agent.flowtable_expire", Agent.Flags().Lookup("flowtable-expire")) }