func (c *FakeEvents) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector {
	action := testing.GenericActionImpl{}
	action.Verb = "get-field-selector"
	action.Resource = eventsResource

	c.Fake.Invokes(action, nil)
	return fields.Everything()
}
// EnableK8sWatcher watches for policy, service, and endpoint changes on the
// kubernetes api server defined in the receiver's daemon k8sClient. It
// re-syncs all state from the kubernetes api server at the given
// reSyncPeriod duration.
func (d *Daemon) EnableK8sWatcher(reSyncPeriod time.Duration) error {
	if !d.conf.IsK8sEnabled() {
		return nil
	}

	_, policyController := cache.NewInformer(
		cache.NewListWatchFromClient(d.k8sClient.Extensions().GetRESTClient(),
			"networkpolicies", v1.NamespaceAll, fields.Everything()),
		&v1beta1.NetworkPolicy{},
		reSyncPeriod,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    d.policyAddFn,
			UpdateFunc: d.policyModFn,
			DeleteFunc: d.policyDelFn,
		},
	)
	go policyController.Run(wait.NeverStop)

	_, svcController := cache.NewInformer(
		cache.NewListWatchFromClient(d.k8sClient.Core().GetRESTClient(),
			"services", v1.NamespaceAll, fields.Everything()),
		&v1.Service{},
		reSyncPeriod,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    d.serviceAddFn,
			UpdateFunc: d.serviceModFn,
			DeleteFunc: d.serviceDelFn,
		},
	)
	go svcController.Run(wait.NeverStop)

	_, endpointController := cache.NewInformer(
		cache.NewListWatchFromClient(d.k8sClient.Core().GetRESTClient(),
			"endpoints", v1.NamespaceAll, fields.Everything()),
		&v1.Endpoints{},
		reSyncPeriod,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    d.endpointAddFn,
			UpdateFunc: d.endpointModFn,
			DeleteFunc: d.endpointDelFn,
		},
	)
	go endpointController.Run(wait.NeverStop)

	return nil
}
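// A minimal usage sketch, not from the original source: the daemon enables
// the watcher once during startup. NewDaemon, Config, and conf are
// hypothetical stand-ins for whatever builds the Daemon in this codebase.
func startK8sWatcherSketch(conf *Config) error {
	d, err := NewDaemon(conf) // hypothetical constructor
	if err != nil {
		return err
	}
	// Re-sync all watched Kubernetes state every 5 minutes.
	return d.EnableK8sWatcher(5 * time.Minute)
}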
func addDefaultingFuncs(scheme *runtime.Scheme) error {
	return scheme.AddDefaultingFuncs(
		func(obj *ListOptions) {
			if obj.LabelSelector == nil {
				obj.LabelSelector = labels.Everything()
			}
			if obj.FieldSelector == nil {
				obj.FieldSelector = fields.Everything()
			}
		},
	)
}
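// A hedged sketch of how these defaulting functions get exercised, assuming
// a scheme whose Default method applies registered defaulting funcs (as in
// later apimachinery versions); the wiring below is illustrative, not code
// from the source.
func defaultingSketch() (*ListOptions, error) {
	scheme := runtime.NewScheme()
	if err := addDefaultingFuncs(scheme); err != nil {
		return nil, err
	}
	opts := &ListOptions{} // both selectors start out nil
	scheme.Default(opts)   // afterwards both fall back to Everything()
	return opts, nil
}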
// WatchEndpoints starts the watch of Kubernetes Endpoints resources and
// updates the corresponding store.
func (c *clientImpl) WatchEndpoints(watchCh chan<- interface{}, stopCh <-chan struct{}) {
	source := cache.NewListWatchFromClient(
		c.clientset.CoreClient,
		"endpoints",
		api.NamespaceAll,
		fields.Everything())

	c.epStore, c.epController = cache.NewInformer(
		source,
		&v1.Endpoints{},
		resyncPeriod,
		newResourceEventHandlerFuncs(watchCh))
	go c.epController.Run(stopCh)
}
// WatchIngresses starts the watch of Kubernetes Ingresses resources and
// updates the corresponding store.
func (c *clientImpl) WatchIngresses(labelSelector labels.Selector, watchCh chan<- interface{}, stopCh <-chan struct{}) {
	source := NewListWatchFromClient(
		c.clientset.ExtensionsClient,
		"ingresses",
		api.NamespaceAll,
		fields.Everything(),
		labelSelector)

	c.ingStore, c.ingController = cache.NewInformer(
		source,
		&v1beta1.Ingress{},
		resyncPeriod,
		newResourceEventHandlerFuncs(watchCh))
	go c.ingController.Run(stopCh)
}
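// A sketch of how a caller might fan both watches into one channel and react
// to notifications; newClientImpl is a hypothetical constructor, and the
// reaction (re-reading the stores) is illustrative only.
func watchAllSketch() {
	watchCh := make(chan interface{})
	stopCh := make(chan struct{})
	defer close(stopCh)

	c := newClientImpl() // hypothetical constructor
	c.WatchEndpoints(watchCh, stopCh)
	c.WatchIngresses(labels.Everything(), watchCh, stopCh)

	for range watchCh {
		// Each notification signals that c.epStore or c.ingStore changed;
		// re-read them and rebuild downstream configuration here.
	}
}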
func NewRootListAction(resource unversioned.GroupVersionResource, opts api.ListOptions) ListActionImpl {
	action := ListActionImpl{}
	action.Verb = "list"
	action.Resource = resource

	labelSelector := opts.LabelSelector
	if labelSelector == nil {
		labelSelector = labels.Everything()
	}
	fieldSelector := opts.FieldSelector
	if fieldSelector == nil {
		fieldSelector = fields.Everything()
	}
	action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector}

	return action
}
func NewRootWatchAction(resource unversioned.GroupVersionResource, opts api.ListOptions) WatchActionImpl {
	action := WatchActionImpl{}
	action.Verb = "watch"
	action.Resource = resource

	labelSelector := opts.LabelSelector
	if labelSelector == nil {
		labelSelector = labels.Everything()
	}
	fieldSelector := opts.FieldSelector
	if fieldSelector == nil {
		fieldSelector = fields.Everything()
	}
	action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, opts.ResourceVersion}

	return action
}
func NewDeleteCollectionAction(resource unversioned.GroupVersionResource, namespace string, opts api.ListOptions) DeleteCollectionActionImpl {
	action := DeleteCollectionActionImpl{}
	action.Verb = "delete-collection"
	action.Resource = resource
	action.Namespace = namespace

	labelSelector := opts.LabelSelector
	if labelSelector == nil {
		labelSelector = labels.Everything()
	}
	fieldSelector := opts.FieldSelector
	if fieldSelector == nil {
		fieldSelector = fields.Everything()
	}
	action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector}

	return action
}
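// An illustrative sketch of how these helpers are used when asserting against
// a fake clientset's recorded actions; the "pods" resource and the label set
// are assumptions for the example.
func expectedListActionSketch() ListActionImpl {
	opts := api.ListOptions{
		LabelSelector: labels.SelectorFromSet(labels.Set{"app": "web"}),
		// FieldSelector left nil: the helper defaults it to fields.Everything().
	}
	return NewRootListAction(
		unversioned.GroupVersionResource{Version: "v1", Resource: "pods"},
		opts,
	)
}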
// nsWatch is a generator that watches namespace-related events in the
// kubernetes API and publishes these events to a channel.
func (l *KubeListener) nsWatch(done <-chan struct{}, url string) (chan Event, error) {
	out := make(chan Event, l.namespaceBufferSize)

	// watcher watches all namespaces.
	watcher := cache.NewListWatchFromClient(
		l.kubeClient.CoreClient,
		"namespaces",
		api.NamespaceAll,
		fields.Everything(),
	)

	_, controller := cache.NewInformer(
		watcher,
		&v1.Namespace{},
		0,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				out <- Event{
					Type:   KubeEventAdded,
					Object: obj,
				}
			},
			UpdateFunc: func(old, obj interface{}) {
				out <- Event{
					Type:   KubeEventModified,
					Object: obj,
				}
			},
			DeleteFunc: func(obj interface{}) {
				out <- Event{
					Type:   KubeEventDeleted,
					Object: obj,
				}
			},
		})

	go controller.Run(done)

	return out, nil
}
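// A minimal consumer sketch for the channel returned by nsWatch; the
// KubeListener is assumed to be constructed elsewhere, and the empty url
// argument mirrors the fact that nsWatch does not use it.
func consumeNamespaceEventsSketch(l *KubeListener) error {
	done := make(chan struct{})
	defer close(done)

	events, err := l.nsWatch(done, "")
	if err != nil {
		return err
	}
	for e := range events {
		switch e.Type {
		case KubeEventAdded:
			// handle a new namespace
		case KubeEventModified:
			// handle a changed namespace
		case KubeEventDeleted:
			// handle a removed namespace
		}
	}
	return nil
}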
func main() {
	// Accept a kubernetes config file or try the default location.
	var kubeConfig = flag.String("kubeconfig",
		os.Getenv("HOME")+"/.kube/config", "Kubernetes config file.")
	var romanaConfig = flag.String("romanaconfig",
		os.Getenv("HOME")+"/.romana.yaml", "Romana config file.")
	version := flag.Bool("version", false, "Build Information.")
	flag.Parse()

	if *version {
		fmt.Println(common.BuildInfo())
		return
	}

	if *kubeConfig == "" {
		log.Println("Error: must have a kubernetes config file specified.")
		os.Exit(1)
	}

	if err := initConfig(*romanaConfig); err != nil {
		log.Println("Error reading romana config file: ", err)
		os.Exit(1)
	}

	// Since reading the romana config succeeded above, set rootURL from the config.
	setRomanaRootURL()

	// Try generating a config for kubernetes client-go from the flags passed,
	// so that we can connect to kubernetes using them.
	kConfig, err := clientcmd.BuildConfigFromFlags("", *kubeConfig)
	if err != nil {
		log.Println("Error: ", err.Error())
		os.Exit(1)
	}

	// Get a set of REST clients which connect to kubernetes services
	// from the config generated above.
	restClientSet, err := kubernetes.NewForConfig(kConfig)
	if err != nil {
		log.Println("Error: ", err.Error())
		os.Exit(1)
	}

	// Channel for stopping the watch of node events.
	stop := make(chan struct{}, 1)

	// nodeWatcher is a new ListWatch object created from the restClientSet
	// above for watching node events.
	nodeWatcher := cache.NewListWatchFromClient(
		restClientSet.CoreClient,
		"nodes",
		api.NamespaceAll,
		fields.Everything())

	// Set up notifications for specific events using NewInformer.
	_, nodeInformer := cache.NewInformer(
		nodeWatcher,
		&v1.Node{},
		time.Minute,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    kubernetesAddNodeEventHandler,
			UpdateFunc: kubernetesUpdateNodeEventHandler,
			DeleteFunc: kubernetesDeleteNodeEventHandler,
		},
	)

	log.Println("Starting to receive node events.")
	go nodeInformer.Run(stop)

	// Set up a channel on which to receive signal notifications.
	// We must use a buffered channel or risk missing the signal
	// if we're not ready to receive when the signal is sent.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)

	// Block until a signal is received.
	<-c

	// Stop watching node events.
	close(stop)

	log.Println("Stopped watching node events and quitting watchnodes.")
}
// ProduceNewPolicyEvents produces kubernetes network policy events that aren't
// applied in the romana policy service yet.
func ProduceNewPolicyEvents(out chan Event, done <-chan struct{}, KubeListener *KubeListener) {
	log.Infof("Listening for kubernetes network policies")

	// watcher watches all network policies.
	watcher := cache.NewListWatchFromClient(
		KubeListener.kubeClient.ExtensionsClient,
		"networkpolicies",
		api.NamespaceAll,
		fields.Everything(),
	)

	store, controller := cache.NewInformer(
		watcher,
		&v1beta1.NetworkPolicy{},
		0,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				out <- Event{
					Type:   KubeEventAdded,
					Object: obj,
				}
			},
			UpdateFunc: func(old, obj interface{}) {
				out <- Event{
					Type:   KubeEventModified,
					Object: obj,
				}
			},
			DeleteFunc: func(obj interface{}) {
				out <- Event{
					Type:   KubeEventDeleted,
					Object: obj,
				}
			},
		})

	go controller.Run(done)

	// Give the informer a moment to populate its store before listing it.
	// (A bare `time.Duration = 1` would sleep for one nanosecond.)
	sleepTime := time.Second
	time.Sleep(sleepTime)

	var kubePolicyList []v1beta1.NetworkPolicy
	for _, kp := range store.List() {
		kubePolicyList = append(kubePolicyList, kp.(v1beta1.NetworkPolicy))
	}

	newEvents, oldPolicies, err := KubeListener.syncNetworkPolicies(kubePolicyList)
	if err != nil {
		log.Errorf("Failed to sync romana policies with kube policies, sync failed with %s", err)
	}

	log.Infof("Produce policies detected %d new kubernetes policies and %d old romana policies",
		len(newEvents), len(oldPolicies))

	// Create new kubernetes policies.
	for en := range newEvents {
		out <- newEvents[en]
	}

	// Delete old romana policies.
	// TODO find a way to remove policy deletion from this function. Stas.
	policyUrl, err := KubeListener.restClient.GetServiceUrl("policy")
	if err != nil {
		log.Errorf("Failed to discover policy url before deleting outdated romana policies")
	}

	for k := range oldPolicies {
		err = KubeListener.restClient.Delete(
			fmt.Sprintf("%s/policies/%d", policyUrl, oldPolicies[k].ID),
			nil, &oldPolicies)
		if err != nil {
			log.Errorf("Sync policies detected obsolete policy %d but failed to delete, %s",
				oldPolicies[k].ID, err)
		}
	}
}
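// A wiring sketch, assumptions only: a caller starts the producer in a
// goroutine and drains the channel; the listener construction and the event
// handling are placeholders, not code from the source.
func runPolicyProducerSketch(listener *KubeListener) {
	out := make(chan Event)
	done := make(chan struct{})
	defer close(done)

	go ProduceNewPolicyEvents(out, done, listener)
	for e := range out {
		// Apply e to the romana policy service here.
		_ = e
	}
}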