Example #1
// EnableK8sWatcher watches for policy, service, and endpoint changes on the kubernetes
// api server defined in the receiver's daemon k8sClient. It re-syncs all state from the
// kubernetes api server at the given reSyncPeriod.
func (d *Daemon) EnableK8sWatcher(reSyncPeriod time.Duration) error {
	if !d.conf.IsK8sEnabled() {
		return nil
	}

	_, policyController := cache.NewInformer(
		cache.NewListWatchFromClient(d.k8sClient.Extensions().GetRESTClient(),
			"networkpolicies", v1.NamespaceAll, fields.Everything()),
		&v1beta1.NetworkPolicy{},
		reSyncPeriod,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    d.policyAddFn,
			UpdateFunc: d.policyModFn,
			DeleteFunc: d.policyDelFn,
		},
	)
	go policyController.Run(wait.NeverStop)

	_, svcController := cache.NewInformer(
		cache.NewListWatchFromClient(d.k8sClient.Core().GetRESTClient(),
			"services", v1.NamespaceAll, fields.Everything()),
		&v1.Service{},
		reSyncPeriod,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    d.serviceAddFn,
			UpdateFunc: d.serviceModFn,
			DeleteFunc: d.serviceDelFn,
		},
	)
	go svcController.Run(wait.NeverStop)

	_, endpointController := cache.NewInformer(
		cache.NewListWatchFromClient(d.k8sClient.Core().GetRESTClient(),
			"endpoints", v1.NamespaceAll, fields.Everything()),
		&v1.Endpoints{},
		reSyncPeriod,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    d.endpointAddFn,
			UpdateFunc: d.endpointModFn,
			DeleteFunc: d.endpointDelFn,
		},
	)
	go endpointController.Run(wait.NeverStop)

	return nil
}
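The handler methods wired into the informers above (policyAddFn, serviceAddFn, endpointAddFn, and so on) are defined elsewhere on the Daemon; only their signatures matter here. A hypothetical sketch of one such handler, assuming the informer delivers pointers to the registered object type (*v1beta1.NetworkPolicy) and a standard-library logger:

// Hypothetical sketch; the real policyAddFn lives elsewhere in the daemon.
// Informers hand handlers the object type registered with NewInformer,
// here a *v1beta1.NetworkPolicy.
func (d *Daemon) policyAddFn(obj interface{}) {
	np, ok := obj.(*v1beta1.NetworkPolicy)
	if !ok {
		log.Printf("unexpected object type %T in policy add handler", obj)
		return
	}
	log.Printf("network policy added: %s/%s", np.Namespace, np.Name)
}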
Example #2
// WatchEndpoints starts watching Kubernetes Endpoints resources and updates the corresponding store.
func (c *clientImpl) WatchEndpoints(watchCh chan<- interface{}, stopCh <-chan struct{}) {
	source := cache.NewListWatchFromClient(
		c.clientset.CoreClient,
		"endpoints",
		api.NamespaceAll,
		fields.Everything())

	c.epStore, c.epController = cache.NewInformer(
		source,
		&v1.Endpoints{},
		resyncPeriod,
		newResourceEventHandlerFuncs(watchCh))
	go c.epController.Run(stopCh)
}
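The newResourceEventHandlerFuncs helper referenced above is not shown in this example. A plausible sketch, assuming it simply forwards every notification to the watch channel:

// Plausible sketch of the helper used above (the real implementation is not
// shown here): forward every add/update/delete notification to the channel.
func newResourceEventHandlerFuncs(events chan<- interface{}) cache.ResourceEventHandlerFuncs {
	return cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { events <- obj },
		UpdateFunc: func(old, obj interface{}) { events <- obj },
		DeleteFunc: func(obj interface{}) { events <- obj },
	}
}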
Example #3
// nsWatch is a generator that watches namespace-related events in the
// kubernetes API and publishes these events to a channel.
func (l *KubeListener) nsWatch(done <-chan struct{}, url string) (chan Event, error) {
	out := make(chan Event, l.namespaceBufferSize)

	// watcher watches all namespaces.
	watcher := cache.NewListWatchFromClient(
		l.kubeClient.CoreClient,
		"namespaces",
		api.NamespaceAll,
		fields.Everything(),
	)

	_, controller := cache.NewInformer(
		watcher,
		&v1.Namespace{},
		0,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				out <- Event{
					Type:   KubeEventAdded,
					Object: obj,
				}
			},
			UpdateFunc: func(old, obj interface{}) {
				out <- Event{
					Type:   KubeEventModified,
					Object: obj,
				}
			},
			DeleteFunc: func(obj interface{}) {
				out <- Event{
					Type:   KubeEventDeleted,
					Object: obj,
				}
			},
		})

	go controller.Run(done)

	return out, nil
}
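A hypothetical consumer of the returned channel, draining events until done is closed (consumeNamespaceEvents is not part of the original code):

// consumeNamespaceEvents is a hypothetical consumer: it logs each namespace
// event from the channel returned by nsWatch until done is closed.
func consumeNamespaceEvents(l *KubeListener, done <-chan struct{}, url string) error {
	events, err := l.nsWatch(done, url)
	if err != nil {
		return err
	}
	for {
		select {
		case ev := <-events:
			log.Printf("namespace event %v for object %v", ev.Type, ev.Object)
		case <-done:
			return nil
		}
	}
}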
Example #4
File: main.go Project: romana/core
func main() {
	// Accept a kubernetes config file or try the default location.
	var kubeConfig = flag.String("kubeconfig", os.Getenv("HOME")+"/.kube/config",
		"Kubernetes config file.")
	var romanaConfig = flag.String("romanaconfig", os.Getenv("HOME")+"/.romana.yaml",
		"Romana config file.")
	version := flag.Bool("version", false, "Build Information.")
	flag.Parse()

	if *version {
		fmt.Println(common.BuildInfo())
		return
	}

	if *kubeConfig == "" {
		log.Println("Error: must have kubernetes config files specified.")
		os.Exit(1)
	}

	if err := initConfig(*romanaConfig); err != nil {
		log.Println("Error reading romana config file: ", err)
		os.Exit(1)
	}

	// Since reading the romana config succeeded above, set rootURL from the config.
	setRomanaRootURL()

	// Try generating config for kubernetes client-go from flags passed,
	// so that we can connect to kubernetes using them.
	kConfig, err := clientcmd.BuildConfigFromFlags("", *kubeConfig)
	if err != nil {
		log.Println("Error: ", err.Error())
		os.Exit(1)
	}

	// Get a set of REST clients which connect to kubernetes services
	// from the config generated above.
	restClientSet, err := kubernetes.NewForConfig(kConfig)
	if err != nil {
		log.Println("Error: ", err.Error())
		os.Exit(1)
	}

	// Channel for stopping watching node events.
	stop := make(chan struct{}, 1)

	// nodeWatcher is a new ListWatch object created from the specified
	// restClientSet above for watching node events.
	nodeWatcher := cache.NewListWatchFromClient(
		restClientSet.CoreClient,
		"nodes",
		api.NamespaceAll,
		fields.Everything())

	// Set up notifications for specific events using NewInformer.
	_, nodeInformer := cache.NewInformer(
		nodeWatcher,
		&v1.Node{},
		time.Minute,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    kubernetesAddNodeEventHandler,
			UpdateFunc: kubernetesUpdateNodeEventHandler,
			DeleteFunc: kubernetesDeleteNodeEventHandler,
		},
	)

	log.Println("Starting receving node events.")
	go nodeInformer.Run(stop)

	// Set up channel on which to send signal notifications.
	// We must use a buffered channel or risk missing the signal
	// if we're not ready to receive when the signal is sent.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)

	// Block until a signal is received.
	<-c

	// Stop watching node events.
	close(stop)
	log.Println("Stopped watching node events and quitting watchnodes.")
}
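The node event handlers registered with the informer are defined elsewhere in the project. A hypothetical sketch of the add handler, assuming informers deliver *v1.Node pointers:

// Hypothetical sketch; the real kubernetesAddNodeEventHandler lives elsewhere
// in romana/core. Informers deliver pointers to the registered object type.
func kubernetesAddNodeEventHandler(obj interface{}) {
	node, ok := obj.(*v1.Node)
	if !ok {
		log.Printf("unexpected object type %T in node add handler", obj)
		return
	}
	log.Printf("node added: %s", node.Name)
}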
Example #5
// ProduceNewPolicyEvents produces kubernetes network policy events that aren't
// applied in the romana policy service yet.
func ProduceNewPolicyEvents(out chan Event, done <-chan struct{}, KubeListener *KubeListener) {
	var sleepTime = time.Second
	log.Infof("Listening for kubernetes network policies")

	// watcher watches all network policies.
	watcher := cache.NewListWatchFromClient(
		KubeListener.kubeClient.ExtensionsClient,
		"networkpolicies",
		api.NamespaceAll,
		fields.Everything(),
	)

	store, controller := cache.NewInformer(
		watcher,
		&v1beta1.NetworkPolicy{},
		0,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				out <- Event{
					Type:   KubeEventAdded,
					Object: obj,
				}
			},
			UpdateFunc: func(old, obj interface{}) {
				out <- Event{
					Type:   KubeEventModified,
					Object: obj,
				}
			},
			DeleteFunc: func(obj interface{}) {
				out <- Event{
					Type:   KubeEventDeleted,
					Object: obj,
				}
			},
		})

	go controller.Run(done)
	// Give the controller a moment to populate its store before listing it.
	time.Sleep(sleepTime)

	var kubePolicyList []v1beta1.NetworkPolicy
	for _, kp := range store.List() {
		// The informer store holds pointers to the registered object type.
		kubePolicyList = append(kubePolicyList, *kp.(*v1beta1.NetworkPolicy))
	}

	newEvents, oldPolicies, err := KubeListener.syncNetworkPolicies(kubePolicyList)
	if err != nil {
		log.Errorf("Failed to sync romana policies with kube policies, sync failed with %s", err)
	}

	log.Infof("Produce policies detected %d new kubernetes policies and %d old romana policies", len(newEvents), len(oldPolicies))

	// Create new kubernetes policies
	for en := range newEvents {
		out <- newEvents[en]
	}

	// Delete old romana policies.
	// TODO find a way to remove policy deletion from this function. Stas.
	policyUrl, err := KubeListener.restClient.GetServiceUrl("policy")
	if err != nil {
		log.Errorf("Failed to discover policy url before deleting outdated romana policies")
	}

	for k := range oldPolicies {
		err = KubeListener.restClient.Delete(fmt.Sprintf("%s/policies/%d", policyUrl, oldPolicies[k].ID), nil, &oldPolicies)
		if err != nil {
			log.Errorf("Sync policies detected obsolete policy %d but failed to delete, %s", oldPolicies[k].ID, err)
		}
	}
}
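The fixed sleep after starting the controller is a race-prone way to wait for the initial list to land in the store. On client-go versions that provide cache.WaitForCacheSync, the initial sync can be awaited explicitly; a minimal sketch, assuming the same controller and done channel as above:

	go controller.Run(done)
	// Block until the initial list has been delivered to the store (or until
	// done is closed), instead of sleeping for a fixed interval.
	if !cache.WaitForCacheSync(done, controller.HasSynced) {
		log.Errorf("timed out waiting for the network policy cache to sync")
		return
	}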
Example #6
// Run implements the TargetProvider interface.
func (k *Kubernetes) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
	defer close(ch)

	rclient := k.client.Core().GetRESTClient()

	switch k.role {
	case "endpoints":
		elw := cache.NewListWatchFromClient(rclient, "endpoints", api.NamespaceAll, nil)
		slw := cache.NewListWatchFromClient(rclient, "services", api.NamespaceAll, nil)
		plw := cache.NewListWatchFromClient(rclient, "pods", api.NamespaceAll, nil)
		eps := NewEndpoints(
			k.logger.With("kubernetes_sd", "endpoint"),
			cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod),
			cache.NewSharedInformer(elw, &apiv1.Endpoints{}, resyncPeriod),
			cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncPeriod),
		)
		go eps.endpointsInf.Run(ctx.Done())
		go eps.serviceInf.Run(ctx.Done())
		go eps.podInf.Run(ctx.Done())

		for !eps.serviceInf.HasSynced() {
			time.Sleep(100 * time.Millisecond)
		}
		for !eps.endpointsInf.HasSynced() {
			time.Sleep(100 * time.Millisecond)
		}
		for !eps.podInf.HasSynced() {
			time.Sleep(100 * time.Millisecond)
		}
		eps.Run(ctx, ch)

	case "pod":
		plw := cache.NewListWatchFromClient(rclient, "pods", api.NamespaceAll, nil)
		pod := NewPod(
			k.logger.With("kubernetes_sd", "pod"),
			cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncPeriod),
		)
		go pod.informer.Run(ctx.Done())

		for !pod.informer.HasSynced() {
			time.Sleep(100 * time.Millisecond)
		}
		pod.Run(ctx, ch)

	case "service":
		slw := cache.NewListWatchFromClient(rclient, "services", api.NamespaceAll, nil)
		svc := NewService(
			k.logger.With("kubernetes_sd", "service"),
			cache.NewSharedInformer(slw, &apiv1.Service{}, resyncPeriod),
		)
		go svc.informer.Run(ctx.Done())

		for !svc.informer.HasSynced() {
			time.Sleep(100 * time.Millisecond)
		}
		svc.Run(ctx, ch)

	case "node":
		nlw := cache.NewListWatchFromClient(rclient, "nodes", api.NamespaceAll, nil)
		node := NewNode(
			k.logger.With("kubernetes_sd", "node"),
			cache.NewSharedInformer(nlw, &apiv1.Node{}, resyncPeriod),
		)
		go node.informer.Run(ctx.Done())

		for !node.informer.HasSynced() {
			time.Sleep(100 * time.Millisecond)
		}
		node.Run(ctx, ch)

	default:
		k.logger.Errorf("unknown Kubernetes discovery kind %q", k.role)
	}

	<-ctx.Done()
}
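A hypothetical caller for Run, assuming a constructed *Kubernetes provider and the usual imports: start discovery under a cancellable context and drain target-group updates until Run closes the channel (runDiscovery is not part of the original code).

// runDiscovery is a hypothetical caller: it launches Run in a goroutine and
// consumes target-group updates until the provider closes the channel.
func runDiscovery(k *Kubernetes) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	ch := make(chan []*config.TargetGroup)
	go k.Run(ctx, ch)

	// Run closes ch when it returns, so this loop ends once discovery stops.
	for tgs := range ch {
		for _, tg := range tgs {
			fmt.Printf("target group %q with %d targets\n", tg.Source, len(tg.Targets))
		}
	}
}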