Example #1
// NewDeploymentConfigController creates a new DeploymentConfigController.
func NewDeploymentConfigController(dcInformer, rcInformer, podInformer framework.SharedIndexInformer, oc osclient.Interface, kc kclient.Interface, codec runtime.Codec) *DeploymentConfigController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(kc.Events(""))
	recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deploymentconfig-controller"})

	c := &DeploymentConfigController{
		dn: oc,
		rn: kc,

		queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),

		recorder: recorder,
		codec:    codec,
	}

	c.dcStore.Indexer = dcInformer.GetIndexer()
	dcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addDeploymentConfig,
		UpdateFunc: c.updateDeploymentConfig,
		DeleteFunc: c.deleteDeploymentConfig,
	})
	c.rcStore.Indexer = rcInformer.GetIndexer()
	rcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addReplicationController,
		UpdateFunc: c.updateReplicationController,
		DeleteFunc: c.deleteReplicationController,
	})
	c.podStore.Indexer = podInformer.GetIndexer()

	c.dcStoreSynced = dcInformer.HasSynced
	c.rcStoreSynced = rcInformer.HasSynced
	c.podStoreSynced = podInformer.HasSynced

	return c
}
Example #2
File: deployer.go Project: pweil-/origin
// NewDeployer makes a new Deployer from a kube client.
func NewDeployer(client kclient.Interface, oclient client.Interface, out, errOut io.Writer, until string) *Deployer {
	scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), client)
	return &Deployer{
		out:    out,
		errOut: errOut,
		until:  until,
		getDeployment: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		getDeployments: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
			return client.ReplicationControllers(namespace).List(kapi.ListOptions{LabelSelector: deployutil.ConfigSelector(configName)})
		},
		scaler: scaler,
		strategyFor: func(config *deployapi.DeploymentConfig) (strategy.DeploymentStrategy, error) {
			switch config.Spec.Strategy.Type {
			case deployapi.DeploymentStrategyTypeRecreate:
				return recreate.NewRecreateDeploymentStrategy(client, oclient, client.Events(""), kapi.Codecs.UniversalDecoder(), out, errOut, until), nil
			case deployapi.DeploymentStrategyTypeRolling:
				recreate := recreate.NewRecreateDeploymentStrategy(client, oclient, client.Events(""), kapi.Codecs.UniversalDecoder(), out, errOut, until)
				return rolling.NewRollingDeploymentStrategy(config.Namespace, client, oclient, client.Events(""), kapi.Codecs.UniversalDecoder(), recreate, out, errOut, until), nil
			default:
				return nil, fmt.Errorf("unsupported strategy type: %s", config.Spec.Strategy.Type)
			}
		},
	}
}
Example #3
File: explain.go Project: resouer/contrib
// GetSwaggerSchema returns the swagger spec from master
func GetSwaggerSchema(apiVer string, kubeClient client.Interface) (*swagger.ApiDeclaration, error) {
	swaggerSchema, err := kubeClient.SwaggerSchema(apiVer)
	if err != nil {
		return nil, fmt.Errorf("couldn't read swagger schema from server: %v", err)
	}
	return swaggerSchema, nil
}
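A minimal usage sketch (not part of the original example set), assuming kubeClient is an already-constructed client.Interface and apiVer is a version string such as "v1"; printSwaggerApiCount is a hypothetical helper:
// printSwaggerApiCount fetches the swagger spec via GetSwaggerSchema above and reports
// how many API entries the returned declaration contains.
func printSwaggerApiCount(kubeClient client.Interface, apiVer string, out io.Writer) error {
	schema, err := GetSwaggerSchema(apiVer, kubeClient)
	if err != nil {
		return err
	}
	// swagger.ApiDeclaration lists the registered APIs for the requested version.
	fmt.Fprintf(out, "%s declares %d api entries\n", apiVer, len(schema.Apis))
	return nil
}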
Example #4
func New(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc, threshold int) *GCController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	gcc := &GCController{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "pod-garbage-collector"}),
			KubeClient: kubeClient,
		},
		threshold: threshold,
	}

	terminatedSelector := compileTerminatedPodSelector()

	gcc.podStore.Store, gcc.podStoreSyncer = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return gcc.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), terminatedSelector)
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return gcc.kubeClient.Pods(api.NamespaceAll).Watch(labels.Everything(), terminatedSelector, rv)
			},
		},
		&api.Pod{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{},
	)
	return gcc
}
// TODO(floreks): This should be transactional to make sure that DS will not be deleted without pods
// Deletes daemon set with given name in given namespace and related pods.
// Also deletes services related to daemon set if deleteServices is true.
func DeleteDaemonSet(client k8sClient.Interface, namespace, name string,
	deleteServices bool) error {

	log.Printf("Deleting %s daemon set from %s namespace", name, namespace)

	if deleteServices {
		if err := DeleteDaemonSetServices(client, namespace, name); err != nil {
			return err
		}
	}

	pods, err := getRawDaemonSetPods(client, namespace, name)
	if err != nil {
		return err
	}

	if err := client.Extensions().DaemonSets(namespace).Delete(name); err != nil {
		return err
	}

	for _, pod := range pods.Items {
		if err := client.Pods(namespace).Delete(pod.Name, &api.DeleteOptions{}); err != nil {
			return err
		}
	}

	log.Printf("Successfully deleted %s daemon set from %s namespace", name, namespace)

	return nil
}
// DeleteDaemonSetServices deletes services related to daemon set with given name in given namespace.
func DeleteDaemonSetServices(client k8sClient.Interface, namespace, name string) error {
	log.Printf("Deleting services related to %s daemon set from %s namespace", name,
		namespace)

	daemonSet, err := client.Extensions().DaemonSets(namespace).Get(name)
	if err != nil {
		return err
	}

	labelSelector, err := unversioned.LabelSelectorAsSelector(daemonSet.Spec.Selector)
	if err != nil {
		return err
	}

	services, err := getServicesForDSDeletion(client, labelSelector, namespace)
	if err != nil {
		return err
	}

	for _, service := range services {
		if err := client.Services(namespace).Delete(service.Name); err != nil {
			return err
		}
	}

	log.Printf("Successfully deleted services related to %s daemon set from %s namespace",
		name, namespace)

	return nil
}
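A hedged usage sketch (not from the original project) that combines the two functions above; cleanUpDaemonSet is a hypothetical wrapper and the client is assumed to be already constructed:
// cleanUpDaemonSet first removes the services matched by the daemon set's selector,
// then deletes the daemon set itself with deleteServices=false so services are not touched twice.
func cleanUpDaemonSet(client k8sClient.Interface, namespace, name string) error {
	if err := DeleteDaemonSetServices(client, namespace, name); err != nil {
		return err
	}
	return DeleteDaemonSet(client, namespace, name, false)
}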
Example #7
// NewDeploymentController creates a new DeploymentController.
func NewDeploymentController(rcInformer, podInformer framework.SharedIndexInformer, kc kclient.Interface, sa, image string, env []kapi.EnvVar, codec runtime.Codec) *DeploymentController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(kc.Events(""))
	recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployments-controller"})

	c := &DeploymentController{
		rn: kc,
		pn: kc,

		queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),

		serviceAccount: sa,
		deployerImage:  image,
		environment:    env,
		recorder:       recorder,
		codec:          codec,
	}

	c.rcStore.Indexer = rcInformer.GetIndexer()
	rcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addReplicationController,
		UpdateFunc: c.updateReplicationController,
	})

	c.podStore.Indexer = podInformer.GetIndexer()
	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		UpdateFunc: c.updatePod,
		DeleteFunc: c.deletePod,
	})

	c.rcStoreSynced = rcInformer.HasSynced
	c.podStoreSynced = podInformer.HasSynced

	return c
}
// Based on the given selector, returns the list of services that are candidates for deletion.
// Services are matched by the replication controller's label selector; they are returned only
// if the given label selector targets exactly one replication controller.
func getServicesForDeletion(client client.Interface, labelSelector labels.Selector,
	namespace string) ([]api.Service, error) {

	replicationControllers, err := client.ReplicationControllers(namespace).List(api.ListOptions{
		LabelSelector: labelSelector,
		FieldSelector: fields.Everything(),
	})
	if err != nil {
		return nil, err
	}

	// If the label selector targets exactly one replication controller, the services matched
	// by it can safely be deleted; otherwise no services can be deleted, so return an empty list.
	if len(replicationControllers.Items) != 1 {
		return []api.Service{}, nil
	}

	services, err := client.Services(namespace).List(api.ListOptions{
		LabelSelector: labelSelector,
		FieldSelector: fields.Everything(),
	})
	if err != nil {
		return nil, err
	}

	return services.Items, nil
}
// Returns structure containing DaemonSet and Pods for the given daemon set.
func getRawDaemonSetWithPods(client client.Interface, namespace, name string) (
	*DaemonSetWithPods, error) {
	daemonSet, err := client.Extensions().DaemonSets(namespace).Get(name)
	if err != nil {
		return nil, err
	}

	labelSelector, err := unversioned.LabelSelectorAsSelector(daemonSet.Spec.Selector)
	if err != nil {
		return nil, err
	}

	pods, err := client.Pods(namespace).List(
		api.ListOptions{
			LabelSelector: labelSelector,
			FieldSelector: fields.Everything(),
		})

	if err != nil {
		return nil, err
	}

	daemonSetAndPods := &DaemonSetWithPods{
		DaemonSet: daemonSet,
		Pods:      pods,
	}
	return daemonSetAndPods, nil
}
Example #10
func NewJobController(kubeClient client.Interface) *JobController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	jm := &JobController{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "job"}),
		},
		expectations: controller.NewControllerExpectations(),
		queue:        workqueue.New(),
	}

	jm.jobStore.Store, jm.jobController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return jm.kubeClient.Experimental().Jobs(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return jm.kubeClient.Experimental().Jobs(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&experimental.Job{},
		replicationcontroller.FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: jm.enqueueController,
			UpdateFunc: func(old, cur interface{}) {
				if job := cur.(*experimental.Job); !isJobFinished(job) {
					jm.enqueueController(job)
				}
			},
			DeleteFunc: jm.enqueueController,
		},
	)

	jm.podStore.Store, jm.podController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return jm.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return jm.kubeClient.Pods(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.Pod{},
		replicationcontroller.PodRelistPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    jm.addPod,
			UpdateFunc: jm.updatePod,
			DeleteFunc: jm.deletePod,
		},
	)

	jm.updateHandler = jm.updateJobStatus
	jm.syncHandler = jm.syncJob
	jm.podStoreSynced = jm.podController.HasSynced
	return jm
}
Example #11
func formatImageStreamQuota(out *tabwriter.Writer, c client.Interface, kc kclient.Interface, stream *imageapi.ImageStream) {
	quotas, err := kc.ResourceQuotas(stream.Namespace).List(api.ListOptions{})
	if err != nil {
		return
	}

	var limit *resource.Quantity
	for _, item := range quotas.Items {
		// search for smallest ImageStream quota
		if value, ok := item.Spec.Hard[imageapi.ResourceImageStreamSize]; ok {
			if limit == nil || limit.Cmp(value) > 0 {
				limit = &value
			}
		}
	}
	if limit != nil {
		quantity := imagequota.GetImageStreamSize(c, stream, make(map[string]*imageapi.Image))
		scale := mega
		if quantity.Value() >= (1<<giga.scale) || limit.Value() >= (1<<giga.scale) {
			scale = giga
		}
		formatString(out, "Quota Usage", fmt.Sprintf("%s / %s",
			formatQuantity(quantity, scale), formatQuantity(limit, scale)))
	}
}
Example #12
func StatusViewerFor(kind unversioned.GroupKind, c client.Interface) (StatusViewer, error) {
	switch kind {
	case extensions.Kind("Deployment"):
		return &DeploymentStatusViewer{c.Extensions()}, nil
	}
	return nil, fmt.Errorf("no status viewer has been implemented for %v", kind)
}
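A hypothetical caller, assuming the StatusViewer returned here exposes a Status(namespace, name) method that yields a progress message and a done flag, as kubectl's rollout status viewers did in this era:
// deploymentStatusOnce resolves a status viewer for the Deployment kind and queries it a single time.
func deploymentStatusOnce(c client.Interface, namespace, name string) (string, bool, error) {
	viewer, err := StatusViewerFor(extensions.Kind("Deployment"), c)
	if err != nil {
		return "", false, err
	}
	return viewer.Status(namespace, name)
}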
// TODO(floreks): This should be transactional to make sure that RC will not be deleted without pods
// Deletes replication controller with given name in given namespace and related pods.
// Also deletes services related to replication controller if deleteServices is true.
func DeleteReplicationController(client client.Interface, namespace, name string,
	deleteServices bool) error {

	log.Printf("Deleting %s replication controller from %s namespace", name, namespace)

	if deleteServices {
		if err := DeleteReplicationControllerServices(client, namespace, name); err != nil {
			return err
		}
	}

	pods, err := getRawReplicationControllerPods(client, namespace, name)
	if err != nil {
		return err
	}

	if err := client.ReplicationControllers(namespace).Delete(name); err != nil {
		return err
	}

	for _, pod := range pods.Items {
		if err := client.Pods(namespace).Delete(pod.Name, &api.DeleteOptions{}); err != nil {
			return err
		}
	}

	log.Printf("Successfully deleted %s replication controller from %s namespace", name, namespace)

	return nil
}
Example #14
func makeNNodes(c client.Interface, N int) {
	baseNode := &api.Node{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "scheduler-test-node-",
		},
		Spec: api.NodeSpec{
			ExternalID: "foobar",
		},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
				api.ResourceCPU:    resource.MustParse("4"),
				api.ResourceMemory: resource.MustParse("32Gi"),
			},
			Phase: api.NodeRunning,
			Conditions: []api.NodeCondition{
				{Type: api.NodeReady, Status: api.ConditionTrue},
			},
		},
	}
	for i := 0; i < N; i++ {
		if _, err := c.Nodes().Create(baseNode); err != nil {
			panic("error creating node: " + err.Error())
		}
	}
}
Example #15
File: admin.go Project: RomainVabre/origin
// InstallRegistry checks whether a registry is installed and installs one if not already installed
func (h *Helper) InstallRegistry(kubeClient kclient.Interface, f *clientcmd.Factory, configDir, images string, out io.Writer) error {
	_, err := kubeClient.Services("default").Get(svcDockerRegistry)
	if err == nil {
		// If there's no error, the registry already exists
		return nil
	}
	if !apierrors.IsNotFound(err) {
		return errors.NewError("error retrieving docker registry service").WithCause(err)
	}
	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = images
	cfg := &registry.RegistryConfig{
		Name:           "registry",
		Type:           "docker-registry",
		ImageTemplate:  imageTemplate,
		Ports:          "5000",
		Replicas:       1,
		Labels:         "docker-registry=default",
		Volume:         "/registry",
		ServiceAccount: "registry",
	}
	cmd := registry.NewCmdRegistry(f, "", "registry", out)
	output := &bytes.Buffer{}
	err = registry.RunCmdRegistry(f, cmd, output, cfg, []string{})
	glog.V(4).Infof("Registry command output:\n%s", output.String())
	return err
}
Example #16
func RunProjectCache(c client.Interface, defaultNodeSelector string) {
	if pcache != nil {
		return
	}

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&kapi.Namespace{},
		store,
		0,
	)
	reflector.Run()
	pcache = &ProjectCache{
		Client:              c,
		Store:               store,
		DefaultNodeSelector: defaultNodeSelector,
	}
}
Example #17
func deletePods(kubeClient client.Interface, ns string, before unversioned.Time) (int64, error) {
	items, err := kubeClient.Pods(ns).List(unversioned.ListOptions{})
	if err != nil {
		return 0, err
	}
	expired := unversioned.Now().After(before.Time)
	var deleteOptions *api.DeleteOptions
	if expired {
		deleteOptions = api.NewDeleteOptions(0)
	}
	estimate := int64(0)
	for i := range items.Items {
		if items.Items[i].Spec.TerminationGracePeriodSeconds != nil {
			grace := *items.Items[i].Spec.TerminationGracePeriodSeconds
			if grace > estimate {
				estimate = grace
			}
		}
		err := kubeClient.Pods(ns).Delete(items.Items[i].Name, deleteOptions)
		if err != nil && !errors.IsNotFound(err) {
			return 0, err
		}
	}
	if expired {
		estimate = 0
	}
	return estimate, nil
}
Example #18
func forcefullyDeletePod(c client.Interface, pod *api.Pod) {
	var zero int64
	err := c.Pods(pod.Namespace).Delete(pod.Name, &api.DeleteOptions{GracePeriodSeconds: &zero})
	if err != nil {
		util.HandleError(err)
	}
}
Example #19
File: rolling.go Project: ncantor/origin
// NewRollingDeploymentStrategy makes a new RollingDeploymentStrategy.
func NewRollingDeploymentStrategy(namespace string, client kclient.Interface, codec runtime.Codec, initialStrategy acceptingDeploymentStrategy) *RollingDeploymentStrategy {
	return &RollingDeploymentStrategy{
		codec:           codec,
		initialStrategy: initialStrategy,
		client:          client,
		apiRetryPeriod:  DefaultApiRetryPeriod,
		apiRetryTimeout: DefaultApiRetryTimeout,
		rollingUpdate: func(config *kubectl.RollingUpdaterConfig) error {
			updater := kubectl.NewRollingUpdater(namespace, client)
			return updater.Update(config)
		},
		hookExecutor: &stratsupport.HookExecutor{
			PodClient: &stratsupport.HookExecutorPodClientImpl{
				CreatePodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
					return client.Pods(namespace).Create(pod)
				},
				PodWatchFunc: func(namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
					return stratsupport.NewPodWatch(client, namespace, name, resourceVersion, stopChannel)
				},
			},
		},
		getUpdateAcceptor: func(timeout time.Duration) strat.UpdateAcceptor {
			return stratsupport.NewAcceptNewlyObservedReadyPods(client, timeout, AcceptorInterval)
		},
	}
}
Example #20
// NewNamespaceController creates a new NamespaceController
func NewNamespaceController(kubeClient client.Interface, experimentalMode bool, resyncPeriod time.Duration) *NamespaceController {
	var controller *framework.Controller
	_, controller = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				namespace := obj.(*api.Namespace)
				if err := syncNamespace(kubeClient, experimentalMode, *namespace); err != nil {
					if estimate, ok := err.(*contentRemainingError); ok {
						go func() {
							// Estimate is the aggregate total of TerminationGracePeriodSeconds, which defaults to 30s
							// for pods.  However, most processes will terminate faster - within a few seconds, probably
							// with a peak within 5-10s.  So this division is a heuristic that avoids waiting the full
							// duration when in many cases things complete more quickly. The extra second added is to
							// ensure we never wait 0 seconds.
							t := estimate.Estimate/2 + 1
							glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", namespace.Name, t)
							time.Sleep(time.Duration(t) * time.Second)
							if err := controller.Requeue(namespace); err != nil {
								util.HandleError(err)
							}
						}()
						return
					}
					util.HandleError(err)
				}
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				namespace := newObj.(*api.Namespace)
				if err := syncNamespace(kubeClient, experimentalMode, *namespace); err != nil {
					if estimate, ok := err.(*contentRemainingError); ok {
						go func() {
							t := estimate.Estimate/2 + 1
							glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", namespace.Name, t)
							time.Sleep(time.Duration(t) * time.Second)
							if err := controller.Requeue(namespace); err != nil {
								util.HandleError(err)
							}
						}()
						return
					}
					util.HandleError(err)
				}
			},
		},
	)

	return &NamespaceController{
		controller: controller,
	}
}
Example #21
// updateRcWithRetries retries updating the given rc on conflict with the following steps:
// 1. Get latest resource
// 2. applyUpdate
// 3. Update the resource
func updateRcWithRetries(c client.Interface, namespace string, rc *api.ReplicationController, applyUpdate updateRcFunc) (*api.ReplicationController, error) {
	// Deep copy the rc in case we failed on Get during retry loop
	obj, err := api.Scheme.Copy(rc)
	if err != nil {
		return nil, fmt.Errorf("failed to deep copy rc before updating it: %v", err)
	}
	oldRc := obj.(*api.ReplicationController)
	err = client.RetryOnConflict(client.DefaultBackoff, func() (e error) {
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(rc)
		if rc, e = c.ReplicationControllers(namespace).Update(rc); e == nil {
			// rc contains the latest controller post update
			return
		}
		updateErr := e
		// Refetch the controller to get the latest resource version. If the update failed,
		// rc can't be trusted, so look it up by oldRc.Name.
		if rc, e = c.ReplicationControllers(namespace).Get(oldRc.Name); e != nil {
			// The Get failed: Value in rc cannot be trusted.
			rc = oldRc
		}
		// Only return the error from update
		return updateErr
	})
	// If the error is non-nil the returned controller cannot be trusted, if it is nil, the returned
	// controller contains the applied update.
	return rc, err
}
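A hypothetical applyUpdate callback, assuming updateRcFunc is func(*api.ReplicationController) and that Spec.Replicas is a plain int in this API version:
// scaleRcWithRetries sets the desired replica count and relies on updateRcWithRetries above
// to refetch the controller and retry whenever the update hits a conflict.
func scaleRcWithRetries(c client.Interface, namespace string, rc *api.ReplicationController, replicas int) (*api.ReplicationController, error) {
	return updateRcWithRetries(c, namespace, rc, func(update *api.ReplicationController) {
		update.Spec.Replicas = replicas
	})
}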
Example #22
func New(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc, threshold int) *GCController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	gcc := &GCController{
		kubeClient: kubeClient,
		threshold:  threshold,
		deletePod: func(namespace, name string) error {
			return kubeClient.Pods(namespace).Delete(name, api.NewDeleteOptions(0))
		},
	}

	terminatedSelector := compileTerminatedPodSelector()

	gcc.podStore.Store, gcc.podStoreSyncer = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				options := unversioned.ListOptions{FieldSelector: unversioned.FieldSelector{terminatedSelector}}
				return gcc.kubeClient.Pods(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options unversioned.ListOptions) (watch.Interface, error) {
				options.FieldSelector.Selector = terminatedSelector
				return gcc.kubeClient.Pods(api.NamespaceAll).Watch(options)
			},
		},
		&api.Pod{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{},
	)
	return gcc
}
Example #23
// Returns the old RCs targeted by the given Deployment.
func GetOldRCs(deployment extensions.Deployment, c client.Interface) ([]*api.ReplicationController, error) {
	namespace := deployment.ObjectMeta.Namespace
	// 1. Find all pods whose labels match deployment.Spec.Selector
	podList, err := c.Pods(namespace).List(labels.SelectorFromSet(deployment.Spec.Selector), fields.Everything(), unversioned.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("error listing pods: %v", err)
	}
	// 2. Find the corresponding RCs for pods in podList.
	// TODO: Right now we list all RCs and then filter. We should add an API for this.
	oldRCs := map[string]api.ReplicationController{}
	rcList, err := c.ReplicationControllers(namespace).List(labels.Everything(), fields.Everything(), unversioned.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("error listing replication controllers: %v", err)
	}
	newRCTemplate := GetNewRCTemplate(deployment)
	for _, pod := range podList.Items {
		podLabelsSelector := labels.Set(pod.ObjectMeta.Labels)
		for _, rc := range rcList.Items {
			rcLabelsSelector := labels.SelectorFromSet(rc.Spec.Selector)
			if rcLabelsSelector.Matches(podLabelsSelector) {
				// Filter out RC that has the same pod template spec as the deployment - that is the new RC.
				if api.Semantic.DeepEqual(rc.Spec.Template, &newRCTemplate) {
					continue
				}
				oldRCs[rc.ObjectMeta.Name] = rc
			}
		}
	}
	requiredRCs := []*api.ReplicationController{}
	for _, value := range oldRCs {
		value := value // copy the loop variable; otherwise every appended pointer would reference the same RC
		requiredRCs = append(requiredRCs, &value)
	}
	return requiredRCs, nil
}
Example #24
// Based on the given selector, returns the list of services that are candidates for deletion.
// Services are matched by the daemon set's label selector; they are returned only if the
// given label selector targets exactly one daemon set.
func getServicesForDSDeletion(client client.Interface, labelSelector labels.Selector,
	namespace string) ([]api.Service, error) {

	daemonSet, err := client.Extensions().DaemonSets(namespace).List(api.ListOptions{
		LabelSelector: labelSelector,
		FieldSelector: fields.Everything(),
	})
	if err != nil {
		return nil, err
	}

	// If the label selector targets exactly one daemon set, the services matched by it can
	// safely be deleted; otherwise no services can be deleted, so return an empty list.
	if len(daemonSet.Items) != 1 {
		return []api.Service{}, nil
	}

	services, err := client.Services(namespace).List(api.ListOptions{
		LabelSelector: labelSelector,
		FieldSelector: fields.Everything(),
	})
	if err != nil {
		return nil, err
	}

	return services.Items, nil
}
Example #25
func New(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc, threshold int) *GCController {
	gcc := &GCController{
		kubeClient: kubeClient,
		threshold:  threshold,
		deletePod: func(namespace, name string) error {
			return kubeClient.Pods(namespace).Delete(name, api.NewDeleteOptions(0))
		},
	}

	terminatedSelector := fields.ParseSelectorOrDie("status.phase!=" + string(api.PodPending) + ",status.phase!=" + string(api.PodRunning) + ",status.phase!=" + string(api.PodUnknown))

	gcc.podStore.Store, gcc.podStoreSyncer = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.FieldSelector = terminatedSelector
				return gcc.kubeClient.Pods(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.FieldSelector = terminatedSelector
				return gcc.kubeClient.Pods(api.NamespaceAll).Watch(options)
			},
		},
		&api.Pod{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{},
	)
	return gcc
}
Example #26
// GetNewRC returns an RC that matches the intent of the given deployment, fetching the RC list through the client interface.
// Returns nil if the new RC doesn't exist yet.
func GetNewRC(deployment extensions.Deployment, c client.Interface) (*api.ReplicationController, error) {
	return GetNewRCFromList(deployment, c,
		func(namespace string, options api.ListOptions) ([]api.ReplicationController, error) {
			rcList, err := c.ReplicationControllers(namespace).List(options)
			return rcList.Items, err
		})
}
Example #27
// ValidateAppName validates the application name. When an error is returned, name validity
// could not be determined.
func ValidateAppName(spec *AppNameValiditySpec, client client.Interface) (*AppNameValidity, error) {
	log.Printf("Validating %s application name in %s namespace", spec.Name, spec.Namespace)

	isValidRc := false
	isValidService := false

	_, err := client.ReplicationControllers(spec.Namespace).Get(spec.Name)
	if err != nil {
		if isNotFoundError(err) {
			isValidRc = true
		} else {
			return nil, err
		}
	}

	_, err = client.Services(spec.Namespace).Get(spec.Name)
	if err != nil {
		if isNotFoundError(err) {
			isValidService = true
		} else {
			return nil, err
		}
	}

	isValid := isValidRc && isValidService

	log.Printf("Validation result for %s application name in %s namespace is %t", spec.Name,
		spec.Namespace, isValid)

	return &AppNameValidity{Valid: isValid}, nil
}
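A hypothetical convenience wrapper, assuming AppNameValiditySpec carries only the Name and Namespace fields used above:
// appNameIsFree reports whether neither a replication controller nor a service with the
// given name already exists in the namespace, using ValidateAppName above.
func appNameIsFree(client client.Interface, namespace, name string) (bool, error) {
	validity, err := ValidateAppName(&AppNameValiditySpec{Name: name, Namespace: namespace}, client)
	if err != nil {
		return false, err
	}
	return validity.Valid, nil
}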
// Deletes services related to replication controller with given name in given namespace.
func DeleteReplicationControllerServices(client client.Interface, namespace, name string) error {
	log.Printf("Deleting services related to %s replication controller from %s namespace", name,
		namespace)

	replicationController, err := client.ReplicationControllers(namespace).Get(name)
	if err != nil {
		return err
	}

	labelSelector, err := toLabelSelector(replicationController.Spec.Selector)
	if err != nil {
		return err
	}

	services, err := getServicesForDeletion(client, labelSelector, namespace)
	if err != nil {
		return err
	}

	for _, service := range services {
		if err := client.Services(namespace).Delete(service.Name); err != nil {
			return err
		}
	}

	log.Printf("Successfully deleted services related to %s replication controller from %s namespace",
		name, namespace)

	return nil
}
Example #29
File: main.go Project: raggi/contrib
// ParseAnnotations parses the annotations contained in the ingress rule used to add
// authentication to the paths defined in the rule, and generates an htpasswd-compatible
// file to be used as the source during the authentication process.
func ParseAnnotations(kubeClient client.Interface, ing *extensions.Ingress, authDir string) (*Nginx, error) {
	if ing.GetAnnotations() == nil {
		return &Nginx{}, ErrMissingAnnotations
	}

	at, err := ingAnnotations(ing.GetAnnotations()).authType()
	if err != nil {
		return &Nginx{}, err
	}

	s, err := ingAnnotations(ing.GetAnnotations()).secretName()
	if err != nil {
		return &Nginx{}, err
	}

	secret, err := kubeClient.Secrets(ing.Namespace).Get(s)
	if err != nil {
		return &Nginx{}, err
	}

	realm := ingAnnotations(ing.GetAnnotations()).realm()

	passFile := fmt.Sprintf("%v/%v-%v.passwd", authDir, ing.GetNamespace(), ing.GetName())
	err = dumpSecret(passFile, secret)
	if err != nil {
		return &Nginx{}, err
	}

	return &Nginx{
		Type:    at,
		Realm:   realm,
		File:    passFile,
		Secured: true,
	}, nil
}
Example #30
func GetServerVersion(w io.Writer, kubeClient client.Interface) {
	serverVersion, err := kubeClient.ServerVersion()
	if err != nil {
		fmt.Printf("Couldn't read server version from server: %v\n", err)
		os.Exit(1)
	}

	fmt.Fprintf(w, "Server Version: %#v\n", *serverVersion)
}
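A hypothetical caller that captures the output in a buffer instead of writing to stdout, assuming kubeClient is an already-constructed client.Interface:
// serverVersionString returns the formatted "Server Version" line produced by GetServerVersion above.
func serverVersionString(kubeClient client.Interface) string {
	var buf bytes.Buffer
	GetServerVersion(&buf, kubeClient)
	return buf.String()
}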