Example No. 1
// GetNewRC returns an RC that matches the intent of the given deployment; get RCList from client interface.
// Returns nil if the new RC doesn't exist yet.
func GetNewRC(deployment extensions.Deployment, c clientset.Interface) (*api.ReplicationController, error) {
	return GetNewRCFromList(deployment, c,
		func(namespace string, options api.ListOptions) ([]api.ReplicationController, error) {
			rcList, err := c.Core().ReplicationControllers(namespace).List(options)
			return rcList.Items, err
		})
}
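A minimal usage sketch, assuming a deployment d and client c wired up elsewhere; the caller name and logging below are illustrative assumptions, not part of the original API:

// Hypothetical caller, for illustration only.
func syncNewRC(d extensions.Deployment, c clientset.Interface) error {
	newRC, err := GetNewRC(d, c)
	if err != nil {
		return fmt.Errorf("listing RCs for deployment %q: %v", d.Name, err)
	}
	if newRC == nil {
		// The new RC doesn't exist yet; callers typically treat this as
		// "nothing to do" rather than as an error.
		glog.V(4).Infof("new RC for deployment %q does not exist yet", d.Name)
		return nil
	}
	glog.V(4).Infof("deployment %q has new RC %q", d.Name, newRC.Name)
	return nil
}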
Example No. 2
func newPersistentVolumeClaimInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	sharedIndexInformer := cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				var internalOptions api.ListOptions
				if err := api.Scheme.Convert(&options, &internalOptions, nil); err != nil {
					return nil, err
				}
				return client.Core().PersistentVolumeClaims(api.NamespaceAll).List(internalOptions)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				var internalOptions api.ListOptions
				if err := api.Scheme.Convert(&options, &internalOptions, nil); err != nil {
					return nil, err
				}
				return client.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(internalOptions)
			},
		},
		&api.PersistentVolumeClaim{},
		resyncPeriod,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)

	return sharedIndexInformer
}
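A sketch of how such an informer is typically started, assuming a stop channel managed by the caller; the event handler below is illustrative:

// Hypothetical wiring, for illustration: attach a handler and run the
// informer until stopCh is closed.
func runPVCInformer(client internalclientset.Interface, stopCh <-chan struct{}) {
	informer := newPersistentVolumeClaimInformer(client, 30*time.Second)
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			pvc := obj.(*api.PersistentVolumeClaim)
			glog.V(4).Infof("observed PVC %s/%s", pvc.Namespace, pvc.Name)
		},
	})
	informer.Run(stopCh) // blocks until stopCh is closed
}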
Example No. 3
// LabelPodsWithHash labels all pods in the given podList with the new hash label.
// The returned bool value can be used to tell if all pods are actually labeled.
func LabelPodsWithHash(podList *api.PodList, rs *extensions.ReplicaSet, c clientset.Interface, namespace, hash string) (bool, error) {
	allPodsLabeled := true
	for _, pod := range podList.Items {
		// Only label the pod that doesn't already have the new hash
		if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash {
			if _, podUpdated, err := podutil.UpdatePodWithRetries(c.Core().Pods(namespace), &pod,
				func(podToUpdate *api.Pod) error {
					// Precondition: the pod doesn't contain the new hash in its label.
					if podToUpdate.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
						return errors.ErrPreconditionViolated
					}
					podToUpdate.Labels = labelsutil.AddLabel(podToUpdate.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash)
					return nil
				}); err != nil {
				return false, fmt.Errorf("error in adding template hash label %s to pod %+v: %s", hash, pod, err)
			} else if podUpdated {
				glog.V(4).Infof("Labeled %s %s/%s of %s %s/%s with hash %s.", pod.Kind, pod.Namespace, pod.Name, rs.Kind, rs.Namespace, rs.Name, hash)
			} else {
				// If the pod wasn't updated but the update call returned no error, we hit a
				// "pod not found" or "precondition violated" error, so we can't say all pods are labeled.
				allPodsLabeled = false
			}
		}
	}
	return allPodsLabeled, nil
}
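The returned bool tells the caller whether another pass is needed; a sketch of one way to act on it (the wrapper itself is an assumption):

// Hypothetical wrapper, for illustration only.
func ensurePodsLabeled(podList *api.PodList, rs *extensions.ReplicaSet, c clientset.Interface, namespace, hash string) error {
	allLabeled, err := LabelPodsWithHash(podList, rs, c, namespace, hash)
	if err != nil {
		return err
	}
	if !allLabeled {
		// Some pods hit "not found" or "precondition violated"; callers
		// typically re-list and retry on the next sync.
		return fmt.Errorf("not all pods in %q carry hash %q yet", namespace, hash)
	}
	return nil
}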
Example No. 4
func NewHollowProxyOrDie(
	nodeName string,
	client clientset.Interface,
	endpointsConfig *proxyconfig.EndpointsConfig,
	serviceConfig *proxyconfig.ServiceConfig,
	iptInterface utiliptables.Interface,
	broadcaster record.EventBroadcaster,
	recorder record.EventRecorder,
) *HollowProxy {
	// Create and start Hollow Proxy
	config := options.NewProxyConfig()
	config.OOMScoreAdj = util.Int32Ptr(0)
	config.ResourceContainer = ""
	config.NodeRef = &api.ObjectReference{
		Kind:      "Node",
		Name:      nodeName,
		UID:       types.UID(nodeName),
		Namespace: "",
	}
	proxyconfig.NewSourceAPI(
		client.Core().RESTClient(),
		30*time.Second,
		serviceConfig.Channel("api"),
		endpointsConfig.Channel("api"),
	)

	hollowProxy, err := proxyapp.NewProxyServer(client, config, iptInterface, &FakeProxier{}, broadcaster, recorder, nil, "fake")
	if err != nil {
		glog.Fatalf("Error while creating ProxyServer: %v\n", err)
	}
	return &HollowProxy{
		ProxyServer: hollowProxy,
	}
}
Example No. 5
func createController(client clientset.Interface, controllerName, namespace string, podCount int, podTemplate *api.Pod) error {
	rc := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: controllerName,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: int32(podCount),
			Selector: map[string]string{"name": controllerName},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": controllerName},
				},
				Spec: podTemplate.Spec,
			},
		},
	}
	var err error
	for attempt := 0; attempt < retries; attempt++ {
		if _, err = client.Core().ReplicationControllers(namespace).Create(rc); err == nil {
			return nil
		}
		glog.Errorf("Error while creating rc, maybe retry: %v", err)
	}
	return fmt.Errorf("terminal error while creating rc, won't retry: %v", err)
}
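A usage sketch; the pause-container template mirrors Example No. 20, and the controller name and replica count here are assumptions:

// Hypothetical caller, for illustration: an RC running 10 pause pods.
func createPauseController(c clientset.Interface, ns string) error {
	template := &api.Pod{
		Spec: api.PodSpec{
			Containers: []api.Container{
				{Name: "pause", Image: framework.GetPauseImageName(c)},
			},
		},
	}
	return createController(c, "load-test", ns, 10, template)
}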
Example No. 6
func makeNodes(c clientset.Interface, nodeCount int) {
	glog.Infof("making %d nodes", nodeCount)
	baseNode := &api.Node{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "scheduler-test-node-",
		},
		Spec: api.NodeSpec{
			ExternalID: "foobar",
		},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourcePods:   *resource.NewQuantity(110, resource.DecimalSI),
				api.ResourceCPU:    resource.MustParse("4"),
				api.ResourceMemory: resource.MustParse("32Gi"),
			},
			Phase: api.NodeRunning,
			Conditions: []api.NodeCondition{
				{Type: api.NodeReady, Status: api.ConditionTrue},
			},
		},
	}
	for i := 0; i < nodeCount; i++ {
		if _, err := c.Core().Nodes().Create(baseNode); err != nil {
			panic("error creating node: " + err.Error())
		}
	}
}
Example No. 7
// Query sends a command to the server and returns the Response
func Query(c clientset.Interface, query string) (*influxdb.Response, error) {
	result, err := c.Core().RESTClient().Get().
		Prefix("proxy").
		Namespace("kube-system").
		Resource("services").
		Name(influxdbService+":api").
		Suffix("query").
		Param("q", query).
		Param("db", influxdbDatabaseName).
		Param("epoch", "s").
		Do().
		Raw()

	if err != nil {
		return nil, err
	}

	var response influxdb.Response
	dec := json.NewDecoder(bytes.NewReader(result))
	dec.UseNumber()
	err = dec.Decode(&response)

	if err != nil {
		return nil, err
	}
	return &response, nil
}
Example No. 8
// NewDeployer makes a new Deployer from a kube client.
func NewDeployer(client kclientset.Interface, oclient client.Interface, out, errOut io.Writer, until string) *Deployer {
	scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), client)
	return &Deployer{
		out:    out,
		errOut: errOut,
		until:  until,
		getDeployment: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.Core().ReplicationControllers(namespace).Get(name)
		},
		getDeployments: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
			return client.Core().ReplicationControllers(namespace).List(kapi.ListOptions{LabelSelector: deployutil.ConfigSelector(configName)})
		},
		scaler: scaler,
		strategyFor: func(config *deployapi.DeploymentConfig) (strategy.DeploymentStrategy, error) {
			switch config.Spec.Strategy.Type {
			case deployapi.DeploymentStrategyTypeRecreate:
				return recreate.NewRecreateDeploymentStrategy(client, oclient, &kcoreclient.EventSinkImpl{Interface: client.Core().Events("")}, kapi.Codecs.UniversalDecoder(), out, errOut, until), nil
			case deployapi.DeploymentStrategyTypeRolling:
				recreate := recreate.NewRecreateDeploymentStrategy(client, oclient, &kcoreclient.EventSinkImpl{Interface: client.Core().Events("")}, kapi.Codecs.UniversalDecoder(), out, errOut, until)
				return rolling.NewRollingDeploymentStrategy(config.Namespace, client, oclient, &kcoreclient.EventSinkImpl{Interface: client.Core().Events("")}, kapi.Codecs.UniversalDecoder(), recreate, out, errOut, until), nil
			default:
				return nil, fmt.Errorf("unsupported strategy type: %s", config.Spec.Strategy.Type)
			}
		},
	}
}
Example No. 9
// Based on the given selector, returns the list of services that are candidates for deletion.
// Services are matched by the replication controllers' label selector; they are only
// considered deletable if the given label selector targets exactly one replication controller.
func getServicesForDeletion(client client.Interface, labelSelector labels.Selector,
	namespace string) ([]api.Service, error) {

	replicationControllers, err := client.Core().ReplicationControllers(namespace).List(api.ListOptions{
		LabelSelector: labelSelector,
		FieldSelector: fields.Everything(),
	})
	if err != nil {
		return nil, err
	}

	// If the label selector targets exactly one replication controller,
	// we can delete the services it selects; otherwise we cannot safely
	// delete any services, so return an empty list.
	if len(replicationControllers.Items) != 1 {
		return []api.Service{}, nil
	}

	services, err := client.Core().Services(namespace).List(api.ListOptions{
		LabelSelector: labelSelector,
		FieldSelector: fields.Everything(),
	})
	if err != nil {
		return nil, err
	}

	return services.Items, nil
}
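A sketch of the delete step a caller might perform on the returned candidates; the wrapper is an assumption, and the Delete call mirrors the other Delete calls in these examples:

// Hypothetical follow-up, for illustration only.
func deleteMatchingServices(client client.Interface, selector labels.Selector, ns string) error {
	services, err := getServicesForDeletion(client, selector, ns)
	if err != nil {
		return err
	}
	for _, svc := range services {
		if err := client.Core().Services(ns).Delete(svc.Name, nil); err != nil {
			return err
		}
	}
	return nil
}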
Example No. 10
// Delete the passed-in pod.
func deletePod(f *framework.Framework, c clientset.Interface, ns string, pod *api.Pod) error {

	framework.Logf("Deleting pod %v", pod.Name)
	err := c.Core().Pods(ns).Delete(pod.Name, nil)
	if err != nil {
		return fmt.Errorf("Pod %v encountered a delete error: %v", pod.Name, err)
	}

	// Wait for pod to terminate
	err = f.WaitForPodTerminated(pod.Name, "")
	if err != nil && !apierrs.IsNotFound(err) {
		return fmt.Errorf("Pod %v will not teminate: %v", pod.Name, err)
	}

	// Re-get the pod to double check that it has been deleted; expect err
	// Note: Get() writes a log error if the pod is not found
	_, err = c.Core().Pods(ns).Get(pod.Name)
	if err == nil {
		return fmt.Errorf("Pod %v has been deleted but able to re-Get the deleted pod", pod.Name)
	}
	if !apierrs.IsNotFound(err) {
		return fmt.Errorf("Pod %v has been deleted but still exists: %v", pod.Name, err)
	}

	framework.Logf("Ignore \"not found\" error above. Pod %v successfully deleted", pod.Name)
	return nil
}
Example No. 11
func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, threshold int) *GCController {
	gcc := &GCController{
		kubeClient: kubeClient,
		threshold:  threshold,
		deletePod: func(namespace, name string) error {
			return kubeClient.Core().Pods(namespace).Delete(name, api.NewDeleteOptions(0))
		},
	}

	terminatedSelector := fields.ParseSelectorOrDie("status.phase!=" + string(api.PodPending) + ",status.phase!=" + string(api.PodRunning) + ",status.phase!=" + string(api.PodUnknown))

	gcc.podStore.Store, gcc.podStoreSyncer = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.FieldSelector = terminatedSelector
				return gcc.kubeClient.Core().Pods(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.FieldSelector = terminatedSelector
				return gcc.kubeClient.Core().Pods(api.NamespaceAll).Watch(options)
			},
		},
		&api.Pod{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{},
	)
	return gcc
}
Example No. 12
// Waits for the pv and pvc to be bound to each other, then checks that the pv's
// claimRef matches the pvc. Fails the test on errors. Returns the pv and pvc to
// reflect that these resources have been retrieved again (Get).
// Note: the pv and pvc are returned to the It() caller so that the
//   AfterEach func can delete these objects if they are not nil.
func waitAndValidatePVandPVC(c clientset.Interface, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) {

	var err error

	// Wait for pv and pvc to bind to each other
	if err = waitOnPVandPVC(c, ns, pv, pvc); err != nil {
		return pv, pvc, err
	}

	// Check that the PersistentVolume.ClaimRef is valid and matches the PVC
	framework.Logf("Checking PersistentVolume ClaimRef is non-nil")
	pv, err = c.Core().PersistentVolumes().Get(pv.Name)
	if err != nil {
		return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolume %v:", pv.Name)
	}

	pvc, err = c.Core().PersistentVolumeClaims(ns).Get(pvc.Name)
	if err != nil {
		return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolumeClaim %v:", pvc.Name)
	}

	if pv.Spec.ClaimRef == nil || pv.Spec.ClaimRef.UID != pvc.UID {
		pvJSON, _ := json.Marshal(pv.Spec.ClaimRef)
		return pv, pvc, fmt.Errorf("Expected Bound PersistentVolume %v to have valid ClaimRef: %+v", pv.Name, string(pvJSON))
	}

	return pv, pvc, nil
}
Example No. 13
func NewMetricsGrabber(c clientset.Interface, kubelets bool, scheduler bool, controllers bool, apiServer bool) (*MetricsGrabber, error) {
	registeredMaster := false
	masterName := ""
	nodeList, err := c.Core().Nodes().List(api.ListOptions{})
	if err != nil {
		return nil, err
	}
	if len(nodeList.Items) < 1 {
		glog.Warning("Can't find any Nodes in the API server to grab metrics from")
	}
	for _, node := range nodeList.Items {
		if system.IsMasterNode(&node) {
			registeredMaster = true
			masterName = node.Name
			break
		}
	}
	if !registeredMaster {
		scheduler = false
		controllers = false
		glog.Warningf("Master node is not registered. Grabbing metrics from Scheduler and ControllerManager is disabled.")
	}

	return &MetricsGrabber{
		client:                    c,
		grabFromApiServer:         apiServer,
		grabFromControllerManager: controllers,
		grabFromKubelets:          kubelets,
		grabFromScheduler:         scheduler,
		masterName:                masterName,
		registeredMaster:          registeredMaster,
	}, nil
}
Example No. 14
// newQuotaAccessor creates an object that conforms to the QuotaAccessor interface to be used to retrieve quota objects.
func newQuotaAccessor(client clientset.Interface) (*quotaAccessor, error) {
	liveLookupCache, err := lru.New(100)
	if err != nil {
		return nil, err
	}
	updatedCache, err := lru.New(100)
	if err != nil {
		return nil, err
	}
	lw := &cache.ListWatch{
		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
			internalOptions := api.ListOptions{}
			if err := v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil); err != nil {
				return nil, err
			}
			return client.Core().ResourceQuotas(api.NamespaceAll).List(internalOptions)
		},
		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
			internalOptions := api.ListOptions{}
			if err := v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil); err != nil {
				return nil, err
			}
			return client.Core().ResourceQuotas(api.NamespaceAll).Watch(internalOptions)
		},
	}
	indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.ResourceQuota{}, 0)

	return &quotaAccessor{
		client:          client,
		indexer:         indexer,
		reflector:       reflector,
		liveLookupCache: liveLookupCache,
		liveTTL:         30 * time.Second,
		updatedQuotas:   updatedCache,
	}, nil
}
Example No. 15
func deleteDNSScalingConfigMap(c clientset.Interface) error {
	if err := c.Core().ConfigMaps(DNSNamespace).Delete(DNSAutoscalerLabelName, nil); err != nil {
		return err
	}
	framework.Logf("DNS autoscaling ConfigMap deleted.")
	return nil
}
Example No. 16
// WaitForClusterSizeFunc waits until the cluster size matches the given function.
func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
		nodes, err := c.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector()})
		if err != nil {
			glog.Warningf("Failed to list nodes: %v", err)
			continue
		}
		numNodes := len(nodes.Items)

		// Filter out not-ready nodes.
		framework.FilterNodes(nodes, func(node api.Node) bool {
			return framework.IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
		})
		numReady := len(nodes.Items)

		if numNodes == numReady && sizeFunc(numReady) {
			glog.Infof("Cluster has reached the desired size")
			return nil
		}
		glog.Infof("Waiting for cluster, current size %d, not ready nodes %d", numNodes, numNodes-numReady)
	}
	return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout)
}
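A usage sketch; the size predicate and timeout below are illustrative assumptions:

// Hypothetical usage, for illustration: wait up to 10 minutes for at least
// three ready, schedulable nodes.
func waitForThreeNodes(c clientset.Interface) error {
	return WaitForClusterSizeFunc(c, func(size int) bool { return size >= 3 }, 10*time.Minute)
}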
Example No. 17
// IsValidSecret checks whether a secret with the specified name exists
func IsValidSecret(kubeClient clientset.Interface, name string) (*api.Secret, error) {
	ns, name, err := ParseNameNS(name)
	if err != nil {
		return nil, err
	}
	return kubeClient.Core().Secrets(ns).Get(name)
}
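A usage sketch; ParseNameNS is not shown in this example, so the "namespace/name" argument format and the secret name below are assumptions:

// Hypothetical usage, for illustration only.
func mustHaveSecret(kubeClient clientset.Interface) {
	secret, err := IsValidSecret(kubeClient, "kube-system/tls-cert")
	if err != nil {
		glog.Warningf("secret lookup failed: %v", err)
		return
	}
	glog.V(4).Infof("found secret %s/%s", secret.Namespace, secret.Name)
}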
Example No. 18
// NewResourceQuotaEvaluator returns an evaluator that can evaluate resource quotas
func NewResourceQuotaEvaluator(kubeClient clientset.Interface) quota.Evaluator {
	allResources := []api.ResourceName{api.ResourceQuotas}
	return &generic.GenericEvaluator{
		Name:              "Evaluator.ResourceQuota",
		InternalGroupKind: api.Kind("ResourceQuota"),
		InternalOperationResources: map[admission.Operation][]api.ResourceName{
			admission.Create: allResources,
		},
		MatchedResourceNames: allResources,
		MatchesScopeFunc:     generic.MatchesNoScopeFunc,
		ConstraintsFunc:      generic.ObjectCountConstraintsFunc(api.ResourceQuotas),
		UsageFunc:            generic.ObjectCountUsageFunc(api.ResourceQuotas),
		ListFuncByNamespace: func(namespace string, options api.ListOptions) ([]runtime.Object, error) {
			itemList, err := kubeClient.Core().ResourceQuotas(namespace).List(options)
			if err != nil {
				return nil, err
			}
			results := make([]runtime.Object, 0, len(itemList.Items))
			for i := range itemList.Items {
				results = append(results, &itemList.Items[i])
			}
			return results, nil
		},
	}
}
Example No. 19
// NewLimitRanger returns an object that enforces limits based on the supplied limit function
func NewLimitRanger(client clientset.Interface, actions LimitRangerActions) (admission.Interface, error) {
	liveLookupCache, err := lru.New(10000)
	if err != nil {
		return nil, err
	}

	lw := &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			return client.Core().LimitRanges(api.NamespaceAll).List(options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			return client.Core().LimitRanges(api.NamespaceAll).Watch(options)
		},
	}
	indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.LimitRange{}, 0)
	reflector.Run()

	if actions == nil {
		actions = &DefaultLimitRangerActions{}
	}

	return &limitRanger{
		Handler:         admission.NewHandler(admission.Create, admission.Update),
		client:          client,
		actions:         actions,
		indexer:         indexer,
		liveLookupCache: liveLookupCache,
		liveTTL:         30 * time.Second,
	}, nil
}
Example No. 20
// createOutOfDiskPod creates a pod in the given namespace with the requested amount of CPU.
func createOutOfDiskPod(c clientset.Interface, ns, name string, milliCPU int64) {
	podClient := c.Core().Pods(ns)

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "pause",
					Image: framework.GetPauseImageName(c),
					Resources: api.ResourceRequirements{
						Requests: api.ResourceList{
							// Request enough CPU to fit only two pods on a given node.
							api.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
						},
					},
				},
			},
		},
	}

	_, err := podClient.Create(pod)
	framework.ExpectNoError(err)
}
Example No. 21
File: pods.go Project: ncdc/origin
// NewPodEvaluator returns an evaluator that can evaluate pods
func NewPodEvaluator(kubeClient clientset.Interface) quota.Evaluator {
	computeResources := []api.ResourceName{
		api.ResourceCPU,
		api.ResourceMemory,
		api.ResourceRequestsCPU,
		api.ResourceRequestsMemory,
		api.ResourceLimitsCPU,
		api.ResourceLimitsMemory,
	}
	allResources := append(computeResources, api.ResourcePods)
	return &generic.GenericEvaluator{
		Name:              "Evaluator.Pod",
		InternalGroupKind: api.Kind("Pod"),
		InternalOperationResources: map[admission.Operation][]api.ResourceName{
			admission.Create: allResources,
			// TODO: the quota system can only charge for deltas on compute resources when pods support updates.
			// admission.Update: computeResources,
		},
		GetFuncByNamespace: func(namespace, name string) (runtime.Object, error) {
			return kubeClient.Core().Pods(namespace).Get(name)
		},
		ConstraintsFunc:      PodConstraintsFunc,
		MatchedResourceNames: allResources,
		MatchesScopeFunc:     PodMatchesScopeFunc,
		UsageFunc:            PodUsageFunc,
		ListFuncByNamespace: func(namespace string, options api.ListOptions) (runtime.Object, error) {
			return kubeClient.Core().Pods(namespace).List(options)
		},
	}
}
Example No. 22
func New(routes cloudprovider.Routes, kubeClient clientset.Interface, clusterName string, clusterCIDR *net.IPNet) *RouteController {
	if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("route_controller", kubeClient.Core().RESTClient().GetRateLimiter())
	}
	rc := &RouteController{
		routes:      routes,
		kubeClient:  kubeClient,
		clusterName: clusterName,
		clusterCIDR: clusterCIDR,
	}

	rc.nodeStore.Store, rc.nodeController = cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return rc.kubeClient.Core().Nodes().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return rc.kubeClient.Core().Nodes().Watch(options)
			},
		},
		&api.Node{},
		controller.NoResyncPeriodFunc(),
		cache.ResourceEventHandlerFuncs{},
	)

	return rc
}
Example No. 23
// Simplified version of RunRC that does not create an RC, but creates plain Pods.
// Optionally waits for pods to start running (if waitForRunning == true).
// The number of replicas must be non-zero.
func StartPods(c clientset.Interface, replicas int, namespace string, podNamePrefix string,
	pod api.Pod, waitForRunning bool, logFunc func(fmt string, args ...interface{})) error {
	// no pod to start
	if replicas < 1 {
		panic("StartPods: number of replicas must be non-zero")
	}
	startPodsID := string(uuid.NewUUID()) // So that we can label and find them
	for i := 0; i < replicas; i++ {
		podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
		pod.ObjectMeta.Name = podName
		pod.ObjectMeta.Labels["name"] = podName
		pod.ObjectMeta.Labels["startPodsID"] = startPodsID
		pod.Spec.Containers[0].Name = podName
		_, err := c.Core().Pods(namespace).Create(&pod)
		if err != nil {
			return err
		}
	}
	if waitForRunning {
		logFunc("Waiting for running...")
		label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID}))
		err := WaitForPodsWithLabelRunning(c, namespace, label)
		if err != nil {
			return fmt.Errorf("error waiting for %d pods to be running - probably a timeout: %v", replicas, err)
		}
	}
	return nil
}
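A usage sketch; framework.Logf matches the logFunc signature, and the base pod below (with a non-nil Labels map, which StartPods mutates) is an assumption:

// Hypothetical usage, for illustration: five pause pods, wait for running.
func startPausePods(c clientset.Interface, ns string) error {
	basePod := api.Pod{
		ObjectMeta: api.ObjectMeta{Labels: map[string]string{}},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{Name: "pause", Image: framework.GetPauseImageName(c)},
			},
		},
	}
	return StartPods(c, 5, ns, "pause", basePod, true, framework.Logf)
}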
Example No. 24
// NewServiceAccountsController returns a new *ServiceAccountsController.
func NewServiceAccountsController(saInformer informers.ServiceAccountInformer, nsInformer informers.NamespaceInformer, cl clientset.Interface, options ServiceAccountsControllerOptions) *ServiceAccountsController {
	e := &ServiceAccountsController{
		client:                  cl,
		serviceAccountsToEnsure: options.ServiceAccounts,
		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount"),
	}
	if cl != nil && cl.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.Core().RESTClient().GetRateLimiter())
	}

	saInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		DeleteFunc: e.serviceAccountDeleted,
	})
	nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    e.namespaceAdded,
		UpdateFunc: e.namespaceUpdated,
	})

	e.saSynced = saInformer.Informer().HasSynced
	e.saLister = saInformer.Lister()
	e.nsSynced = nsInformer.Informer().HasSynced
	e.nsLister = nsInformer.Lister()

	e.syncHandler = e.syncNamespace

	return e
}
Example No. 25
func checkMirrorPodDisappear(cl clientset.Interface, name, namespace string) error {
	_, err := cl.Core().Pods(namespace).Get(name)
	if errors.IsNotFound(err) {
		return nil
	}
	return goerrors.New("pod did not disappear")
}
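Callers typically poll a check like this; a sketch using wait.Poll from the Kubernetes util/wait package, with an assumed interval and timeout:

// Hypothetical polling wrapper, for illustration only.
func waitForMirrorPodGone(cl clientset.Interface, name, namespace string) error {
	return wait.Poll(time.Second, 2*time.Minute, func() (bool, error) {
		return checkMirrorPodDisappear(cl, name, namespace) == nil, nil
	})
}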
Example No. 26
// NewSync creates a Sync for the ConfigMap `name` in namespace `ns`.
func NewSync(client clientset.Interface, ns string, name string) Sync {
	sync := &kubeSync{
		ns:      ns,
		name:    name,
		client:  client,
		channel: make(chan *Config),
	}

	listWatch := &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			options.FieldSelector = fields.Set{"metadata.name": name}.AsSelector()
			return client.Core().ConfigMaps(ns).List(options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			options.FieldSelector = fields.Set{"metadata.name": name}.AsSelector()
			return client.Core().ConfigMaps(ns).Watch(options)
		},
	}

	store, controller := cache.NewInformer(
		listWatch,
		&api.ConfigMap{},
		time.Duration(0),
		cache.ResourceEventHandlerFuncs{
			AddFunc:    sync.onAdd,
			DeleteFunc: sync.onDelete,
			UpdateFunc: sync.onUpdate,
		})

	sync.store = store
	sync.controller = controller

	return sync
}
Example No. 27
// NewPodEvaluator returns an evaluator that can evaluate pods
// if the specified shared informer factory is not nil, evaluator may use it to support listing functions.
func NewPodEvaluator(kubeClient clientset.Interface, f informers.SharedInformerFactory) quota.Evaluator {
	computeResources := []api.ResourceName{
		api.ResourceCPU,
		api.ResourceMemory,
		api.ResourceRequestsCPU,
		api.ResourceRequestsMemory,
		api.ResourceLimitsCPU,
		api.ResourceLimitsMemory,
	}
	allResources := append(computeResources, api.ResourcePods)
	listFuncByNamespace := listPodsByNamespaceFuncUsingClient(kubeClient)
	if f != nil {
		listFuncByNamespace = generic.ListResourceUsingInformerFunc(f, unversioned.GroupResource{Resource: "pods"})
	}
	return &generic.GenericEvaluator{
		Name:              "Evaluator.Pod",
		InternalGroupKind: api.Kind("Pod"),
		InternalOperationResources: map[admission.Operation][]api.ResourceName{
			admission.Create: allResources,
			// TODO: the quota system can only charge for deltas on compute resources when pods support updates.
			// admission.Update: computeResources,
		},
		GetFuncByNamespace: func(namespace, name string) (runtime.Object, error) {
			return kubeClient.Core().Pods(namespace).Get(name)
		},
		ConstraintsFunc:      PodConstraintsFunc,
		MatchedResourceNames: allResources,
		MatchesScopeFunc:     PodMatchesScopeFunc,
		UsageFunc:            PodUsageFunc,
		ListFuncByNamespace:  listFuncByNamespace,
	}
}
Example No. 28
func fetchDNSScalingConfigMap(c clientset.Interface) (*api.ConfigMap, error) {
	cm, err := c.Core().ConfigMaps(DNSNamespace).Get(DNSAutoscalerLabelName)
	if err != nil {
		return nil, err
	}
	return cm, nil
}
Example No. 29
func ReaperFor(kind schema.GroupKind, c internalclientset.Interface) (Reaper, error) {
	switch kind {
	case api.Kind("ReplicationController"):
		return &ReplicationControllerReaper{c.Core(), Interval, Timeout}, nil

	case extensions.Kind("ReplicaSet"):
		return &ReplicaSetReaper{c.Extensions(), Interval, Timeout}, nil

	case extensions.Kind("DaemonSet"):
		return &DaemonSetReaper{c.Extensions(), Interval, Timeout}, nil

	case api.Kind("Pod"):
		return &PodReaper{c.Core()}, nil

	case api.Kind("Service"):
		return &ServiceReaper{c.Core()}, nil

	case extensions.Kind("Job"), batch.Kind("Job"):
		return &JobReaper{c.Batch(), c.Core(), Interval, Timeout}, nil

	case apps.Kind("StatefulSet"):
		return &StatefulSetReaper{c.Apps(), c.Core(), Interval, Timeout}, nil

	case extensions.Kind("Deployment"):
		return &DeploymentReaper{c.Extensions(), c.Extensions(), Interval, Timeout}, nil

	}
	return nil, &NoSuchReaperError{kind}
}
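A usage sketch; the Stop signature below is an assumption based on kubectl's Reaper interface of this era, not something shown in the snippet:

// Hypothetical usage, for illustration only.
func reapRC(c internalclientset.Interface, ns, name string) error {
	reaper, err := ReaperFor(api.Kind("ReplicationController"), c)
	if err != nil {
		return err
	}
	// Stop(namespace, name, timeout, gracePeriod) is assumed here.
	return reaper.Stop(ns, name, 5*time.Minute, nil)
}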
Example No. 30
// NewDeploymentController creates a new DeploymentController.
func NewDeploymentController(rcInformer, podInformer framework.SharedIndexInformer, kc kclientset.Interface, sa, image string, env []kapi.EnvVar, codec runtime.Codec) *DeploymentController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(&kcoreclient.EventSinkImpl{Interface: kc.Core().Events("")})
	recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployments-controller"})

	c := &DeploymentController{
		rn: kc.Core(),
		pn: kc.Core(),

		queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),

		serviceAccount: sa,
		deployerImage:  image,
		environment:    env,
		recorder:       recorder,
		codec:          codec,
	}

	c.rcStore.Indexer = rcInformer.GetIndexer()
	rcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addReplicationController,
		UpdateFunc: c.updateReplicationController,
	})

	c.podStore.Indexer = podInformer.GetIndexer()
	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		UpdateFunc: c.updatePod,
		DeleteFunc: c.deletePod,
	})

	c.rcStoreSynced = rcInformer.HasSynced
	c.podStoreSynced = podInformer.HasSynced

	return c
}