Example #1
// createOutOfDiskPod creates a pod in the given namespace with the requested amount of CPU.
func createOutOfDiskPod(c clientset.Interface, ns, name string, milliCPU int64) {
	podClient := c.Core().Pods(ns)

	pod := &v1.Pod{
		ObjectMeta: v1.ObjectMeta{
			Name: name,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "pause",
					Image: framework.GetPauseImageName(c),
					Resources: v1.ResourceRequirements{
						Requests: v1.ResourceList{
							// Request enough CPU to fit only two pods on a given node.
							v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
						},
					},
				},
			},
		},
	}

	_, err := podClient.Create(pod)
	framework.ExpectNoError(err)
}
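A minimal usage sketch, assuming it is called from an e2e test that already has a clientset c and a namespace ns; the pod name and CPU value are illustrative:

// Request 600m so that roughly two such pods fit on a node with ~1.5 CPU allocatable.
createOutOfDiskPod(c, ns, "ood-pod-0", 600)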
Example #2
// LabelPodsWithHash labels all pods in the given podList with the new hash label.
// The returned bool value can be used to tell if all pods are actually labeled.
func LabelPodsWithHash(podList *v1.PodList, rs *extensions.ReplicaSet, c clientset.Interface, namespace, hash string) (bool, error) {
	allPodsLabeled := true
	for _, pod := range podList.Items {
		// Only label pods that don't already have the new hash
		if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash {
			if _, podUpdated, err := podutil.UpdatePodWithRetries(c.Core().Pods(namespace), &pod,
				func(podToUpdate *v1.Pod) error {
					// Precondition: the pod doesn't contain the new hash in its label.
					if podToUpdate.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
						return errors.ErrPreconditionViolated
					}
					podToUpdate.Labels = labelsutil.AddLabel(podToUpdate.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash)
					return nil
				}); err != nil {
				return false, fmt.Errorf("error in adding template hash label %s to pod %+v: %s", hash, pod, err)
			} else if podUpdated {
				glog.V(4).Infof("Labeled %s %s/%s of %s %s/%s with hash %s.", pod.Kind, pod.Namespace, pod.Name, rs.Kind, rs.Namespace, rs.Name, hash)
			} else {
				// If the pod wasn't updated but no error was returned, we hit a "pod not found" or "precondition violated" error,
				// so we can't claim that all pods are labeled.
				allPodsLabeled = false
			}
		}
	}
	return allPodsLabeled, nil
}
Example #3
func checkMirrorPodDisappear(cl clientset.Interface, name, namespace string) error {
	_, err := cl.Core().Pods(namespace).Get(name, metav1.GetOptions{})
	if errors.IsNotFound(err) {
		return nil
	}
	return goerrors.New("pod did not disappear")
}
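A hedged sketch of how this check is typically driven from a polling loop; the interval, timeout, and variable names are assumptions:

// Poll until the mirror pod is gone; checkMirrorPodDisappear returns nil only when the Get reports IsNotFound.
err := wait.Poll(time.Second, 2*time.Minute, func() (bool, error) {
	return checkMirrorPodDisappear(cl, mirrorPodName, ns) == nil, nil
})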
Example #4
func buildAuth(nodeName types.NodeName, client clientset.Interface, config componentconfig.KubeletConfiguration) (server.AuthInterface, error) {
	// Get clients, if provided
	var (
		tokenClient authenticationclient.TokenReviewInterface
		sarClient   authorizationclient.SubjectAccessReviewInterface
	)
	if client != nil && !reflect.ValueOf(client).IsNil() {
		tokenClient = client.Authentication().TokenReviews()
		sarClient = client.Authorization().SubjectAccessReviews()
	}

	authenticator, err := buildAuthn(tokenClient, config.Authentication)
	if err != nil {
		return nil, err
	}

	attributes := server.NewNodeAuthorizerAttributesGetter(nodeName)

	authorizer, err := buildAuthz(sarClient, config.Authorization)
	if err != nil {
		return nil, err
	}

	return server.NewKubeletAuth(authenticator, attributes, authorizer), nil
}
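A minimal caller sketch, assuming kubeCfg holds the loaded KubeletConfiguration; wiring the result into the kubelet's secure server is not shown:

auth, err := buildAuth(nodeName, kubeClient, kubeCfg)
if err != nil {
	return fmt.Errorf("failed to build kubelet auth: %v", err)
}
// auth satisfies server.AuthInterface and is handed to the kubelet's HTTPS handlers.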
Example #5
// RemoveLabelOffNode cleans up labels that were temporarily added to a node.
// It does not fail if a target label doesn't exist or has already been removed.
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []string) error {
	var node *v1.Node
	var err error
	for attempt := 0; attempt < retries; attempt++ {
		node, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if node.Labels == nil {
			return nil
		}
		for _, labelKey := range labelKeys {
			if node.Labels == nil || len(node.Labels[labelKey]) == 0 {
				break
			}
			delete(node.Labels, labelKey)
		}
		_, err = c.Core().Nodes().Update(node)
		if err != nil {
			if !apierrs.IsConflict(err) {
				return err
			} else {
				glog.V(2).Infof("Conflict when trying to remove labels %v from %v", labelKeys, nodeName)
			}
		} else {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	return err
}
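A usage sketch for test cleanup, assuming the label key below was added earlier in the test (the key name is illustrative):

if err := RemoveLabelOffNode(c, nodeName, []string{"example.com/e2e-scheduling-label"}); err != nil {
	framework.Logf("Failed to remove labels from node %s: %v", nodeName, err)
}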
Example #6
// getMetrics retrieves the raw metrics exposed by the apiserver at /metrics.
func getMetrics(c clientset.Interface) (string, error) {
	body, err := c.Core().RESTClient().Get().AbsPath("/metrics").DoRaw()
	if err != nil {
		return "", err
	}
	return string(body), nil
}
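A minimal usage sketch; since the body can be large, this only logs its size:

metricsText, err := getMetrics(c)
if err != nil {
	framework.Logf("Failed to fetch /metrics: %v", err)
} else {
	framework.Logf("Fetched %d bytes of apiserver metrics", len(metricsText))
}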
Example #7
// updateNodeLabels updates the labels of the nodes given by nodeNames.
// If a label to add already exists, it is overwritten. If a label to remove doesn't exist,
// it is silently ignored.
// TODO: migrate to use framework.AddOrUpdateLabelOnNode/framework.RemoveLabelOffNode
func updateNodeLabels(c clientset.Interface, nodeNames sets.String, toAdd, toRemove map[string]string) {
	const maxRetries = 5
	for nodeName := range nodeNames {
		var node *v1.Node
		var err error
		for i := 0; i < maxRetries; i++ {
			node, err = c.Core().Nodes().Get(nodeName)
			if err != nil {
				framework.Logf("Error getting node %s: %v", nodeName, err)
				continue
			}
			if toAdd != nil {
				for k, v := range toAdd {
					node.ObjectMeta.Labels[k] = v
				}
			}
			if toRemove != nil {
				for k := range toRemove {
					delete(node.ObjectMeta.Labels, k)
				}
			}
			_, err = c.Core().Nodes().Update(node)
			if err != nil {
				framework.Logf("Error updating node %s: %v", nodeName, err)
			} else {
				break
			}
		}
		Expect(err).NotTo(HaveOccurred())
	}
}
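A hedged add-then-remove sketch; the node names and label key/value are assumptions:

nodeNames := sets.NewString("node-1", "node-2")
testLabels := map[string]string{"example-scheduling-label": "on"}
updateNodeLabels(c, nodeNames, testLabels, nil)       // add/overwrite
defer updateNodeLabels(c, nodeNames, nil, testLabels) // remove on test exit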
Example #8
func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) {
	cm, err := c.Core().ConfigMaps(api.NamespaceSystem).Get(DNSAutoscalerLabelName, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	return cm, nil
}
Example #9
func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) {
	cm, err := c.Core().ConfigMaps(DNSNamespace).Get(DNSAutoscalerLabelName)
	if err != nil {
		return nil, err
	}
	return cm, nil
}
Example #10
// NewSync returns a Sync that watches the ConfigMap `name` in namespace `ns`.
func NewSync(client clientset.Interface, ns string, name string) Sync {
	sync := &kubeSync{
		ns:      ns,
		name:    name,
		client:  client,
		channel: make(chan *Config),
	}

	listWatch := &cache.ListWatch{
		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = fields.Set{"metadata.name": name}.AsSelector().String()
			return client.Core().ConfigMaps(ns).List(options)
		},
		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = fields.Set{"metadata.name": name}.AsSelector().String()
			return client.Core().ConfigMaps(ns).Watch(options)
		},
	}

	store, controller := cache.NewInformer(
		listWatch,
		&v1.ConfigMap{},
		time.Duration(0),
		cache.ResourceEventHandlerFuncs{
			AddFunc:    sync.onAdd,
			DeleteFunc: sync.onDelete,
			UpdateFunc: sync.onUpdate,
		})

	sync.store = store
	sync.controller = controller

	return sync
}
Example #11
// Wait for the pv and pvc to bind to each other.
func waitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {

	// Wait for newly created PVC to bind to the PV
	framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
	err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)
	Expect(err).NotTo(HaveOccurred())

	// Wait for PersistentVolume.Status.Phase to be Bound, which it should be
	// since the PVC is already bound.
	err = framework.WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second)
	Expect(err).NotTo(HaveOccurred())

	// Re-get the pv object
	pv, err = c.Core().PersistentVolumes().Get(pv.Name)
	Expect(err).NotTo(HaveOccurred())

	// Re-get the pvc object
	pvc, err = c.Core().PersistentVolumeClaims(ns).Get(pvc.Name)
	Expect(err).NotTo(HaveOccurred())

	// The pv and pvc are both bound, but to each other?
	// Check that the PersistentVolume.ClaimRef matches the PVC
	Expect(pv.Spec.ClaimRef).NotTo(BeNil())
	Expect(pv.Spec.ClaimRef.Name).To(Equal(pvc.Name))
	Expect(pvc.Spec.VolumeName).To(Equal(pv.Name))
	Expect(pv.Spec.ClaimRef.UID).To(Equal(pvc.UID))
}
Example #12
func deleteDNSScalingConfigMap(c clientset.Interface) error {
	if err := c.Core().ConfigMaps(api.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil {
		return err
	}
	framework.Logf("DNS autoscaling ConfigMap deleted.")
	return nil
}
Example #13
func createController(client clientset.Interface, controllerName, namespace string, podCount int, podTemplate *v1.Pod) error {
	rc := &v1.ReplicationController{
		ObjectMeta: v1.ObjectMeta{
			Name: controllerName,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: func(i int) *int32 { x := int32(i); return &x }(podCount),
			Selector: map[string]string{"name": controllerName},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: v1.ObjectMeta{
					Labels: map[string]string{"name": controllerName},
				},
				Spec: podTemplate.Spec,
			},
		},
	}
	var err error
	for attempt := 0; attempt < retries; attempt++ {
		// Assign to the outer err (not :=) so the logged and returned error reflect the actual Create failure.
		if _, err = client.Core().ReplicationControllers(namespace).Create(rc); err == nil {
			return nil
		}
		glog.Errorf("Error while creating rc, maybe retry: %v", err)
	}
	return fmt.Errorf("Terminal error while creating rc, won't retry: %v", err)
}
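A minimal caller sketch; the controller name, replica count, and podTemplate are illustrative:

if err := createController(client, "load-test-rc", ns, 30, podTemplate); err != nil {
	glog.Errorf("Failed to create RC: %v", err)
	return err
}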
Example #14
// Query sends a command to the server and returns the Response
func Query(c clientset.Interface, query string) (*influxdb.Response, error) {
	result, err := c.Core().RESTClient().Get().
		Prefix("proxy").
		Namespace("kube-system").
		Resource("services").
		Name(influxdbService+":api").
		Suffix("query").
		Param("q", query).
		Param("db", influxdbDatabaseName).
		Param("epoch", "s").
		Do().
		Raw()

	if err != nil {
		return nil, err
	}

	var response influxdb.Response
	dec := json.NewDecoder(bytes.NewReader(result))
	dec.UseNumber()
	err = dec.Decode(&response)

	if err != nil {
		return nil, err
	}
	return &response, nil
}
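A hedged usage sketch; the InfluxQL statement is illustrative and the decoded response is only logged:

resp, err := Query(c, "SHOW MEASUREMENTS")
if err != nil {
	framework.Logf("InfluxDB query failed: %v", err)
} else {
	framework.Logf("InfluxDB response: %+v", resp)
}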
Example #15
// NewResourceQuotaEvaluator returns an evaluator that can evaluate resource quotas
func NewResourceQuotaEvaluator(kubeClient clientset.Interface) quota.Evaluator {
	allResources := []api.ResourceName{api.ResourceQuotas}
	return &generic.GenericEvaluator{
		Name:              "Evaluator.ResourceQuota",
		InternalGroupKind: api.Kind("ResourceQuota"),
		InternalOperationResources: map[admission.Operation][]api.ResourceName{
			admission.Create: allResources,
		},
		MatchedResourceNames: allResources,
		MatchesScopeFunc:     generic.MatchesNoScopeFunc,
		ConstraintsFunc:      generic.ObjectCountConstraintsFunc(api.ResourceQuotas),
		UsageFunc:            generic.ObjectCountUsageFunc(api.ResourceQuotas),
		ListFuncByNamespace: func(namespace string, options v1.ListOptions) ([]runtime.Object, error) {
			itemList, err := kubeClient.Core().ResourceQuotas(namespace).List(options)
			if err != nil {
				return nil, err
			}
			results := make([]runtime.Object, 0, len(itemList.Items))
			for i := range itemList.Items {
				results = append(results, &itemList.Items[i])
			}
			return results, nil
		},
	}
}
Example #16
// NewPodEvaluator returns an evaluator that can evaluate pods.
// If the specified shared informer factory is not nil, the evaluator may use it to support listing functions.
func NewPodEvaluator(kubeClient clientset.Interface, f informers.SharedInformerFactory) quota.Evaluator {
	computeResources := []api.ResourceName{
		api.ResourceCPU,
		api.ResourceMemory,
		api.ResourceRequestsCPU,
		api.ResourceRequestsMemory,
		api.ResourceLimitsCPU,
		api.ResourceLimitsMemory,
	}
	allResources := append(computeResources, api.ResourcePods)
	listFuncByNamespace := listPodsByNamespaceFuncUsingClient(kubeClient)
	if f != nil {
		listFuncByNamespace = generic.ListResourceUsingInformerFunc(f, schema.GroupResource{Resource: "pods"})
	}
	return &generic.GenericEvaluator{
		Name:              "Evaluator.Pod",
		InternalGroupKind: api.Kind("Pod"),
		InternalOperationResources: map[admission.Operation][]api.ResourceName{
			admission.Create: allResources,
			// TODO: the quota system can only charge for deltas on compute resources when pods support updates.
			// admission.Update: computeResources,
		},
		GetFuncByNamespace: func(namespace, name string) (runtime.Object, error) {
			return kubeClient.Core().Pods(namespace).Get(name)
		},
		ConstraintsFunc:      PodConstraintsFunc,
		MatchedResourceNames: allResources,
		MatchesScopeFunc:     PodMatchesScopeFunc,
		UsageFunc:            PodUsageFunc,
		ListFuncByNamespace:  listFuncByNamespace,
	}
}
Example #17
func New(routes cloudprovider.Routes, kubeClient clientset.Interface, clusterName string, clusterCIDR *net.IPNet) *RouteController {
	if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("route_controller", kubeClient.Core().RESTClient().GetRateLimiter())
	}
	rc := &RouteController{
		routes:      routes,
		kubeClient:  kubeClient,
		clusterName: clusterName,
		clusterCIDR: clusterCIDR,
	}

	rc.nodeStore.Store, rc.nodeController = cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				return rc.kubeClient.Core().Nodes().List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return rc.kubeClient.Core().Nodes().Watch(options)
			},
		},
		&v1.Node{},
		controller.NoResyncPeriodFunc(),
		cache.ResourceEventHandlerFuncs{},
	)

	return rc
}
Example #18
// WaitForClusterSizeFunc waits until the cluster size satisfies the given size function.
func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
		nodes, err := c.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector().String()})
		if err != nil {
			glog.Warningf("Failed to list nodes: %v", err)
			continue
		}
		numNodes := len(nodes.Items)

		// Filter out not-ready nodes.
		framework.FilterNodes(nodes, func(node v1.Node) bool {
			return framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
		})
		numReady := len(nodes.Items)

		if numNodes == numReady && sizeFunc(numReady) {
			glog.Infof("Cluster has reached the desired size")
			return nil
		}
		glog.Infof("Waiting for cluster, current size %d, not ready nodes %d", numNodes, numNodes-numReady)
	}
	return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout)
}
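A usage sketch, assuming the test wants at least three ready, schedulable nodes; the predicate and timeout are illustrative:

err := WaitForClusterSizeFunc(c, func(size int) bool { return size >= 3 }, 20*time.Minute)
framework.ExpectNoError(err)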
Example #19
// NewServiceAccountsController returns a new *ServiceAccountsController.
func NewServiceAccountsController(saInformer informers.ServiceAccountInformer, nsInformer informers.NamespaceInformer, cl clientset.Interface, options ServiceAccountsControllerOptions) *ServiceAccountsController {
	e := &ServiceAccountsController{
		client:                  cl,
		serviceAccountsToEnsure: options.ServiceAccounts,
		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount"),
	}
	if cl != nil && cl.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.Core().RESTClient().GetRateLimiter())
	}

	saInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		DeleteFunc: e.serviceAccountDeleted,
	})
	nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    e.namespaceAdded,
		UpdateFunc: e.namespaceUpdated,
	})

	e.saSynced = saInformer.Informer().HasSynced
	e.saLister = saInformer.Lister()
	e.nsSynced = nsInformer.Informer().HasSynced
	e.nsLister = nsInformer.Lister()

	e.syncHandler = e.syncNamespace

	return e
}
Example #20
// StartPods is a simplified version of RunRC that creates plain pods instead of a replication controller.
// It optionally waits for the pods to start running (if waitForRunning == true).
// The number of replicas must be non-zero.
func StartPods(c clientset.Interface, replicas int, namespace string, podNamePrefix string,
	pod v1.Pod, waitForRunning bool, logFunc func(fmt string, args ...interface{})) error {
	// no pod to start
	if replicas < 1 {
		panic("StartPods: number of replicas must be non-zero")
	}
	startPodsID := string(uuid.NewUUID()) // So that we can label and find them
	for i := 0; i < replicas; i++ {
		podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
		pod.ObjectMeta.Name = podName
		pod.ObjectMeta.Labels["name"] = podName
		pod.ObjectMeta.Labels["startPodsID"] = startPodsID
		pod.Spec.Containers[0].Name = podName
		_, err := c.Core().Pods(namespace).Create(&pod)
		if err != nil {
			return err
		}
	}
	if waitForRunning {
		logFunc("Waiting for running...")
		label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID}))
		err := WaitForPodsWithLabelRunning(c, namespace, label)
		if err != nil {
			return fmt.Errorf("Error waiting for %d pods to be running - probably a timeout: %v", replicas, err)
		}
	}
	return nil
}
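A caller sketch; the replica count and name prefix are illustrative, and podTemplate is assumed to have a non-nil Labels map and at least one container (both are mutated by StartPods):

err := StartPods(c, 5, ns, "startpods-example", *podTemplate, true, framework.Logf)
framework.ExpectNoError(err)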
Example #21
// Run a test container to try and contact the Kubernetes api-server from a pod, wait for it
// to flip to Ready, log its output and delete it.
func runKubernetesServiceTestContainer(c clientset.Interface, ns string) {
	path := "test/images/clusterapi-tester/pod.yaml"
	p, err := podFromManifest(path)
	if err != nil {
		framework.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
		return
	}
	p.Namespace = ns
	if _, err := c.Core().Pods(ns).Create(p); err != nil {
		framework.Logf("Failed to create %v: %v", p.Name, err)
		return
	}
	defer func() {
		if err := c.Core().Pods(ns).Delete(p.Name, nil); err != nil {
			framework.Logf("Failed to delete pod %v: %v", p.Name, err)
		}
	}()
	timeout := 5 * time.Minute
	if err := framework.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil {
		framework.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
		return
	}
	logs, err := framework.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
	if err != nil {
		framework.Logf("Failed to retrieve logs from %v: %v", p.Name, err)
	} else {
		framework.Logf("Output of clusterapi-tester:\n%v", logs)
	}
}
Example #22
// PatchNodeStatus patches node status.
func PatchNodeStatus(c clientset.Interface, nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) (*v1.Node, error) {
	oldData, err := json.Marshal(oldNode)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err)
	}

	// Reset the spec to make sure only a patch for Status or ObjectMeta is generated.
	// Note that we don't reset ObjectMeta here, because:
	// 1. This aligns with Nodes().UpdateStatus().
	// 2. Some components use this to update node annotations.
	newNode.Spec = oldNode.Spec
	newData, err := json.Marshal(newNode)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNode, nodeName, err)
	}

	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
	if err != nil {
		return nil, fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
	}

	updatedNode, err := c.Core().Nodes().Patch(string(nodeName), api.StrategicMergePatchType, patchBytes, "status")
	if err != nil {
		return nil, fmt.Errorf("failed to patch status %q for node %q: %v", patchBytes, nodeName, err)
	}
	return updatedNode, nil
}
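A hedged caller sketch; updatedNode is assumed to be a copy of oldNode whose Status has already been modified:

patched, err := PatchNodeStatus(c, nodeName, oldNode, updatedNode)
if err != nil {
	glog.Errorf("Failed to patch status of node %s: %v", nodeName, err)
	return
}
glog.V(4).Infof("Patched node %s, resource version %s", patched.Name, patched.ResourceVersion)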
Example #23
// NewReplicationManager creates a replication manager
func NewReplicationManager(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
	return newReplicationManager(
		eventBroadcaster.NewRecorder(v1.EventSource{Component: "replication-controller"}),
		podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize, garbageCollectorEnabled)
}
Example #24
func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) error {
	_, err := c.Core().ConfigMaps(api.NamespaceSystem).Update(configMap)
	if err != nil {
		return err
	}
	framework.Logf("DNS autoscaling ConfigMap updated.")
	return nil
}
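A hedged round-trip sketch that pairs this with fetchDNSScalingConfigMap from Example #8; the linear-mode parameters written into Data are illustrative:

cm, err := fetchDNSScalingConfigMap(c)
framework.ExpectNoError(err)
cm.Data = map[string]string{"linear": `{"nodesPerReplica": 256, "min": 1}`}
framework.ExpectNoError(updateDNSScalingConfigMap(c, cm))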
Example #25
func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions) (*containerResourceGatherer, error) {
	g := containerResourceGatherer{
		client:               c,
		stopCh:               make(chan struct{}),
		containerIDToNameMap: make(map[string]string),
		containerIDs:         make([]string, 0),
		options:              options,
	}

	if options.inKubemark {
		g.workerWg.Add(1)
		g.workers = append(g.workers, resourceGatherWorker{
			inKubemark: true,
			stopCh:     g.stopCh,
			wg:         &g.workerWg,
			finished:   false,
		})
	} else {
		pods, err := c.Core().Pods("kube-system").List(v1.ListOptions{})
		if err != nil {
			Logf("Error while listing Pods: %v", err)
			return nil, err
		}
		for _, pod := range pods.Items {
			for _, container := range pod.Status.ContainerStatuses {
				containerID := strings.TrimPrefix(container.ContainerID, "docker:/")
				g.containerIDToNameMap[containerID] = pod.Name + "/" + container.Name
				g.containerIDs = append(g.containerIDs, containerID)
			}
		}
		nodeList, err := c.Core().Nodes().List(v1.ListOptions{})
		if err != nil {
			Logf("Error while listing Nodes: %v", err)
			return nil, err
		}

		for _, node := range nodeList.Items {
			if !options.masterOnly || system.IsMasterNode(node.Name) {
				g.workerWg.Add(1)
				g.workers = append(g.workers, resourceGatherWorker{
					c:                    c,
					nodeName:             node.Name,
					wg:                   &g.workerWg,
					containerIDToNameMap: g.containerIDToNameMap,
					containerIDs:         g.containerIDs,
					stopCh:               g.stopCh,
					finished:             false,
					inKubemark:           false,
				})
				if options.masterOnly {
					break
				}
			}
		}
	}
	return &g, nil
}
Example #26
// waitForJobFinish waits for the job to reach the given number of completions.
func waitForJobFinish(c clientset.Interface, ns, jobName string, completions int32) error {
	return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
		curr, err := c.Batch().Jobs(ns).Get(jobName)
		if err != nil {
			return false, err
		}
		return curr.Status.Succeeded == completions, nil
	})
}
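A usage sketch; the job name and completion count are illustrative:

err := waitForJobFinish(c, ns, "example-job", 2)
Expect(err).NotTo(HaveOccurred())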
Example #27
func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) error {
	pod, err := c.Core().Pods(namespace).Create(podOnNode(podName, nodeName, serveHostnameImage))
	if err == nil {
		framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
	} else {
		framework.Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err)
	}
	return err
}
Example #28
// NewNodeLister builds a node lister.
func NewNodeLister(kubeClient client.Interface) *ReadyNodeLister {
	listWatcher := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "nodes", apiv1.NamespaceAll, fields.Everything())
	nodeLister := &cache.StoreToNodeLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}
	reflector := cache.NewReflector(listWatcher, &apiv1.Node{}, nodeLister.Store, time.Hour)
	reflector.Run()
	return &ReadyNodeLister{
		nodeLister: nodeLister,
	}
}
Example #29
func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
	rc, err := c.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	*(rc.Spec.Replicas) = replicas
	_, err = c.Core().ReplicationControllers(rc.Namespace).Update(rc)
	return err
}
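A minimal sketch scaling a ReplicationController down to zero; the RC name is illustrative:

err := resizeRC(c, ns, "my-hostname-rc", 0)
framework.ExpectNoError(err)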
Example #30
// waitForJobsAtLeast waits for at least the given number of jobs to appear.
func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error {
	return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
		jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{})
		if err != nil {
			return false, err
		}
		return len(jobs.Items) >= atLeast, nil
	})
}