// Query sends a command to the server and returns the Response
func Query(c clientset.Interface, query string) (*influxdb.Response, error) {
	ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
	defer cancel()

	result, err := c.Core().RESTClient().Get().
		Prefix("proxy").
		Namespace("kube-system").
		Resource("services").
		Name(influxdbService+":api").
		Suffix("query").
		Param("q", query).
		Param("db", influxdbDatabaseName).
		Param("epoch", "s").
		// Attach the timeout context to the request so the ctx.Err() check below is meaningful.
		Context(ctx).
		Do().
		Raw()
	if err != nil {
		if ctx.Err() != nil {
			framework.Failf("Failed to query influx db: %v", err)
		}
		return nil, err
	}

	var response influxdb.Response
	dec := json.NewDecoder(bytes.NewReader(result))
	dec.UseNumber()
	err = dec.Decode(&response)
	if err != nil {
		return nil, err
	}
	return &response, nil
}
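// Example (hedged sketch, not part of the original source): issuing a simple InfluxDB
// query through Query and counting the returned result sets. The Results field follows
// the influxdb client library's Response type; the query string itself is an
// illustrative assumption.
func exampleCountResults(c clientset.Interface) (int, error) {
	response, err := Query(c, "show series")
	if err != nil {
		return 0, err
	}
	return len(response.Results), nil
}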
// RemoveLabelOffNode is for cleaning up labels temporarily added to node,
// won't fail if target label doesn't exist or has been removed.
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []string) error {
	var node *v1.Node
	var err error
	for attempt := 0; attempt < retries; attempt++ {
		node, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if node.Labels == nil {
			return nil
		}
		for _, labelKey := range labelKeys {
			if node.Labels == nil || len(node.Labels[labelKey]) == 0 {
				break
			}
			delete(node.Labels, labelKey)
		}
		_, err = c.Core().Nodes().Update(node)
		if err != nil {
			if !apierrs.IsConflict(err) {
				return err
			} else {
				glog.V(2).Infof("Conflict when trying to remove labels %v from %v", labelKeys, nodeName)
			}
		} else {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	return err
}
// WaitForClusterSizeFunc waits until the number of ready, schedulable nodes satisfies the given size function.
func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
		nodes, err := c.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector().String()})
		if err != nil {
			glog.Warningf("Failed to list nodes: %v", err)
			continue
		}
		numNodes := len(nodes.Items)

		// Filter out not-ready nodes.
		framework.FilterNodes(nodes, func(node v1.Node) bool {
			return framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
		})
		numReady := len(nodes.Items)

		if numNodes == numReady && sizeFunc(numReady) {
			glog.Infof("Cluster has reached the desired size")
			return nil
		}
		glog.Infof("Waiting for cluster, current size %d, not ready nodes %d", numNodes, numNodes-numReady)
	}
	return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout)
}
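// Example (hedged sketch, not part of the original source): waiting for the cluster to
// shrink to at most two ready nodes, e.g. after a scale-down. The threshold and timeout
// values are illustrative assumptions.
func exampleWaitForScaleDown(c clientset.Interface) error {
	return WaitForClusterSizeFunc(c, func(size int) bool { return size <= 2 }, 15*time.Minute)
}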
// NewCloudNodeController creates a CloudNodeController object
func NewCloudNodeController(
	nodeInformer informers.NodeInformer,
	kubeClient clientset.Interface,
	cloud cloudprovider.Interface,
	nodeMonitorPeriod time.Duration) (*CloudNodeController, error) {

	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "cloudcontrollermanager"})
	eventBroadcaster.StartLogging(glog.Infof)
	if kubeClient != nil {
		glog.V(0).Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
	} else {
		glog.V(0).Infof("No api server defined - no events will be sent to API server.")
	}

	cnc := &CloudNodeController{
		nodeInformer:      nodeInformer,
		kubeClient:        kubeClient,
		recorder:          recorder,
		cloud:             cloud,
		nodeMonitorPeriod: nodeMonitorPeriod,
	}
	return cnc, nil
}
// readTransactions reads # of transactions from the k8petstore web server endpoint.
// For more details see the source of the k8petstore web server.
func readTransactions(c clientset.Interface, ns string) (error, int) {
	proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Core().RESTClient().Get())
	if errProxy != nil {
		return errProxy, -1
	}

	ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
	defer cancel()

	body, err := proxyRequest.Namespace(ns).
		Context(ctx).
		Name("frontend").
		Suffix("llen").
		DoRaw()
	if err != nil {
		if ctx.Err() != nil {
			framework.Failf("Failed to read petstore transactions: %v", err)
		}
		return err, -1
	}
	totalTrans, err := strconv.Atoi(string(body))
	return err, totalTrans
}
// createOutOfDiskPod creates a pod in the given namespace with the requested amount of CPU.
func createOutOfDiskPod(c clientset.Interface, ns, name string, milliCPU int64) {
	podClient := c.Core().Pods(ns)

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "pause",
					Image: framework.GetPauseImageName(c),
					Resources: v1.ResourceRequirements{
						Requests: v1.ResourceList{
							// Request enough CPU to fit only two pods on a given node.
							v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
						},
					},
				},
			},
		},
	}

	_, err := podClient.Create(pod)
	framework.ExpectNoError(err)
}
// PatchNodeStatus patches node status.
func PatchNodeStatus(c clientset.Interface, nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) (*v1.Node, error) {
	oldData, err := json.Marshal(oldNode)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err)
	}

	// Reset spec to make sure only patch for Status or ObjectMeta is generated.
	// Note that we don't reset ObjectMeta here, because:
	// 1. This aligns with Nodes().UpdateStatus().
	// 2. Some component does use this to update node annotations.
	newNode.Spec = oldNode.Spec
	newData, err := json.Marshal(newNode)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNode, nodeName, err)
	}

	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
	if err != nil {
		return nil, fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
	}

	updatedNode, err := c.Core().Nodes().Patch(string(nodeName), types.StrategicMergePatchType, patchBytes, "status")
	if err != nil {
		return nil, fmt.Errorf("failed to patch status %q for node %q: %v", patchBytes, nodeName, err)
	}
	return updatedNode, nil
}
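// Example (hedged sketch, not part of the original source): one way to call PatchNodeStatus
// is to fetch the node twice so oldNode and newNode are independent objects, mutate only the
// status of the copy, and let the strategic merge patch carry the delta. The appended
// condition values here are illustrative assumptions.
func examplePatchNodeCondition(c clientset.Interface, nodeName types.NodeName) error {
	oldNode, err := c.Core().Nodes().Get(string(nodeName), metav1.GetOptions{})
	if err != nil {
		return err
	}
	newNode, err := c.Core().Nodes().Get(string(nodeName), metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Record an extra condition on the copy; only the status delta ends up in the patch.
	newNode.Status.Conditions = append(newNode.Status.Conditions, v1.NodeCondition{
		Type:   v1.NodeOutOfDisk,
		Status: v1.ConditionTrue,
		Reason: "ExampleOnly",
	})
	_, err = PatchNodeStatus(c, nodeName, oldNode, newNode)
	return err
}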
// New returns a RouteController that watches cluster nodes so that cloud provider routes can be managed for them.
func New(routes cloudprovider.Routes, kubeClient clientset.Interface, clusterName string, clusterCIDR *net.IPNet) *RouteController {
	if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("route_controller", kubeClient.Core().RESTClient().GetRateLimiter())
	}
	rc := &RouteController{
		routes:      routes,
		kubeClient:  kubeClient,
		clusterName: clusterName,
		clusterCIDR: clusterCIDR,
	}

	rc.nodeStore.Store, rc.nodeController = cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				return rc.kubeClient.Core().Nodes().List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return rc.kubeClient.Core().Nodes().Watch(options)
			},
		},
		&v1.Node{},
		controller.NoResyncPeriodFunc(),
		cache.ResourceEventHandlerFuncs{},
	)
	return rc
}
// Query sends a command to the server and returns the Response
func Query(c clientset.Interface, query string) (*influxdb.Response, error) {
	result, err := c.Core().RESTClient().Get().
		Prefix("proxy").
		Namespace("kube-system").
		Resource("services").
		Name(influxdbService+":api").
		Suffix("query").
		Param("q", query).
		Param("db", influxdbDatabaseName).
		Param("epoch", "s").
		Do().
		Raw()
	if err != nil {
		return nil, err
	}

	var response influxdb.Response
	dec := json.NewDecoder(bytes.NewReader(result))
	dec.UseNumber()
	err = dec.Decode(&response)
	if err != nil {
		return nil, err
	}
	return &response, nil
}
// NewMetricsGrabber creates a MetricsGrabber for the requested components; grabbing from the
// scheduler and controller-manager is disabled when no master node is registered.
func NewMetricsGrabber(c clientset.Interface, kubelets bool, scheduler bool, controllers bool, apiServer bool) (*MetricsGrabber, error) {
	registeredMaster := false
	masterName := ""
	nodeList, err := c.Core().Nodes().List(v1.ListOptions{})
	if err != nil {
		return nil, err
	}
	if len(nodeList.Items) < 1 {
		glog.Warning("Can't find any Nodes in the API server to grab metrics from")
	}
	for _, node := range nodeList.Items {
		if system.IsMasterNode(node.Name) {
			registeredMaster = true
			masterName = node.Name
			break
		}
	}
	if !registeredMaster {
		scheduler = false
		controllers = false
		glog.Warningf("Master node is not registered. Grabbing metrics from Scheduler and ControllerManager is disabled.")
	}

	return &MetricsGrabber{
		client:                    c,
		grabFromApiServer:         apiServer,
		grabFromControllerManager: controllers,
		grabFromKubelets:          kubelets,
		grabFromScheduler:         scheduler,
		masterName:                masterName,
		registeredMaster:          registeredMaster,
	}, nil
}
// NewServiceAccountsController returns a new *ServiceAccountsController.
func NewServiceAccountsController(saInformer informers.ServiceAccountInformer, nsInformer informers.NamespaceInformer, cl clientset.Interface, options ServiceAccountsControllerOptions) *ServiceAccountsController {
	e := &ServiceAccountsController{
		client:                  cl,
		serviceAccountsToEnsure: options.ServiceAccounts,
		queue:                   workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount"),
	}
	if cl != nil && cl.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.Core().RESTClient().GetRateLimiter())
	}

	saInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		DeleteFunc: e.serviceAccountDeleted,
	})
	nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    e.namespaceAdded,
		UpdateFunc: e.namespaceUpdated,
	})

	e.saSynced = saInformer.Informer().HasSynced
	e.saLister = saInformer.Lister()
	e.nsSynced = nsInformer.Informer().HasSynced
	e.nsLister = nsInformer.Lister()

	e.syncHandler = e.syncNamespace

	return e
}
// NewSync returns a Sync that watches the ConfigMap `name` in namespace `ns`.
func NewSync(client clientset.Interface, ns string, name string) Sync {
	sync := &kubeSync{
		ns:      ns,
		name:    name,
		client:  client,
		channel: make(chan *Config),
	}

	listWatch := &cache.ListWatch{
		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = fields.Set{"metadata.name": name}.AsSelector().String()
			return client.Core().ConfigMaps(ns).List(options)
		},
		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = fields.Set{"metadata.name": name}.AsSelector().String()
			return client.Core().ConfigMaps(ns).Watch(options)
		},
	}

	store, controller := cache.NewInformer(
		listWatch,
		&v1.ConfigMap{},
		time.Duration(0),
		cache.ResourceEventHandlerFuncs{
			AddFunc:    sync.onAdd,
			DeleteFunc: sync.onDelete,
			UpdateFunc: sync.onUpdate,
		})

	sync.store = store
	sync.controller = controller

	return sync
}
// checkMirrorPodDisappear returns nil once the mirror pod can no longer be found.
func checkMirrorPodDisappear(cl clientset.Interface, name, namespace string) error {
	_, err := cl.Core().Pods(namespace).Get(name, metav1.GetOptions{})
	if errors.IsNotFound(err) {
		return nil
	}
	return goerrors.New("pod did not disappear")
}
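// Example (hedged sketch, not part of the original source): polling until the mirror pod is
// gone, assuming wait.Poll from k8s.io/apimachinery/pkg/util/wait is available. The interval
// and timeout are illustrative assumptions.
func exampleWaitForMirrorPodGone(cl clientset.Interface, name, namespace string) error {
	return wait.Poll(time.Second, 2*time.Minute, func() (bool, error) {
		// Treat a nil error from checkMirrorPodDisappear as "done"; keep polling otherwise.
		return checkMirrorPodDisappear(cl, name, namespace) == nil, nil
	})
}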
// getMetrics retrieves the raw /metrics payload from the API server.
func getMetrics(c clientset.Interface) (string, error) {
	body, err := c.Core().RESTClient().Get().AbsPath("/metrics").DoRaw()
	if err != nil {
		return "", err
	}
	return string(body), nil
}
func deleteDNSScalingConfigMap(c clientset.Interface) error {
	if err := c.Core().ConfigMaps(api.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil {
		return err
	}
	framework.Logf("DNS autoscaling ConfigMap deleted.")
	return nil
}
// Wait for the pv and pvc to bind to each other.
func waitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
	// Wait for newly created PVC to bind to the PV
	framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
	err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)
	Expect(err).NotTo(HaveOccurred())

	// Wait for PersistentVolume.Status.Phase to be Bound, which it should be
	// since the PVC is already bound.
	err = framework.WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second)
	Expect(err).NotTo(HaveOccurred())

	// Re-get the pv object
	pv, err = c.Core().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())

	// Re-get the pvc object
	pvc, err = c.Core().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())

	// The pv and pvc are both bound, but to each other?
	// Check that the PersistentVolume.ClaimRef matches the PVC
	Expect(pv.Spec.ClaimRef).NotTo(BeNil())
	Expect(pv.Spec.ClaimRef.Name).To(Equal(pvc.Name))
	Expect(pvc.Spec.VolumeName).To(Equal(pv.Name))
	Expect(pv.Spec.ClaimRef.UID).To(Equal(pvc.UID))
}
func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) {
	cm, err := c.Core().ConfigMaps(api.NamespaceSystem).Get(DNSAutoscalerLabelName, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	return cm, nil
}
// Run a test container to try and contact the Kubernetes api-server from a pod, wait for it
// to flip to Ready, log its output and delete it.
func runKubernetesServiceTestContainer(c clientset.Interface, ns string) {
	path := "test/images/clusterapi-tester/pod.yaml"
	p, err := podFromManifest(path)
	if err != nil {
		framework.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
		return
	}
	p.Namespace = ns
	if _, err := c.Core().Pods(ns).Create(p); err != nil {
		framework.Logf("Failed to create %v: %v", p.Name, err)
		return
	}
	defer func() {
		if err := c.Core().Pods(ns).Delete(p.Name, nil); err != nil {
			framework.Logf("Failed to delete pod %v: %v", p.Name, err)
		}
	}()
	timeout := 5 * time.Minute
	if err := framework.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil {
		framework.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
		return
	}
	logs, err := framework.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
	if err != nil {
		framework.Logf("Failed to retrieve logs from %v: %v", p.Name, err)
	} else {
		framework.Logf("Output of clusterapi-tester:\n%v", logs)
	}
}
// Delete the PVC and wait for the PV to reach the expected phase. Validate that the PV
// has been reclaimed (assumption here about reclaimPolicy). Caller tells this func which
// phase value to expect for the pv bound to the to-be-deleted claim.
func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expctPVPhase v1.PersistentVolumePhase) {
	pvname := pvc.Spec.VolumeName
	framework.Logf("Deleting PVC %v to trigger recycling of PV %v", pvc.Name, pvname)
	deletePersistentVolumeClaim(c, pvc.Name, ns)

	// Check that the PVC is really deleted.
	pvc, err := c.Core().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
	Expect(apierrs.IsNotFound(err)).To(BeTrue())

	// Wait for the PV's phase to reach the expected value
	framework.Logf("Waiting for recycling process to complete.")
	err = framework.WaitForPersistentVolumePhase(expctPVPhase, c, pv.Name, 1*time.Second, 300*time.Second)
	Expect(err).NotTo(HaveOccurred())

	// Examine the pv's ClaimRef and UID and compare to expected values
	pv, err = c.Core().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	cr := pv.Spec.ClaimRef
	if expctPVPhase == v1.VolumeAvailable {
		if cr != nil { // may be ok if cr != nil
			Expect(len(cr.UID)).To(BeZero())
		}
	} else if expctPVPhase == v1.VolumeBound {
		Expect(cr).NotTo(BeNil())
		Expect(len(cr.UID)).NotTo(BeZero())
	}

	framework.Logf("PV %v now in %q phase", pv.Name, expctPVPhase)
}
// updateNodeLabels updates the labels of the nodes given by nodeNames.
// If a given label already exists, it is overwritten. If a label to remove doesn't exist,
// it is silently ignored.
// TODO: migrate to use framework.AddOrUpdateLabelOnNode/framework.RemoveLabelOffNode
func updateNodeLabels(c clientset.Interface, nodeNames sets.String, toAdd, toRemove map[string]string) {
	const maxRetries = 5
	for nodeName := range nodeNames {
		var node *v1.Node
		var err error
		for i := 0; i < maxRetries; i++ {
			node, err = c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
			if err != nil {
				framework.Logf("Error getting node %s: %v", nodeName, err)
				continue
			}
			if toAdd != nil {
				for k, v := range toAdd {
					node.ObjectMeta.Labels[k] = v
				}
			}
			if toRemove != nil {
				for k := range toRemove {
					delete(node.ObjectMeta.Labels, k)
				}
			}
			_, err = c.Core().Nodes().Update(node)
			if err != nil {
				framework.Logf("Error updating node %s: %v", nodeName, err)
			} else {
				break
			}
		}
		Expect(err).NotTo(HaveOccurred())
	}
}
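// Example (hedged sketch, not part of the original source): adding a label to a set of nodes
// for the duration of a test and removing it again afterwards. The label key and value are
// illustrative assumptions.
func exampleLabelAndUnlabelNodes(c clientset.Interface, nodeNames sets.String) {
	testLabels := map[string]string{"example.com/dedicated": "test"}
	updateNodeLabels(c, nodeNames, testLabels, nil)
	// toRemove only uses the map keys, so the same map can be reused for cleanup.
	defer updateNodeLabels(c, nodeNames, nil, testLabels)

	// ... run the test that relies on the label here ...
}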
// createController creates a replication controller running podCount copies of the given pod
// template, retrying the create call on failure.
func createController(client clientset.Interface, controllerName, namespace string, podCount int, podTemplate *v1.Pod) error {
	rc := &v1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{
			Name: controllerName,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: func(i int) *int32 { x := int32(i); return &x }(podCount),
			Selector: map[string]string{"name": controllerName},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"name": controllerName},
				},
				Spec: podTemplate.Spec,
			},
		},
	}
	var err error
	for attempt := 0; attempt < retries; attempt++ {
		// Assign to the outer err (no :=) so the retry log and the final error report the real failure.
		if _, err = client.Core().ReplicationControllers(namespace).Create(rc); err == nil {
			return nil
		}
		glog.Errorf("Error while creating rc, maybe retry: %v", err)
	}
	return fmt.Errorf("Terminal error while creating rc, won't retry: %v", err)
}
// StartPods is a simplified version of RunRC that does not create an RC, but creates plain Pods.
// Optionally waits for pods to start running (if waitForRunning == true).
// The number of replicas must be non-zero.
func StartPods(c clientset.Interface, replicas int, namespace string, podNamePrefix string,
	pod v1.Pod, waitForRunning bool, logFunc func(fmt string, args ...interface{})) error {
	// Starting zero pods is a caller error.
	if replicas < 1 {
		panic("StartPods: number of replicas must be non-zero")
	}
	startPodsID := string(uuid.NewUUID()) // So that we can label and find them
	for i := 0; i < replicas; i++ {
		podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
		pod.ObjectMeta.Name = podName
		pod.ObjectMeta.Labels["name"] = podName
		pod.ObjectMeta.Labels["startPodsID"] = startPodsID
		pod.Spec.Containers[0].Name = podName
		_, err := c.Core().Pods(namespace).Create(&pod)
		if err != nil {
			return err
		}
	}
	logFunc("Waiting for running...")
	if waitForRunning {
		label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID}))
		err := WaitForPodsWithLabelRunning(c, namespace, label)
		if err != nil {
			return fmt.Errorf("Error waiting for %d pods to be running - probably a timeout: %v", replicas, err)
		}
	}
	return nil
}
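// Example (hedged sketch, not part of the original source): starting three copies of a pause
// pod and waiting for them to run. StartPods writes into pod.ObjectMeta.Labels and
// pod.Spec.Containers[0], so the template must carry a non-nil label map and at least one
// container. The image name is an illustrative assumption, and Logf is assumed to be the
// framework logging helper available in the same package.
func exampleStartPausePods(c clientset.Interface, namespace string) error {
	template := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Labels: map[string]string{},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "pause", Image: "gcr.io/google_containers/pause-amd64:3.0"},
			},
		},
	}
	return StartPods(c, 3, namespace, "example-pause", template, true, Logf)
}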
func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) error {
	_, err := c.Core().ConfigMaps(api.NamespaceSystem).Update(configMap)
	if err != nil {
		return err
	}
	framework.Logf("DNS autoscaling ConfigMap updated.")
	return nil
}
// NewReplicationManager creates a replication manager
func NewReplicationManager(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})

	return newReplicationManager(
		eventBroadcaster.NewRecorder(v1.EventSource{Component: "replication-controller"}),
		podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize, garbageCollectorEnabled)
}
// NewHeapsterMetricsClient returns a MetricsClient backed by the Heapster service in the given namespace.
func NewHeapsterMetricsClient(client clientset.Interface, namespace, scheme, service, port string) MetricsClient {
	return &HeapsterMetricsClient{
		services:        client.Core().Services(namespace),
		podsGetter:      client.Core(),
		heapsterScheme:  scheme,
		heapsterService: service,
		heapsterPort:    port,
	}
}
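// Example (hedged sketch, not part of the original source): constructing a metrics client
// against a Heapster service running in kube-system. The namespace, scheme, service name,
// and empty port (meaning "use the service's default port") mirror commonly used Heapster
// settings, but they are illustrative assumptions here.
func exampleNewHeapsterClient(client clientset.Interface) MetricsClient {
	return NewHeapsterMetricsClient(client, "kube-system", "http", "heapster", "")
}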
// Delete the Claim
func deletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns string) {
	if c != nil && len(pvcName) > 0 {
		framework.Logf("Deleting PersistentVolumeClaim %v", pvcName)
		err := c.Core().PersistentVolumeClaims(ns).Delete(pvcName, nil)
		if err != nil && !apierrs.IsNotFound(err) {
			Expect(err).NotTo(HaveOccurred())
		}
	}
}
// resizeRC sets the replica count of the named replication controller.
func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
	rc, err := c.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	*(rc.Spec.Replicas) = replicas
	_, err = c.Core().ReplicationControllers(rc.Namespace).Update(rc)
	return err
}
// NewResourceUsageGatherer builds a containerResourceGatherer with one worker per selected node
// (or a single in-kubemark worker), pre-populating the container ID to name map from kube-system pods.
func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions) (*containerResourceGatherer, error) {
	g := containerResourceGatherer{
		client:               c,
		stopCh:               make(chan struct{}),
		containerIDToNameMap: make(map[string]string),
		containerIDs:         make([]string, 0),
		options:              options,
	}

	if options.inKubemark {
		g.workerWg.Add(1)
		g.workers = append(g.workers, resourceGatherWorker{
			inKubemark: true,
			stopCh:     g.stopCh,
			wg:         &g.workerWg,
			finished:   false,
		})
	} else {
		pods, err := c.Core().Pods("kube-system").List(v1.ListOptions{})
		if err != nil {
			Logf("Error while listing Pods: %v", err)
			return nil, err
		}
		for _, pod := range pods.Items {
			for _, container := range pod.Status.ContainerStatuses {
				containerID := strings.TrimPrefix(container.ContainerID, "docker:/")
				g.containerIDToNameMap[containerID] = pod.Name + "/" + container.Name
				g.containerIDs = append(g.containerIDs, containerID)
			}
		}
		nodeList, err := c.Core().Nodes().List(v1.ListOptions{})
		if err != nil {
			Logf("Error while listing Nodes: %v", err)
			return nil, err
		}

		for _, node := range nodeList.Items {
			if !options.masterOnly || system.IsMasterNode(node.Name) {
				g.workerWg.Add(1)
				g.workers = append(g.workers, resourceGatherWorker{
					c:                    c,
					nodeName:             node.Name,
					wg:                   &g.workerWg,
					containerIDToNameMap: g.containerIDToNameMap,
					containerIDs:         g.containerIDs,
					stopCh:               g.stopCh,
					finished:             false,
					inKubemark:           false,
				})
				if options.masterOnly {
					break
				}
			}
		}
	}
	return &g, nil
}
func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) error {
	pod, err := c.Core().Pods(namespace).Create(podOnNode(podName, nodeName, serveHostnameImage))
	if err == nil {
		framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
	} else {
		framework.Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err)
	}
	return err
}
func forcefullyDeletePod(c clientset.Interface, pod *v1.Pod) error {
	var zero int64
	glog.Infof("NodeController is force deleting Pod: %v:%v", pod.Namespace, pod.Name)
	err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &v1.DeleteOptions{GracePeriodSeconds: &zero})
	if err == nil {
		glog.V(4).Infof("forceful deletion of %s succeeded", pod.Name)
	}
	return err
}