// GetPetSetDetail gets pet set details.
func GetPetSetDetail(client *k8sClient.Client, heapsterClient client.HeapsterClient,
	namespace, name string) (*PetSetDetail, error) {

	log.Printf("Getting details of %s service in %s namespace", name, namespace)

	// TODO(floreks): Use channels.
	petSetData, err := client.Apps().PetSets(namespace).Get(name)
	if err != nil {
		return nil, err
	}

	channels := &common.ResourceChannels{
		PodList: common.GetPodListChannel(client, common.NewSameNamespaceQuery(namespace), 1),
	}

	pods := <-channels.PodList.List
	if err := <-channels.PodList.Error; err != nil {
		return nil, err
	}

	events, err := GetPetSetEvents(client, petSetData.Namespace, petSetData.Name)
	if err != nil {
		return nil, err
	}

	petSet := getPetSetDetail(petSetData, heapsterClient, events, pods.Items)
	return &petSet, nil
}
// GetPetSetDetail gets pet set details.
func GetPetSetDetail(client *k8sClient.Client, heapsterClient client.HeapsterClient,
	namespace, name string) (*PetSetDetail, error) {

	log.Printf("Getting details of %s service in %s namespace", name, namespace)

	// TODO(floreks): Use channels.
	petSetData, err := client.Apps().PetSets(namespace).Get(name)
	if err != nil {
		return nil, err
	}

	podList, err := GetPetSetPods(client, heapsterClient, dataselect.DefaultDataSelectWithMetrics, name, namespace)
	if err != nil {
		return nil, err
	}

	podInfo, err := getPetSetPodInfo(client, petSetData)
	if err != nil {
		return nil, err
	}

	events, err := GetPetSetEvents(client, dataselect.DefaultDataSelect, petSetData.Namespace, petSetData.Name)
	if err != nil {
		return nil, err
	}

	petSet := getPetSetDetail(petSetData, heapsterClient, *events, *podList, *podInfo)
	return &petSet, nil
}
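
A minimal usage sketch for the variant above. The clients are assumed to be configured elsewhere (construction of the Kubernetes and Heapster clients is elided, since the setup differs between dashboard versions), and the namespace/name values are placeholders.
// printPetSetDetail is a hypothetical caller of GetPetSetDetail; "default"
// and "web" are placeholder values, not names from the original code.
func printPetSetDetail(k8s *k8sClient.Client, heapster client.HeapsterClient) error {
	detail, err := GetPetSetDetail(k8s, heapster, "default", "web")
	if err != nil {
		return err
	}
	// PetSetDetail aggregates the pet set, its pod list, pod info, and events.
	log.Printf("pet set detail: %+v", detail)
	return nil
}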
Example #3
// updatePetCount attempts to update the Status.Replicas of the given PetSet, with a single GET/PUT retry.
func updatePetCount(kubeClient *client.Client, ps apps.PetSet, numPets int) (updateErr error) {
	if ps.Status.Replicas == numPets || kubeClient == nil {
		return nil
	}
	psClient := kubeClient.Apps().PetSets(ps.Namespace)
	var getErr error
	for i, ps := 0, &ps; ; i++ {
		glog.V(4).Infof(fmt.Sprintf("Updating replica count for PetSet: %s/%s, ", ps.Namespace, ps.Name) +
			fmt.Sprintf("replicas %d->%d (need %d), ", ps.Status.Replicas, numPets, ps.Spec.Replicas))

		ps.Status = apps.PetSetStatus{Replicas: numPets}
		_, updateErr = psClient.UpdateStatus(ps)
		if updateErr == nil || i >= statusUpdateRetries {
			return updateErr
		}
		if ps, getErr = psClient.Get(ps.Name); getErr != nil {
			return getErr
		}
	}
}
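
The loop above is the usual optimistic-concurrency pattern for status writes: attempt the update, and on failure re-GET the latest object and retry, bounded by statusUpdateRetries. A stripped-down sketch of the same pattern in plain Go (updateFn and refreshFn are hypothetical stand-ins for UpdateStatus and Get):
// retryOnConflict retries updateFn up to maxRetries times, refreshing the
// object via refreshFn between attempts, mirroring the loop in updatePetCount.
func retryOnConflict(maxRetries int, updateFn, refreshFn func() error) error {
	var err error
	for i := 0; ; i++ {
		if err = updateFn(); err == nil || i >= maxRetries {
			return err
		}
		// Refresh to the latest server-side state before the next attempt.
		if refreshErr := refreshFn(); refreshErr != nil {
			return refreshErr
		}
	}
}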
// GetPetSetList returns a list of all Pet Sets in the cluster.
func GetPetSetList(client *client.Client, nsQuery *common.NamespaceQuery) (*PetSetList, error) {
	log.Printf("Getting list of all pet sets in the cluster")

	channels := &common.ResourceChannels{
		PetSetList: common.GetPetSetListChannel(client.Apps(), nsQuery, 1),
		PodList:    common.GetPodListChannel(client, nsQuery, 1),
		EventList:  common.GetEventListChannel(client, nsQuery, 1),
	}

	return GetPetSetListFromChannels(channels)
}
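
A usage sketch, assuming a configured client. NewSameNamespaceQuery is the same helper the other examples use to restrict a query to one namespace.
// listPetSetsIn is a hypothetical caller that lists pet sets in a single
// namespace rather than the whole cluster.
func listPetSetsIn(c *client.Client, namespace string) (*PetSetList, error) {
	return GetPetSetList(c, common.NewSameNamespaceQuery(namespace))
}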
// GetPetSetPodsEvents gets events associated with pods in the pet set.
func GetPetSetPodsEvents(client *client.Client, namespace, petSetName string) (
	[]api.Event, error) {

	petSet, err := client.Apps().PetSets(namespace).Get(petSetName)

	if err != nil {
		return nil, err
	}

	podEvents, err := event.GetPodsEvents(client, namespace, petSet.Spec.Selector.MatchLabels)

	if err != nil {
		return nil, err
	}

	return podEvents, nil
}
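
A usage sketch, with the client assumed configured as in the other examples; the api.Event fields used here (InvolvedObject, Message) are standard in this API version.
// logPetSetPodEvents is a hypothetical caller that logs every event recorded
// for pods belonging to the named pet set.
func logPetSetPodEvents(c *client.Client, namespace, name string) error {
	events, err := GetPetSetPodsEvents(c, namespace, name)
	if err != nil {
		return err
	}
	for _, e := range events {
		log.Printf("%s/%s: %s", e.InvolvedObject.Kind, e.InvolvedObject.Name, e.Message)
	}
	return nil
}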
Example #6
// GetWorkloads returns a list of all workloads in the cluster.
func GetWorkloads(client *k8sClient.Client, heapsterClient client.HeapsterClient,
	nsQuery *common.NamespaceQuery, metricQuery *dataselect.MetricQuery) (*Workloads, error) {

	log.Printf("Getting lists of all workloads")
	channels := &common.ResourceChannels{
		ReplicationControllerList: common.GetReplicationControllerListChannel(client, nsQuery, 1),
		ReplicaSetList:            common.GetReplicaSetListChannel(client.Extensions(), nsQuery, 1),
		JobList:                   common.GetJobListChannel(client.Batch(), nsQuery, 1),
		DaemonSetList:             common.GetDaemonSetListChannel(client.Extensions(), nsQuery, 1),
		DeploymentList:            common.GetDeploymentListChannel(client.Extensions(), nsQuery, 1),
		PetSetList:                common.GetPetSetListChannel(client.Apps(), nsQuery, 1),
		ServiceList:               common.GetServiceListChannel(client, nsQuery, 1),
		PodList:                   common.GetPodListChannel(client, nsQuery, 7),
		EventList:                 common.GetEventListChannel(client, nsQuery, 6),
	}

	return GetWorkloadsFromChannels(channels, heapsterClient, metricQuery)
}
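
Note the channel buffer arguments: the pod and event channels are created with 7 and 6 expected readers, presumably matching the number of downstream workload builders that each consume one copy of those lists. A usage sketch follows; NewNamespaceQuery(nil) (meaning all namespaces) and the nil metric query are assumptions that may not hold in every dashboard version.
// getAllWorkloads is a hypothetical caller that fetches every workload kind
// across all namespaces, skipping metrics (the nil metric query is an assumption).
func getAllWorkloads(k8s *k8sClient.Client, heapster client.HeapsterClient) (*Workloads, error) {
	return GetWorkloads(k8s, heapster, common.NewNamespaceQuery(nil), nil)
}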
// Returns the API pods targeted by the pet set with the given name.
func getRawPetSetPods(client *k8sClient.Client, petSetName, namespace string) (
	[]api.Pod, error) {

	petSet, err := client.Apps().PetSets(namespace).Get(petSetName)
	if err != nil {
		return nil, err
	}

	channels := &common.ResourceChannels{
		PodList: common.GetPodListChannel(client, common.NewSameNamespaceQuery(namespace), 1),
	}

	podList := <-channels.PodList.List
	if err := <-channels.PodList.Error; err != nil {
		return nil, err
	}

	matchingPods := common.FilterNamespacedPodsByLabelSelector(podList.Items,
		petSet.ObjectMeta.Namespace, petSet.Spec.Selector)
	return matchingPods, nil
}
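
FilterNamespacedPodsByLabelSelector keeps only the pods in the pet set's namespace whose labels satisfy its selector. For a matchLabels-only selector the core check reduces to a subset test, sketched below in plain Go:
// matchesLabels is a minimal sketch of matchLabels semantics: every key/value
// pair in the selector must appear verbatim in the pod's labels.
func matchesLabels(selector, podLabels map[string]string) bool {
	for k, v := range selector {
		if podLabels[k] != v {
			return false
		}
	}
	return true
}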
Example #8
func deleteAllPetSets(c *client.Client, ns string) {
	pst := &petSetTester{c: c}
	psList, err := c.Apps().PetSets(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
	ExpectNoError(err)

	// Scale down each petset, then delete it completely.
	// Deleting a pvc without doing this will leak volumes, #25101.
	errList := []string{}
	for _, ps := range psList.Items {
		framework.Logf("Scaling petset %v to 0", ps.Name)
		if err := pst.scale(&ps, 0); err != nil {
			errList = append(errList, fmt.Sprintf("%v", err))
		}
		framework.Logf("Deleting petset %v", ps.Name)
		if err := c.Apps().PetSets(ps.Namespace).Delete(ps.Name, nil); err != nil {
			errList = append(errList, fmt.Sprintf("%v", err))
		}
	}

	// pvs are global, so we need to wait for the exact ones bound to the petset pvcs.
	pvNames := sets.NewString()
	// TODO: Don't assume all pvcs in the ns belong to a petset
	pvcPollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) {
		pvcList, err := c.PersistentVolumeClaims(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
		if err != nil {
			framework.Logf("WARNING: Failed to list pvcs, retrying %v", err)
			return false, nil
		}
		for _, pvc := range pvcList.Items {
			pvNames.Insert(pvc.Spec.VolumeName)
			// TODO: Double check that there are no pods referencing the pvc
			framework.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
			if err := c.PersistentVolumeClaims(ns).Delete(pvc.Name); err != nil {
				return false, nil
			}
		}
		return true, nil
	})
	if pvcPollErr != nil {
		errList = append(errList, fmt.Sprintf("Timeout waiting for pvc deletion."))
	}

	pollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) {
		pvList, err := c.PersistentVolumes().List(api.ListOptions{LabelSelector: labels.Everything()})
		if err != nil {
			framework.Logf("WARNING: Failed to list pvs, retrying %v", err)
			return false, nil
		}
		waitingFor := []string{}
		for _, pv := range pvList.Items {
			if pvNames.Has(pv.Name) {
				waitingFor = append(waitingFor, fmt.Sprintf("%v: %+v", pv.Name, pv.Status))
			}
		}
		if len(waitingFor) == 0 {
			return true, nil
		}
		framework.Logf("Still waiting for pvs of petset to disappear:\n%v", strings.Join(waitingFor, "\n"))
		return false, nil
	})
	if pollErr != nil {
		errList = append(errList, fmt.Sprintf("Timeout waiting for pv provisioner to delete pvs, this might mean the test leaked pvs."))
	}
	if len(errList) != 0 {
		ExpectNoError(fmt.Errorf("%v", strings.Join(errList, "\n")))
	}
}
Example #9
		})

		AfterEach(func() {
			if CurrentGinkgoTestDescription().Failed {
				dumpDebugInfo(c, ns)
			}
			framework.Logf("Deleting all petset in ns %v", ns)
			deleteAllPetSets(c, ns)
		})

		It("should provide basic identity [Feature:PetSet]", func() {
			By("creating petset " + psName + " in namespace " + ns)
			petMounts := []api.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
			podMounts := []api.VolumeMount{{Name: "home", MountPath: "/home"}}
			ps := newPetSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
			_, err := c.Apps().PetSets(ns).Create(ps)
			Expect(err).NotTo(HaveOccurred())

			pst := petSetTester{c: c}

			By("Saturating pet set " + ps.Name)
			pst.saturate(ps)

			By("Verifying petset mounted data directory is usable")
			ExpectNoError(pst.checkMount(ps, "/data"))

			cmd := "echo $(hostname) > /data/hostname; sync;"
			By("Running " + cmd + " in all pets")
			ExpectNoError(pst.execInPets(ps, cmd))

			By("Restarting pet set " + ps.Name)
Example #10
	BeforeEach(func() {
		var err error
		c, err = framework.LoadClient()
		Expect(err).NotTo(HaveOccurred())
		ns = f.Namespace.Name

		By("creating service " + headlessSvcName + " in namespace " + ns)
		headlessService := createServiceSpec(headlessSvcName, true, labels)
		_, err = c.Services(ns).Create(headlessService)
		Expect(err).NotTo(HaveOccurred())
	})

	It("provide basic identity [Feature:PetSet]", func() {
		By("creating petset " + psName + " in namespace " + ns)
		defer func() {
			err := c.Apps().PetSets(ns).Delete(psName, nil)
			Expect(err).NotTo(HaveOccurred())
		}()

		petMounts := []api.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
		podMounts := []api.VolumeMount{{Name: "home", MountPath: "/home"}}
		ps := newPetSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
		_, err := c.Apps().PetSets(ns).Create(ps)
		Expect(err).NotTo(HaveOccurred())

		pt := petTester{c: c}

		By("Saturating pet set " + ps.Name)
		pt.saturate(ps)

		cmd := "echo $(hostname) > /data/hostname"
Example #11
func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) {
	expectedPods := []string{}
	// Iterate over the labels that identify the replication controllers that we
	// want to check. rcLabels contains the values for the k8s-app key
	// that identify the replication controllers that we want to check. Using a label
	// rather than an explicit name is preferred because the names will typically have
	// a version suffix e.g. heapster-monitoring-v1 and this will change after a rolling
	// update e.g. to heapster-monitoring-v2. By using a label query we can check for the
	// situation when a heapster-monitoring-v1 and heapster-monitoring-v2 replication controller
	// is running (which would be an error except during a rolling update).
	for _, rcLabel := range rcLabels {
		selector := labels.Set{"k8s-app": rcLabel}.AsSelector()
		options := api.ListOptions{LabelSelector: selector}
		deploymentList, err := c.Deployments(api.NamespaceSystem).List(options)
		if err != nil {
			return nil, err
		}
		rcList, err := c.ReplicationControllers(api.NamespaceSystem).List(options)
		if err != nil {
			return nil, err
		}
		psList, err := c.Apps().PetSets(api.NamespaceSystem).List(options)
		if err != nil {
			return nil, err
		}
		if (len(rcList.Items) + len(deploymentList.Items) + len(psList.Items)) != 1 {
			return nil, fmt.Errorf("expected to find exactly one RC, deployment or pet set with label %s but got %d",
				rcLabel, len(rcList.Items)+len(deploymentList.Items)+len(psList.Items))
		}
		// Check all the replication controllers.
		for _, rc := range rcList.Items {
			selector := labels.Set(rc.Spec.Selector).AsSelector()
			options := api.ListOptions{LabelSelector: selector}
			podList, err := c.Pods(api.NamespaceSystem).List(options)
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
		// Do the same for all deployments.
		for _, d := range deploymentList.Items {
			selector := labels.Set(d.Spec.Selector.MatchLabels).AsSelector()
			options := api.ListOptions{LabelSelector: selector}
			podList, err := c.Pods(api.NamespaceSystem).List(options)
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
		// And for pet sets.
		for _, ps := range psList.Items {
			selector := labels.Set(ps.Spec.Selector.MatchLabels).AsSelector()
			options := api.ListOptions{LabelSelector: selector}
			podList, err := c.Pods(api.NamespaceSystem).List(options)
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				if pod.DeletionTimestamp != nil {
					continue
				}
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
	}
	return expectedPods, nil
}
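
A sketch of how the returned UIDs are typically used: snapshot the system pods, disrupt the cluster, then verify the same set is still present. The comparison relies on sets.NewString/Equal from the util/sets package already imported in the earlier example; the disruption step itself is elided and podsSurvived is a hypothetical helper.
// podsSurvived is a hypothetical check that the system pods found by
// verifyExpectedRcsExistAndGetExpectedPods are unchanged after a disruption.
func podsSurvived(c *client.Client, before []string) (bool, error) {
	after, err := verifyExpectedRcsExistAndGetExpectedPods(c)
	if err != nil {
		return false, err
	}
	return sets.NewString(before...).Equal(sets.NewString(after...)), nil
}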