Code example #1
File: density.go Project: hongbin/kubernetes
func createRunningPod(wg *sync.WaitGroup, c *client.Client, name, ns, image string, labels map[string]string) {
	defer GinkgoRecover()
	defer wg.Done()
	pod := &api.Pod{
		TypeMeta: unversioned.TypeMeta{
			Kind: "Pod",
		},
		ObjectMeta: api.ObjectMeta{
			Name:   name,
			Labels: labels,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  name,
					Image: image,
				},
			},
			DNSPolicy: api.DNSDefault,
		},
	}
	_, err := c.Pods(ns).Create(pod)
	expectNoError(err)
	expectNoError(waitForPodRunningInNamespace(c, name, ns))
}
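Since createRunningPod takes a *sync.WaitGroup and defers wg.Done, it is designed to be fanned out across goroutines. A sketch of such a call site (the pod count, name prefix, and pause image are illustrative; c and ns are assumed to be in scope):

var wg sync.WaitGroup
for i := 0; i < 5; i++ {
	wg.Add(1)
	go createRunningPod(&wg, c, fmt.Sprintf("density-pod-%d", i), ns,
		"gcr.io/google_containers/pause:2.0", map[string]string{"type": "density"})
}
wg.Wait() // every pod has been created and observed Running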
Code example #2
File: deploy.go Project: fabric8io/gofabric8
func addServiceAccount(c *k8sclient.Client, f *cmdutil.Factory, name string) (Result, error) {
	ns, _, e := f.DefaultNamespace()
	if e != nil {
		util.Fatal("No default namespace")
		return Failure, e
	}
	sas := c.ServiceAccounts(ns)
	_, err := sas.Get(name)
	if err != nil {
		sa := kapi.ServiceAccount{
			ObjectMeta: kapi.ObjectMeta{
				Name: name,
				Labels: map[string]string{
					"provider": "fabric8.io",
				},
			},
		}
		_, err = sas.Create(&sa)
	}
	r := Success
	if err != nil {
		r = Failure
	}
	return r, err
}
Code example #3
// Waits until all existing pods are scheduled and returns their number.
func waitForStableCluster(c *client.Client) int {
	timeout := 10 * time.Minute
	startTime := time.Now()

	allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
	framework.ExpectNoError(err)
	// The API server also returns Pods that have succeeded. We need to filter them out.
	currentPods := make([]api.Pod, 0, len(allPods.Items))
	for _, pod := range allPods.Items {
		if pod.Status.Phase != api.PodSucceeded && pod.Status.Phase != api.PodFailed {
			currentPods = append(currentPods, pod)
		}
	}
	allPods.Items = currentPods
	scheduledPods, currentlyNotScheduledPods := getPodsScheduled(allPods)
	for len(currentlyNotScheduledPods) != 0 {
		time.Sleep(2 * time.Second)

		allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
		framework.ExpectNoError(err)
		// Apply the same succeeded/failed filter to each fresh listing,
		// mirroring the initial pass above.
		filtered := make([]api.Pod, 0, len(allPods.Items))
		for _, pod := range allPods.Items {
			if pod.Status.Phase != api.PodSucceeded && pod.Status.Phase != api.PodFailed {
				filtered = append(filtered, pod)
			}
		}
		allPods.Items = filtered
		scheduledPods, currentlyNotScheduledPods = getPodsScheduled(allPods)

		if startTime.Add(timeout).Before(time.Now()) {
			framework.Failf("Timed out after %v waiting for stable cluster.", timeout)
			break
		}
	}
	return len(scheduledPods)
}
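A sketch of a typical call site (assuming c is an initialized *client.Client): the returned count is the baseline of already-scheduled pods that later assertions compare against.

scheduledBefore := waitForStableCluster(c)
framework.Logf("cluster stable with %d scheduled pods", scheduledBefore)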
Code example #4
File: service.go Project: johnmccawley/origin
// Creates a replication controller that serves its hostname and a service on top of it.
func startServeHostnameService(c *client.Client, ns, name string, port, replicas int) ([]string, string, error) {
	podNames := make([]string, replicas)

	By("creating service " + name + " in namespace " + ns)
	_, err := c.Services(ns).Create(&api.Service{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{{
				Port:       port,
				TargetPort: util.NewIntOrStringFromInt(9376),
				Protocol:   "TCP",
			}},
			Selector: map[string]string{
				"name": name,
			},
		},
	})
	if err != nil {
		return podNames, "", err
	}

	var createdPods []*api.Pod
	maxContainerFailures := 0
	config := RCConfig{
		Client:               c,
		Image:                "gcr.io/google_containers/serve_hostname:1.1",
		Name:                 name,
		Namespace:            ns,
		PollInterval:         3 * time.Second,
		Timeout:              podReadyBeforeTimeout,
		Replicas:             replicas,
		CreatedPods:          &createdPods,
		MaxContainerFailures: &maxContainerFailures,
	}
	err = RunRC(config)
	if err != nil {
		return podNames, "", err
	}

	if len(createdPods) != replicas {
		return podNames, "", fmt.Errorf("Incorrect number of running pods: %v", len(createdPods))
	}

	for i := range createdPods {
		podNames[i] = createdPods[i].ObjectMeta.Name
	}
	sort.StringSlice(podNames).Sort()

	service, err := c.Services(ns).Get(name)
	if err != nil {
		return podNames, "", err
	}
	if service.Spec.ClusterIP == "" {
		return podNames, "", fmt.Errorf("Service IP is blank for %v", name)
	}
	serviceIP := service.Spec.ClusterIP
	return podNames, serviceIP, nil
}
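An illustrative invocation (namespace, service name, port, and replica count are placeholders):

podNames, serviceIP, err := startServeHostnameService(c, ns, "verify-svc", 80, 3)
if err != nil {
	Failf("failed to start serve-hostname service: %v", err)
}
Logf("service %s serving on %s, backed by pods %v", "verify-svc", serviceIP, podNames)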
Code example #5
func createPodWithSpec(c *client.Client, pod *api.Pod) (*api.Pod, error) {
	// Manually assign pod to node because we don't run the scheduler in node
	// e2e tests.
	pod.Spec.NodeName = framework.TestContext.NodeName
	createdPod, err := c.Pods(pod.Namespace).Create(pod)
	return createdPod, err
}
Code example #6
File: run.go Project: spxtr/contrib
func waitForPodRunning(c *client.Client, pod *api.Pod, out io.Writer) (status api.PodPhase, err error) {
	for {
		pod, err := c.Pods(pod.Namespace).Get(pod.Name)
		if err != nil {
			return api.PodUnknown, err
		}
		ready := false
		if pod.Status.Phase == api.PodRunning {
			ready = true
			for _, status := range pod.Status.ContainerStatuses {
				if !status.Ready {
					ready = false
					break
				}
			}
			if ready {
				return api.PodRunning, nil
			}
		}
		if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed {
			return pod.Status.Phase, nil
		}
		fmt.Fprintf(out, "Waiting for pod %s/%s to be running, status is %s, pod ready: %v\n", pod.Namespace, pod.Name, pod.Status.Phase, ready)
		time.Sleep(2 * time.Second)
	}
}
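A minimal call-site sketch (os.Stdout as the progress writer is illustrative, and the glog logger seen elsewhere in these examples is assumed):

phase, err := waitForPodRunning(c, pod, os.Stdout)
if err != nil {
	glog.Fatalf("error waiting for pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
if phase != api.PodRunning {
	glog.Fatalf("pod %s/%s terminated in phase %s", pod.Namespace, pod.Name, phase)
}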
Code example #7
File: integration.go Project: Vitamen/kubernetes
func runMasterServiceTest(client *client.Client) {
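	// Give the master time to create the default "kubernetes" service before listing.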
	time.Sleep(12 * time.Second)
	svcList, err := client.Services(api.NamespaceDefault).List(labels.Everything())
	if err != nil {
		glog.Fatalf("unexpected error listing services: %v", err)
	}
	var foundRW bool
	found := util.StringSet{}
	for i := range svcList.Items {
		found.Insert(svcList.Items[i].Name)
		if svcList.Items[i].Name == "kubernetes" {
			foundRW = true
		}
	}
	if foundRW {
		ep, err := client.Endpoints(api.NamespaceDefault).Get("kubernetes")
		if err != nil {
			glog.Fatalf("unexpected error listing endpoints for kubernetes service: %v", err)
		}
		if countEndpoints(ep) == 0 {
			glog.Fatalf("no endpoints for kubernetes service: %v", ep)
		}
	} else {
		glog.Errorf("no RW service found: %v", found)
		glog.Fatal("Kubernetes service test failed")
	}
	glog.Infof("Master service test passed.")
}
Code example #8
File: resize_nodes.go Project: remoteur/kubernetes
func podsCreated(c *client.Client, ns, name string, replicas int) (*api.PodList, error) {
	timeout := 2 * time.Minute
	// List the pods, making sure we observe all the replicas.
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
		pods, err := c.Pods(ns).List(label, fields.Everything())
		if err != nil {
			return nil, err
		}

		created := []api.Pod{}
		for _, pod := range pods.Items {
			if pod.DeletionTimestamp != nil {
				continue
			}
			created = append(created, pod)
		}
		Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)

		if len(created) == replicas {
			pods.Items = created
			return pods, nil
		}
	}
	return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}
Code example #9
File: master_config.go Project: vikaslaad/origin
func newProjectAuthorizationCache(authorizer authorizer.Authorizer, kubeClient *kclient.Client, policyClient policyclient.ReadOnlyPolicyClient) *projectauth.AuthorizationCache {
	return projectauth.NewAuthorizationCache(
		projectauth.NewAuthorizerReviewer(authorizer),
		kubeClient.Namespaces(),
		policyClient,
	)
}
Code example #10
File: main.go Project: signalfx/cadvisor-integration
func updateNodes(kubeClient *kube.Client, cPort int) (hostIPtoNodeMap map[string]kubeAPI.Node, nodeIPs []string) {

	hostIPtoNodeMap = make(map[string]kubeAPI.Node, 2)
	nodeIPs = make([]string, 0, 2)
	nodeList, apiErr := kubeClient.Nodes().List(kubeLabels.Everything(), kubeFields.Everything())
	if apiErr != nil {
		glog.Errorf("Failed to list kubernetes nodes. Error: %v\n", apiErr)
	} else {
		for _, node := range nodeList.Items {
			var hostIP string
			// Prefer the internal IP; a bare "break" would only exit the switch,
			// so a labeled break stops the scan once the internal IP is found.
		addressLoop:
			for _, nodeAddress := range node.Status.Addresses {
				switch nodeAddress.Type {
				case kubeAPI.NodeInternalIP:
					hostIP = nodeAddress.Address
					break addressLoop
				case kubeAPI.NodeLegacyHostIP:
					hostIP = nodeAddress.Address
				}
			}
			if hostIP != "" {
				hostIP = fmt.Sprintf("http://%v:%v", hostIP, cPort)
				nodeIPs = append(nodeIPs, hostIP)
				hostIPtoNodeMap[hostIP] = node
			}
		}
	}

	return hostIPtoNodeMap, nodeIPs
}
Code example #11
File: service.go Project: fabric8io/gofabric8
func openService(ns string, serviceName string, c *k8sclient.Client, printURL bool, retry bool) {
	if retry {
		if err := RetryAfter(40, func() error { return CheckService(ns, serviceName, c) }, 10*time.Second); err != nil {
			util.Errorf("Could not find finalized endpoint being pointed to by %s: %v", serviceName, err)
			os.Exit(1)
		}
	}
	svcs, err := c.Services(ns).List(kubeApi.ListOptions{})
	if err != nil {
		util.Errorf("No services found %v\n", err)
		// Without a service list there is nothing to scan; bail out early.
		return
	}
	found := false
	for _, service := range svcs.Items {
		if serviceName == service.Name {

			url := service.ObjectMeta.Annotations[exposeURLAnnotation]

			if printURL {
				util.Successf("%s\n", url)
			} else {
				util.Successf("\nOpening URL %s\n", url)
				browser.OpenURL(url)
			}
			found = true
			break
		}
	}
	if !found {
		util.Errorf("No service %s in namespace %s\n", serviceName, ns)
	}
}
Code example #12
File: deploy.go Project: fabric8io/gofabric8
func processResource(c *k8sclient.Client, b []byte, ns string, kind string) error {
	util.Infof("Processing resource kind: %s in namespace %s\n", kind, ns)
	req := c.Post().Body(b)
	if kind == "Deployment" {
		req.AbsPath("apis", "extensions/v1beta1", "namespaces", ns, strings.ToLower(kind+"s"))
	} else if kind == "BuildConfig" || kind == "DeploymentConfig" || kind == "Template" || kind == "PolicyBinding" || kind == "Role" || kind == "RoleBinding" {
		req.AbsPath("oapi", "v1", "namespaces", ns, strings.ToLower(kind+"s"))
	} else if kind == "OAuthClient" || kind == "Project" || kind == "ProjectRequest" {
		req.AbsPath("oapi", "v1", strings.ToLower(kind+"s"))
	} else if kind == "Namespace" {
		req.AbsPath("api", "v1", "namespaces")
	} else {
		req.Namespace(ns).Resource(strings.ToLower(kind + "s"))
	}
	res := req.Do()
	if err := res.Error(); err != nil {
		util.Warnf("Failed to create %s: %v", kind, err)
		return err
	}
	var statusCode int
	res.StatusCode(&statusCode)
	if statusCode != http.StatusCreated {
		return fmt.Errorf("Failed to create %s: %d", kind, statusCode)
	}
	return nil
}
Code example #13
File: deploy.go Project: fabric8io/gofabric8
func addIngressInfraLabel(c *k8sclient.Client, ns string) string {
	nodeClient := c.Nodes()
	nodes, err := nodeClient.List(api.ListOptions{})
	if err != nil {
		util.Errorf("\nUnable to find any nodes: %s\n", err)
	}
	changed := false
	hasExistingExposeIPLabel, externalNodeName := hasExistingLabel(nodes, externalIPLabel)
	if externalNodeName != "" {
		return externalNodeName
	}
	if !hasExistingExposeIPLabel && len(nodes.Items) > 0 {
		for _, node := range nodes.Items {
			if !node.Spec.Unschedulable {
				changed = addLabelIfNotExist(&node.ObjectMeta, externalIPLabel, "true")
				if changed {
					_, err = nodeClient.Update(&node)
					if err != nil {
						printError("Failed to label node with ", err)
					}
					return node.Name
				}
			}
		}
	}
	if !changed && !hasExistingExposeIPLabel {
		util.Warnf("Unable to add label for ingress controller to run on a specific node, please add manually: kubectl label node [your node name] %s=true", externalIPLabel)
	}
	return ""
}
Code example #14
File: deploy.go Project: fabric8io/gofabric8
func createMissingPVs(c *k8sclient.Client, ns string) {
	found, pvcs, pendingClaimNames := findPendingPVs(c, ns)
	if found {
		sshCommand := ""
		createPV(c, ns, pendingClaimNames, sshCommand)
		items := pvcs.Items
		for _, item := range items {
			status := item.Status.Phase
			if status == api.ClaimPending || status == api.ClaimLost {
				err := c.PersistentVolumeClaims(ns).Delete(item.ObjectMeta.Name)
				if err != nil {
					util.Infof("Error deleting PVC %s\n", item.ObjectMeta.Name)
				} else {
					util.Infof("Recreating PVC %s\n", item.ObjectMeta.Name)

					_, err = c.PersistentVolumeClaims(ns).Create(&api.PersistentVolumeClaim{
						ObjectMeta: api.ObjectMeta{
							Name:      item.ObjectMeta.Name,
							Namespace: ns,
						},
						Spec: api.PersistentVolumeClaimSpec{
							VolumeName:  ns + "-" + item.ObjectMeta.Name,
							AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
							Resources: api.ResourceRequirements{
								Requests: api.ResourceList{
									api.ResourceName(api.ResourceStorage): resource.MustParse("1Gi"),
								},
							},
						},
					})
					if err != nil {
						util.Warnf("Failed to recreate PVC %s: %v\n", item.ObjectMeta.Name, err)
					}
				}
			}
		}
	}
}
Code example #15
File: kubelet.go Project: ncdc/kubernetes
// updateNodeLabels updates the labels of the nodes given by nodeNames.
// If a given label already exists, it is overwritten; if a label to remove
// doesn't exist, it is silently ignored.
// TODO: migrate to use framework.AddOrUpdateLabelOnNode/framework.RemoveLabelOffNode
func updateNodeLabels(c *client.Client, nodeNames sets.String, toAdd, toRemove map[string]string) {
	const maxRetries = 5
	for nodeName := range nodeNames {
		var node *api.Node
		var err error
		for i := 0; i < maxRetries; i++ {
			node, err = c.Nodes().Get(nodeName)
			if err != nil {
				framework.Logf("Error getting node %s: %v", nodeName, err)
				continue
			}
			if toAdd != nil {
				for k, v := range toAdd {
					node.ObjectMeta.Labels[k] = v
				}
			}
			if toRemove != nil {
				for k := range toRemove {
					delete(node.ObjectMeta.Labels, k)
				}
			}
			_, err = c.Nodes().Update(node)
			if err != nil {
				framework.Logf("Error updating node %s: %v", nodeName, err)
			} else {
				break
			}
		}
		Expect(err).NotTo(HaveOccurred())
	}
}
Code example #16
File: integration.go Project: koori02/kubernetes
// podsOnNodes returns a wait.ConditionFunc that reports true once all of the
// selected pods are bound to a node and running.
func podsOnNodes(c *client.Client, podNamespace string, labelSelector labels.Selector) wait.ConditionFunc {
	// Wait until all pods are running on the node.
	return func() (bool, error) {
		options := api.ListOptions{LabelSelector: labelSelector}
		pods, err := c.Pods(podNamespace).List(options)
		if err != nil {
			glog.Infof("Unable to get pods to list: %v", err)
			return false, nil
		}
		for i := range pods.Items {
			pod := pods.Items[i]
			podString := fmt.Sprintf("%s/%s", pod.Namespace, pod.Name)
			glog.Infof("Check whether pod %q exists on node %q", podString, pod.Spec.NodeName)
			if len(pod.Spec.NodeName) == 0 {
				glog.Infof("Pod %q is not bound to a host yet", podString)
				return false, nil
			}
			if pod.Status.Phase != api.PodRunning {
				glog.Infof("Pod %q is not running, status: %v", podString, pod.Status.Phase)
				return false, nil
			}
		}
		return true, nil
	}
}
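Because podsOnNodes returns a wait.ConditionFunc, it plugs directly into the polling helpers of the wait package; a sketch (the interval, timeout, and selector are illustrative):

selector := labels.SelectorFromSet(labels.Set{"name": "nettest"})
if err := wait.Poll(2*time.Second, 5*time.Minute, podsOnNodes(c, "default", selector)); err != nil {
	glog.Fatalf("pods never reached Running: %v", err)
}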
Code example #17
File: utils.go Project: upmc-enterprises/contrib
// getClusterNodesIP returns the IP address of each node in the kubernetes cluster
func getClusterNodesIP(kubeClient *unversioned.Client, nodeSelector string) (clusterNodes []string) {
	listOpts := api.ListOptions{}

	if nodeSelector != "" {
		label, err := labels.Parse(nodeSelector)
		if err != nil {
			glog.Fatalf("'%v' is not a valid selector: %v", nodeSelector, err)
		}
		listOpts.LabelSelector = label
	}

	nodes, err := kubeClient.Nodes().List(listOpts)
	if err != nil {
		glog.Fatalf("Error getting running nodes: %v", err)
	}

	for _, nodo := range nodes.Items {
		nodeIP, err := node.GetNodeHostIP(&nodo)
		if err == nil {
			clusterNodes = append(clusterNodes, nodeIP.String())
		}
	}
	sort.Strings(clusterNodes)

	return
}
Code example #18
func testOne(t *testing.T, client *kclient.Client, namespace, addrType string, success bool) *kapi.Endpoints {
	testEndpoint := &kapi.Endpoints{}
	testEndpoint.GenerateName = "test"
	testEndpoint.Subsets = []kapi.EndpointSubset{
		{
			Addresses: []kapi.EndpointAddress{
				{
					IP: exampleAddresses[addrType],
				},
			},
			Ports: []kapi.EndpointPort{
				{
					Port:     9999,
					Protocol: kapi.ProtocolTCP,
				},
			},
		},
	}

	ep, err := client.Endpoints(namespace).Create(testEndpoint)
	if err != nil && success {
		t.Fatalf("unexpected error creating %s network endpoint: %v", addrType, err)
	} else if err == nil && !success {
		t.Fatalf("unexpected success creating %s network endpoint", addrType)
	}
	return ep
}
Code example #19
File: utils.go Project: upmc-enterprises/contrib
// waitForPodCondition waits until the named pod satisfies the given condition function.
func waitForPodCondition(kubeClient *unversioned.Client, ns, podName string, condition func(pod *api.Pod) (bool, error),
	interval, timeout time.Duration) error {
	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		pod, err := kubeClient.Pods(ns).Get(podName)
		if err != nil {
			if apierrs.IsNotFound(err) {
				// Not created yet; keep polling.
				return false, nil
			}
			// Any other error ends the wait rather than handing a nil pod
			// to the condition below.
			return false, err
		}

		done, err := condition(pod)
		if err != nil {
			return false, err
		}
		if done {
			return true, nil
		}

		return false, nil
	})

	if err != nil {
		return fmt.Errorf("timed out waiting to observe own status as Running")
	}

	return nil
}
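A sketch of a condition closure for this helper (namespace and pod name are placeholders): the condition only reports done/not-done, so waiting for the Running phase is a one-liner.

running := func(pod *api.Pod) (bool, error) {
	return pod.Status.Phase == api.PodRunning, nil
}
err := waitForPodCondition(kubeClient, "default", "my-pod", running, 2*time.Second, 2*time.Minute)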
Code example #20
func CreateNewControllerFromCurrentController(c *client.Client, namespace, oldName, newName, image, deploymentKey string) (*api.ReplicationController, error) {
	// load the old RC into the "new" RC
	newRc, err := c.ReplicationControllers(namespace).Get(oldName)
	if err != nil {
		return nil, err
	}

	if len(newRc.Spec.Template.Spec.Containers) > 1 {
		// TODO: support multi-container image update.
		return nil, goerrors.New("Image update is not supported for multi-container pods")
	}
	if len(newRc.Spec.Template.Spec.Containers) == 0 {
		return nil, fmt.Errorf("Pod has no containers! (%v)", newRc)
	}
	newRc.Spec.Template.Spec.Containers[0].Image = image

	newHash, err := api.HashObject(newRc, c.Codec)
	if err != nil {
		return nil, err
	}

	if len(newName) == 0 {
		newName = fmt.Sprintf("%s-%s", newRc.Name, newHash)
	}
	newRc.Name = newName

	newRc.Spec.Selector[deploymentKey] = newHash
	newRc.Spec.Template.Labels[deploymentKey] = newHash
	// Clear resource version after hashing so that identical updates get different hashes.
	newRc.ResourceVersion = ""
	return newRc, nil
}
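An illustrative invocation (controller name and image are placeholders): passing an empty newName makes the helper derive one from the deployment hash.

newRc, err := CreateNewControllerFromCurrentController(c, "default", "frontend", "", "nginx:1.9.1", "deployment")
if err != nil {
	// handle lookup/hash failure
}
// newRc now carries the updated image plus a hash-suffixed name, selector,
// and template label, ready for a rolling updater.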
Code example #21
File: nodes.go Project: timstclair/kube-contrib
// GetRequiredPodsForNode returns a list of pods that would appear on the node if the
// node were just created (like daemonset and manifest-run pods). It reuses the kubectl
// drain command to get the list.
func GetRequiredPodsForNode(nodename string, client *kube_client.Client) ([]*kube_api.Pod, error) {
	podsToRemoveList, _, _, err := cmd.GetPodsForDeletionOnNodeDrain(client, nodename,
		kube_api.Codecs.UniversalDecoder(), true, true)
	if err != nil {
		return []*kube_api.Pod{}, err
	}

	podsToRemoveMap := make(map[string]struct{})
	for _, pod := range podsToRemoveList {
		podsToRemoveMap[pod.SelfLink] = struct{}{}
	}

	allPodList, err := client.Pods(kube_api.NamespaceAll).List(
		kube_api.ListOptions{FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodename})})
	if err != nil {
		return []*kube_api.Pod{}, err
	}

	podsOnNewNode := make([]*kube_api.Pod, 0)
	for i, pod := range allPodList.Items {
		if _, found := podsToRemoveMap[pod.SelfLink]; !found {
			podsOnNewNode = append(podsOnNewNode, &allPodList.Items[i])
		}
	}
	return podsOnNewNode, nil
}
Code example #22
// Retrieves metrics information.
func getMetrics(c *client.Client) (string, error) {
	body, err := c.Get().AbsPath("/metrics").DoRaw()
	if err != nil {
		return "", err
	}
	return string(body), nil
}
Code example #23
func checkMirrorPodDisappear(cl *client.Client, name, namespace string) error {
	_, err := cl.Pods(namespace).Get(name)
	if errors.IsNotFound(err) {
		return nil
	}
	return goerrors.New("pod not disappear")
}
Code example #24
func runServiceAndRCForResourceConsumer(c *client.Client, ns, name string, replicas int, cpuLimitMillis, memLimitMb int64) {
	By(fmt.Sprintf("Running consuming RC %s with %v replicas", name, replicas))
	_, err := c.Services(ns).Create(&api.Service{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{{
				Port:       port,
				TargetPort: util.NewIntOrStringFromInt(targetPort),
			}},

			Selector: map[string]string{
				"name": name,
			},
		},
	})
	expectNoError(err)
	config := RCConfig{
		Client:     c,
		Image:      resourceConsumerImage,
		Name:       name,
		Namespace:  ns,
		Timeout:    timeoutRC,
		Replicas:   replicas,
		CpuRequest: cpuLimitMillis,
		CpuLimit:   cpuLimitMillis,
		MemRequest: memLimitMb * 1024 * 1024, // MemLimit is in bytes
		MemLimit:   memLimitMb * 1024 * 1024,
	}
	expectNoError(RunRC(config))
	// Make sure endpoints are propagated.
	// TODO(piosz): replace sleep with endpoints watch.
	time.Sleep(10 * time.Second)
}
Code example #25
func verifyResult(c *client.Client, podName string, ns string, oldNotScheduled int) {
	allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
	expectNoError(err)
	scheduledPods, notScheduledPods := getPodsScheduled(allPods)

	schedEvents, err := c.Events(ns).List(
		labels.Everything(),
		fields.Set{
			"involvedObject.kind":      "Pod",
			"involvedObject.name":      podName,
			"involvedObject.namespace": ns,
			"source":                   "scheduler",
			"reason":                   "FailedScheduling",
		}.AsSelector())
	expectNoError(err)

	printed := false
	printOnce := func(msg string) string {
		if !printed {
			printed = true
			return msg
		} else {
			return ""
		}
	}

	Expect(len(notScheduledPods)).To(Equal(1+oldNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
	Expect(schedEvents.Items).ToNot(BeEmpty(), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
}
Code example #26
File: nodeoutofdisk.go Project: vjsamuel/kubernetes
// createOutOfDiskPod creates a pod in the given namespace with the requested amount of CPU.
func createOutOfDiskPod(c *client.Client, ns, name string, milliCPU int64) {
	podClient := c.Pods(ns)

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "pause",
					Image: "beta.gcr.io/google_containers/pause:2.0",
					Resources: api.ResourceRequirements{
						Requests: api.ResourceList{
							// Request enough CPU to fit only two pods on a given node.
							api.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
						},
					},
				},
			},
		},
	}

	_, err := podClient.Create(pod)
	expectNoError(err)
}
Code example #27
File: monitoring.go Project: richm/origin
// Query sends a command to the server and returns the Response
func Query(c *client.Client, query string) (*influxdb.Response, error) {
	result, err := c.Get().
		Prefix("proxy").
		Namespace("kube-system").
		Resource("services").
		Name(influxdbService+":api").
		Suffix("query").
		Param("q", query).
		Param("db", influxdbDatabaseName).
		Param("epoch", "s").
		Do().
		Raw()

	if err != nil {
		return nil, err
	}

	var response influxdb.Response
	dec := json.NewDecoder(bytes.NewReader(result))
	dec.UseNumber()
	err = dec.Decode(&response)

	if err != nil {
		return nil, err
	}
	return &response, nil
}
Code example #28
File: pods.go Project: Vitamen/kubernetes
// testHostIP tests that a pod gets a host IP
func testHostIP(c *client.Client, ns string, pod *api.Pod) {
	podClient := c.Pods(ns)
	By("creating pod")
	defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
	if _, err := podClient.Create(pod); err != nil {
		Failf("Failed to create pod: %v", err)
	}
	By("ensuring that pod is running and has a hostIP")
	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	err := waitForPodRunningInNamespace(c, pod.Name, ns)
	Expect(err).NotTo(HaveOccurred())
	// Try to make sure we get a hostIP for each pod.
	hostIPTimeout := 2 * time.Minute
	t := time.Now()
	for {
		p, err := podClient.Get(pod.Name)
		Expect(err).NotTo(HaveOccurred())
		if p.Status.HostIP != "" {
			Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
			break
		}
		if time.Since(t) >= hostIPTimeout {
			Failf("Gave up waiting for hostIP of pod %s after %v seconds",
				p.Name, time.Since(t).Seconds())
		}
		Logf("Retrying to get the hostIP of pod %s", p.Name)
		time.Sleep(5 * time.Second)
	}
}
Code example #29
// StartPods checks for numPods in TestNS. If they exist, it no-ops; otherwise it starts up
// a temp rc, scales it to match numPods, then deletes the rc leaving behind the pods.
func StartPods(numPods int, host string, restClient *client.Client) error {
	start := time.Now()
	defer func() {
		glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
	}()
	hostField := fields.OneTermEqualSelector(client.PodHost, host)
	pods, err := restClient.Pods(TestNS).List(labels.Everything(), hostField)
	if err != nil || len(pods.Items) == numPods {
		return err
	}
	glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)
	// For the sake of simplicity, assume all pods in TestNS have selectors matching TestRCManifest.
	controller := RCFromManifest(TestRCManifest)

	// Make the rc unique to the given host.
	controller.Spec.Replicas = numPods
	controller.Spec.Template.Spec.NodeName = host
	controller.Name = controller.Name + host
	controller.Spec.Selector["host"] = host
	controller.Spec.Template.Labels["host"] = host

	if rc, err := StartRC(controller, restClient); err != nil {
		return err
	} else {
		// Delete the rc, otherwise when we restart master components for the next benchmark
		// the rc controller will race with the pods controller in the rc manager.
		return restClient.ReplicationControllers(TestNS).Delete(rc.Name)
	}
}
Code example #30
File: deploy.go Project: fabric8io/gofabric8
func loadTemplateData(ns string, templateName string, c *k8sclient.Client, oc *oclient.Client) ([]byte, string, error) {
	typeOfMaster := util.TypeOfMaster(c)
	if typeOfMaster == util.Kubernetes {
		catalogName := "catalog-" + templateName
		configMap, err := c.ConfigMaps(ns).Get(catalogName)
		if err != nil {
			return nil, "", err
		}
		for k, v := range configMap.Data {
			if strings.LastIndex(k, ".json") >= 0 {
				return []byte(v), "json", nil
			}
			if strings.LastIndex(k, ".yml") >= 0 || strings.LastIndex(k, ".yaml") >= 0 {
				return []byte(v), "yaml", nil
			}
		}
		return nil, "", fmt.Errorf("Could not find a key for the catalog %s which ends with `.json` or `.yml`", catalogName)

	} else {
		template, err := oc.Templates(ns).Get(templateName)
		if err != nil {
			return nil, "", err
		}
		data, err := json.Marshal(template)
		return data, "json", err
	}
	return nil, "", nil
}