Code Example #1
File: server.go Project: nak3/kubernetes
// encodePods creates a v1.PodList object from pods and returns the encoded
// PodList.
func encodePods(pods []*v1.Pod) (data []byte, err error) {
	podList := new(v1.PodList)
	for _, pod := range pods {
		podList.Items = append(podList.Items, *pod)
	}
	// TODO: this needs to be parameterized to the kubelet, not hardcoded. Depends on Kubelet
	//   as API server refactor.
	// TODO: Locked to v1, needs to be made generic
	codec := api.Codecs.LegacyCodec(schema.GroupVersion{Group: v1.GroupName, Version: "v1"})
	return runtime.Encode(codec, podList)
}
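
For context, a hedged sketch of the reverse operation: decoding the bytes produced by encodePods back into a *v1.PodList with the same legacy codec. decodePods is not part of the original server.go; it assumes the same api, fmt, runtime, schema, and v1 imports used above.

// decodePods is an illustrative helper (not from server.go): it decodes bytes
// produced by encodePods back into a *v1.PodList using the same legacy codec.
func decodePods(data []byte) (*v1.PodList, error) {
	codec := api.Codecs.LegacyCodec(schema.GroupVersion{Group: v1.GroupName, Version: "v1"})
	obj, err := runtime.Decode(codec, data)
	if err != nil {
		return nil, err
	}
	podList, ok := obj.(*v1.PodList)
	if !ok {
		return nil, fmt.Errorf("expected *v1.PodList, got %T", obj)
	}
	return podList, nil
}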
Code Example #2
File: sync.go Project: paralin/kubernetes
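// listPods lists the pods managed by the given deployment via
// deploymentutil.ListPods, backed by the pod lister cache rather than a
// direct API call.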
func (dc *DeploymentController) listPods(deployment *extensions.Deployment) (*v1.PodList, error) {
	return deploymentutil.ListPods(deployment,
		func(namespace string, options v1.ListOptions) (*v1.PodList, error) {
			parsed, err := labels.Parse(options.LabelSelector)
			if err != nil {
				return nil, err
			}
			pods, err := dc.podLister.Pods(namespace).List(parsed)
			result := v1.PodList{Items: make([]v1.Pod, 0, len(pods))}
			for i := range pods {
				result.Items = append(result.Items, *pods[i])
			}
			return &result, err
		})
}
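
A hedged usage sketch (not from the original file): a caller inside the deployment controller fetching the pods and iterating over them. logDeploymentPods is hypothetical and assumes the DeploymentController type and the extensions and glog imports already present in sync.go.

// logDeploymentPods is an illustrative helper (not in sync.go): it fetches the
// deployment's pods through listPods and logs each one at verbosity 4.
func (dc *DeploymentController) logDeploymentPods(deployment *extensions.Deployment) error {
	podList, err := dc.listPods(deployment)
	if err != nil {
		return err
	}
	for i := range podList.Items {
		glog.V(4).Infof("deployment %s/%s manages pod %s", deployment.Namespace, deployment.Name, podList.Items[i].Name)
	}
	return nil
}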
Code Example #3
File: sync.go Project: paralin/kubernetes
// addHashKeyToRSAndPods adds pod-template-hash information to the given rs, if it's not already there, with the following steps:
// 1. Add hash label to the rs's pod template, and make sure the controller sees this update so that no orphaned pods will be created
// 2. Add hash label to all pods this rs owns, and wait until the replicaset controller reports rs.Status.FullyLabeledReplicas equal to the desired number of replicas
// 3. Add hash label to the rs's label and selector
func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet) (updatedRS *extensions.ReplicaSet, err error) {
	objCopy, err := api.Scheme.Copy(rs)
	if err != nil {
		return nil, err
	}
	updatedRS = objCopy.(*extensions.ReplicaSet)

	// If the rs already has the new hash label in its selector, it's done syncing
	if labelsutil.SelectorHasLabel(rs.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
		return
	}
	namespace := rs.Namespace
	hash := rsutil.GetPodTemplateSpecHash(rs)
	rsUpdated := false
	// 1. Add hash template label to the rs. This ensures that any newly created pods will have the new label.
	updatedRS, rsUpdated, err = rsutil.UpdateRSWithRetries(dc.client.Extensions().ReplicaSets(namespace), updatedRS,
		func(updated *extensions.ReplicaSet) error {
			// Precondition: the RS doesn't contain the new hash in its pod template label.
			if updated.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
				return utilerrors.ErrPreconditionViolated
			}
			updated.Spec.Template.Labels = labelsutil.AddLabel(updated.Spec.Template.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash)
			return nil
		})
	if err != nil {
		return nil, fmt.Errorf("error updating %s %s/%s pod template label with template hash: %v", updatedRS.Kind, updatedRS.Namespace, updatedRS.Name, err)
	}
	if !rsUpdated {
		// If the RS wasn't updated but step 1 didn't return an error, we've hit an RS not found error.
		// Return here and retry in the next sync loop.
		return rs, nil
	}
	// Make sure rs pod template is updated so that it won't create pods without the new label (orphaned pods).
	if updatedRS.Generation > updatedRS.Status.ObservedGeneration {
		if err = deploymentutil.WaitForReplicaSetUpdated(dc.client, updatedRS.Generation, namespace, updatedRS.Name); err != nil {
			return nil, fmt.Errorf("error waiting for %s %s/%s generation %d observed by controller: %v", updatedRS.Kind, updatedRS.Namespace, updatedRS.Name, updatedRS.Generation, err)
		}
	}
	glog.V(4).Infof("Observed the update of %s %s/%s's pod template with hash %s.", rs.Kind, rs.Namespace, rs.Name, hash)

	// 2. Update all pods managed by the rs to have the new hash label, so they will be correctly adopted.
	selector, err := unversioned.LabelSelectorAsSelector(updatedRS.Spec.Selector)
	if err != nil {
		return nil, fmt.Errorf("error in converting selector to label selector for replica set %s: %s", updatedRS.Name, err)
	}
	options := v1.ListOptions{LabelSelector: selector.String()}
	parsed, err := labels.Parse(options.LabelSelector)
	if err != nil {
		return nil, err
	}
	pods, err := dc.podLister.Pods(namespace).List(parsed)
	if err != nil {
		return nil, fmt.Errorf("error in getting pod list for namespace %s and list options %+v: %s", namespace, options, err)
	}
	podList := v1.PodList{Items: make([]v1.Pod, 0, len(pods))}
	for i := range pods {
		podList.Items = append(podList.Items, *pods[i])
	}
	allPodsLabeled := false
	if allPodsLabeled, err = deploymentutil.LabelPodsWithHash(&podList, updatedRS, dc.client, namespace, hash); err != nil {
		return nil, fmt.Errorf("error in adding template hash label %s to pods %+v: %s", hash, podList, err)
	}
	// If not all pods are labeled but step 2 didn't return an error, we've hit at least one pod not found error.
	// Return here and retry in the next sync loop.
	if !allPodsLabeled {
		return updatedRS, nil
	}

	// We need to wait for the replicaset controller to observe the pods being
	// labeled with pod template hash. Because previously we've called
	// WaitForReplicaSetUpdated, the replicaset controller should have dropped
	// FullyLabeledReplicas to 0 already, we only need to wait for it to increase
	// back to the number of replicas in the spec.
	if err = deploymentutil.WaitForPodsHashPopulated(dc.client, updatedRS.Generation, namespace, updatedRS.Name); err != nil {
		return nil, fmt.Errorf("%s %s/%s: error waiting for replicaset controller to observe pods being labeled with template hash: %v", updatedRS.Kind, updatedRS.Namespace, updatedRS.Name, err)
	}

	// 3. Update rs label and selector to include the new hash label
	// Copy the old selector, so that we can scrub out any orphaned pods
	if updatedRS, rsUpdated, err = rsutil.UpdateRSWithRetries(dc.client.Extensions().ReplicaSets(namespace), updatedRS,
		func(updated *extensions.ReplicaSet) error {
			// Precondition: the RS doesn't contain the new hash in its label or selector.
			if updated.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash && updated.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
				return utilerrors.ErrPreconditionViolated
			}
			updated.Labels = labelsutil.AddLabel(updated.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash)
			updated.Spec.Selector = labelsutil.AddLabelToSelector(updated.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey, hash)
			return nil
		}); err != nil {
		return nil, fmt.Errorf("error updating %s %s/%s label and selector with template hash: %v", updatedRS.Kind, updatedRS.Namespace, updatedRS.Name, err)
	}
	if rsUpdated {
		glog.V(4).Infof("Updated %s %s/%s's selector and label with hash %s.", rs.Kind, rs.Namespace, rs.Name, hash)
	}
	// If the RS isn't actually updated in step 3, that's okay, we'll retry in the next sync loop since its selector isn't updated yet.

	// TODO: look for orphaned pods and label them in the background somewhere else periodically

	return updatedRS, nil
}
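
Both rsutil.UpdateRSWithRetries calls above follow the same optimistic pattern: the mutation callback checks a precondition, returns utilerrors.ErrPreconditionViolated when the object already carries the hash, and the helper retries the write on transient failures. Below is a minimal, hypothetical sketch of that pattern; it is an illustration only, not the real rsutil implementation, and it assumes the extensions and utilerrors imports from this file.

// updateRSWithPrecondition is a hypothetical illustration of the pattern used by
// rsutil.UpdateRSWithRetries above: re-fetch the object, apply the mutation unless
// the precondition says it is already up to date, and retry the write a few times.
func updateRSWithPrecondition(
	get func() (*extensions.ReplicaSet, error),
	put func(*extensions.ReplicaSet) (*extensions.ReplicaSet, error),
	mutate func(*extensions.ReplicaSet) error,
) (rs *extensions.ReplicaSet, updated bool, err error) {
	for attempt := 0; attempt < 3; attempt++ {
		if rs, err = get(); err != nil {
			return nil, false, err
		}
		switch err = mutate(rs); err {
		case utilerrors.ErrPreconditionViolated:
			return rs, false, nil // already in the desired state, nothing to write
		case nil:
			// precondition not yet met, proceed to the write below
		default:
			return nil, false, err
		}
		if rs, err = put(rs); err == nil {
			return rs, true, nil
		}
	}
	return nil, false, err
}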
Code Example #4
File: init_test.go Project: kubernetes/kubernetes
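// fakeInitHostFactory returns a cmdutil.Factory whose fake REST client serves
// canned responses for the objects the federation init command is expected to
// create on the host cluster: namespace, service, secrets, PVC, deployments,
// and the pods it polls for readiness.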
func fakeInitHostFactory(federationName, namespaceName, ip, dnsZoneName, image, dnsProvider, etcdPVCapacity, storageProvider string) (cmdutil.Factory, error) {
	svcName := federationName + "-apiserver"
	svcUrlPrefix := "/api/v1/namespaces/federation-system/services"
	credSecretName := svcName + "-credentials"
	cmKubeconfigSecretName := federationName + "-controller-manager-kubeconfig"
	pvCap := "10Gi"
	if etcdPVCapacity != "" {
		pvCap = etcdPVCapacity
	}

	capacity, err := resource.ParseQuantity(pvCap)
	if err != nil {
		return nil, err
	}
	pvcName := svcName + "-etcd-claim"
	replicas := int32(1)

	namespace := v1.Namespace{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Namespace",
			APIVersion: testapi.Default.GroupVersion().String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: namespaceName,
		},
	}

	svc := v1.Service{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: testapi.Default.GroupVersion().String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespaceName,
			Name:      svcName,
			Labels:    componentLabel,
		},
		Spec: v1.ServiceSpec{
			Type:     v1.ServiceTypeLoadBalancer,
			Selector: apiserverSvcSelector,
			Ports: []v1.ServicePort{
				{
					Name:       "https",
					Protocol:   "TCP",
					Port:       443,
					TargetPort: intstr.FromInt(443),
				},
			},
		},
	}

	svcWithLB := svc
	svcWithLB.Status = v1.ServiceStatus{
		LoadBalancer: v1.LoadBalancerStatus{
			Ingress: []v1.LoadBalancerIngress{
				{
					IP: ip,
				},
			},
		},
	}

	credSecret := v1.Secret{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Secret",
			APIVersion: testapi.Default.GroupVersion().String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      credSecretName,
			Namespace: namespaceName,
		},
		Data: nil,
	}

	cmKubeconfigSecret := v1.Secret{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Secret",
			APIVersion: testapi.Default.GroupVersion().String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      cmKubeconfigSecretName,
			Namespace: namespaceName,
		},
		Data: nil,
	}

	pvc := v1.PersistentVolumeClaim{
		TypeMeta: metav1.TypeMeta{
			Kind:       "PersistentVolumeClaim",
			APIVersion: testapi.Default.GroupVersion().String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      pvcName,
			Namespace: namespaceName,
			Labels:    componentLabel,
			Annotations: map[string]string{
				"volume.alpha.kubernetes.io/storage-class": "yes",
			},
		},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
			},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceStorage: capacity,
				},
			},
		},
	}

	apiserver := v1beta1.Deployment{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Deployment",
			APIVersion: testapi.Extensions.GroupVersion().String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      svcName,
			Namespace: namespaceName,
			Labels:    componentLabel,
		},
		Spec: v1beta1.DeploymentSpec{
			Replicas: &replicas,
			Selector: nil,
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Name:   svcName,
					Labels: apiserverPodLabels,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  "apiserver",
							Image: image,
							Command: []string{
								"/hyperkube",
								"federation-apiserver",
								"--bind-address=0.0.0.0",
								"--etcd-servers=http://localhost:2379",
								"--secure-port=443",
								"--client-ca-file=/etc/federation/apiserver/ca.crt",
								"--tls-cert-file=/etc/federation/apiserver/server.crt",
								"--tls-private-key-file=/etc/federation/apiserver/server.key",
								fmt.Sprintf("--storage-backend=%s", storageProvider),
								"--advertise-address=" + ip,
							},
							Ports: []v1.ContainerPort{
								{
									Name:          "https",
									ContainerPort: 443,
								},
								{
									Name:          "local",
									ContainerPort: 8080,
								},
							},
							VolumeMounts: []v1.VolumeMount{
								{
									Name:      credSecretName,
									MountPath: "/etc/federation/apiserver",
									ReadOnly:  true,
								},
							},
						},
						{
							Name:  "etcd",
							Image: "gcr.io/google_containers/etcd:3.0.14-alpha.1",
							Command: []string{
								"/usr/local/bin/etcd",
								"--data-dir",
								"/var/etcd/data",
							},
							VolumeMounts: []v1.VolumeMount{
								{
									Name:      "etcddata",
									MountPath: "/var/etcd",
								},
							},
						},
					},
					Volumes: []v1.Volume{
						{
							Name: credSecretName,
							VolumeSource: v1.VolumeSource{
								Secret: &v1.SecretVolumeSource{
									SecretName: credSecretName,
								},
							},
						},
						{
							Name: "etcddata",
							VolumeSource: v1.VolumeSource{
								PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
									ClaimName: pvcName,
								},
							},
						},
					},
				},
			},
		},
	}

	cmName := federationName + "-controller-manager"
	cm := v1beta1.Deployment{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Deployment",
			APIVersion: testapi.Extensions.GroupVersion().String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      cmName,
			Namespace: namespaceName,
			Labels:    componentLabel,
		},
		Spec: v1beta1.DeploymentSpec{
			Replicas: &replicas,
			Selector: nil,
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Name:   cmName,
					Labels: controllerManagerPodLabels,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  "controller-manager",
							Image: image,
							Command: []string{
								"/hyperkube",
								"federation-controller-manager",
								"--master=https://" + svcName,
								"--kubeconfig=/etc/federation/controller-manager/kubeconfig",
								fmt.Sprintf("--dns-provider=%s", dnsProvider),
								"--dns-provider-config=",
								fmt.Sprintf("--federation-name=%s", federationName),
								fmt.Sprintf("--zone-name=%s", dnsZoneName),
							},
							VolumeMounts: []v1.VolumeMount{
								{
									Name:      cmKubeconfigSecretName,
									MountPath: "/etc/federation/controller-manager",
									ReadOnly:  true,
								},
							},
							Env: []v1.EnvVar{
								{
									Name: "POD_NAMESPACE",
									ValueFrom: &v1.EnvVarSource{
										FieldRef: &v1.ObjectFieldSelector{
											FieldPath: "metadata.namespace",
										},
									},
								},
							},
						},
					},
					Volumes: []v1.Volume{
						{
							Name: cmKubeconfigSecretName,
							VolumeSource: v1.VolumeSource{
								Secret: &v1.SecretVolumeSource{
									SecretName: cmKubeconfigSecretName,
								},
							},
						},
					},
				},
			},
		},
	}

	podList := v1.PodList{}
	apiServerPod := v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: testapi.Extensions.GroupVersion().String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      svcName,
			Namespace: namespaceName,
		},
		Status: v1.PodStatus{
			Phase: "Running",
		},
	}

	cmPod := v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: testapi.Extensions.GroupVersion().String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      cmName,
			Namespace: namespaceName,
		},
		Status: v1.PodStatus{
			Phase: "Running",
		},
	}

	podList.Items = append(podList.Items, apiServerPod)
	podList.Items = append(podList.Items, cmPod)

	f, tf, codec, _ := cmdtesting.NewAPIFactory()
	extCodec := testapi.Extensions.Codec()
	ns := dynamic.ContentConfig().NegotiatedSerializer
	tf.ClientConfig = kubefedtesting.DefaultClientConfig()
	tf.Client = &fake.RESTClient{
		NegotiatedSerializer: ns,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
			switch p, m := req.URL.Path, req.Method; {
			case p == "/healthz":
				return &http.Response{StatusCode: http.StatusOK, Header: kubefedtesting.DefaultHeader(), Body: ioutil.NopCloser(bytes.NewReader([]byte("ok")))}, nil
			case p == "/api/v1/namespaces" && m == http.MethodPost:
				body, err := ioutil.ReadAll(req.Body)
				if err != nil {
					return nil, err
				}
				var got v1.Namespace
				_, _, err = codec.Decode(body, nil, &got)
				if err != nil {
					return nil, err
				}
				if !api.Semantic.DeepEqual(got, namespace) {
					return nil, fmt.Errorf("Unexpected namespace object\n\tDiff: %s", diff.ObjectGoPrintDiff(got, namespace))
				}
				return &http.Response{StatusCode: http.StatusCreated, Header: kubefedtesting.DefaultHeader(), Body: kubefedtesting.ObjBody(codec, &namespace)}, nil
			case p == svcUrlPrefix && m == http.MethodPost:
				body, err := ioutil.ReadAll(req.Body)
				if err != nil {
					return nil, err
				}
				var got v1.Service
				_, _, err = codec.Decode(body, nil, &got)
				if err != nil {
					return nil, err
				}
				if !api.Semantic.DeepEqual(got, svc) {
					return nil, fmt.Errorf("Unexpected service object\n\tDiff: %s", diff.ObjectGoPrintDiff(got, svc))
				}
				return &http.Response{StatusCode: http.StatusCreated, Header: kubefedtesting.DefaultHeader(), Body: kubefedtesting.ObjBody(codec, &svc)}, nil
			case strings.HasPrefix(p, svcUrlPrefix) && m == http.MethodGet:
				got := strings.TrimPrefix(p, svcUrlPrefix+"/")
				if got != svcName {
					return nil, errors.NewNotFound(api.Resource("services"), got)
				}
				return &http.Response{StatusCode: http.StatusOK, Header: kubefedtesting.DefaultHeader(), Body: kubefedtesting.ObjBody(codec, &svcWithLB)}, nil
			case p == "/api/v1/namespaces/federation-system/secrets" && m == http.MethodPost:
				body, err := ioutil.ReadAll(req.Body)
				if err != nil {
					return nil, err
				}
				var got, want v1.Secret
				_, _, err = codec.Decode(body, nil, &got)
				if err != nil {
					return nil, err
				}
				// The obtained secret contains generated data which cannot
				// be compared, so we just nullify the generated part
				// and compare the rest of the secret. The generated
				// parts are tested in other tests.
				got.Data = nil
				switch got.Name {
				case credSecretName:
					want = credSecret
				case cmKubeconfigSecretName:
					want = cmKubeconfigSecret
				}
				if !api.Semantic.DeepEqual(got, want) {
					return nil, fmt.Errorf("Unexpected secret object\n\tDiff: %s", diff.ObjectGoPrintDiff(got, want))
				}
				return &http.Response{StatusCode: http.StatusCreated, Header: kubefedtesting.DefaultHeader(), Body: kubefedtesting.ObjBody(codec, &want)}, nil
			case p == "/api/v1/namespaces/federation-system/persistentvolumeclaims" && m == http.MethodPost:
				body, err := ioutil.ReadAll(req.Body)
				if err != nil {
					return nil, err
				}
				var got v1.PersistentVolumeClaim
				_, _, err = codec.Decode(body, nil, &got)
				if err != nil {
					return nil, err
				}
				if !api.Semantic.DeepEqual(got, pvc) {
					return nil, fmt.Errorf("Unexpected PVC object\n\tDiff: %s", diff.ObjectGoPrintDiff(got, pvc))
				}
				return &http.Response{StatusCode: http.StatusCreated, Header: kubefedtesting.DefaultHeader(), Body: kubefedtesting.ObjBody(codec, &pvc)}, nil
			case p == "/apis/extensions/v1beta1/namespaces/federation-system/deployments" && m == http.MethodPost:
				body, err := ioutil.ReadAll(req.Body)
				if err != nil {
					return nil, err
				}
				var got, want v1beta1.Deployment
				_, _, err = codec.Decode(body, nil, &got)
				if err != nil {
					return nil, err
				}
				switch got.Name {
				case svcName:
					want = apiserver
				case cmName:
					want = cm
				}
				if !api.Semantic.DeepEqual(got, want) {
					return nil, fmt.Errorf("Unexpected deployment object\n\tDiff: %s", diff.ObjectGoPrintDiff(got, want))
				}
				return &http.Response{StatusCode: http.StatusCreated, Header: kubefedtesting.DefaultHeader(), Body: kubefedtesting.ObjBody(extCodec, &want)}, nil
			case p == "/api/v1/namespaces/federation-system/pods" && m == http.MethodGet:
				return &http.Response{StatusCode: http.StatusOK, Header: kubefedtesting.DefaultHeader(), Body: kubefedtesting.ObjBody(codec, &podList)}, nil
			default:
				return nil, fmt.Errorf("unexpected request: %#v\n%#v", req.URL, req)
			}
		}),
	}
	return f, nil
}
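
Each POST branch in the switch above repeats the same steps: read the body, decode it, compare it against the expected object, and answer with 201 Created. A hedged refactoring sketch of that pattern follows; checkAndRespond is hypothetical and not part of init_test.go, and it assumes the same api, diff, fmt, http, ioutil, kubefedtesting, and runtime imports as the test file.

// checkAndRespond is a hypothetical helper illustrating the repeated POST-branch
// pattern above: read and decode the request body into got, compare it to want,
// and reply with 201 Created carrying the expected object.
func checkAndRespond(req *http.Request, codec runtime.Codec, got, want runtime.Object) (*http.Response, error) {
	body, err := ioutil.ReadAll(req.Body)
	if err != nil {
		return nil, err
	}
	if _, _, err := codec.Decode(body, nil, got); err != nil {
		return nil, err
	}
	if !api.Semantic.DeepEqual(got, want) {
		return nil, fmt.Errorf("unexpected object\n\tDiff: %s", diff.ObjectGoPrintDiff(got, want))
	}
	return &http.Response{StatusCode: http.StatusCreated, Header: kubefedtesting.DefaultHeader(), Body: kubefedtesting.ObjBody(codec, want)}, nil
}

With such a helper, the namespace branch above could reduce to roughly checkAndRespond(req, codec, &v1.Namespace{}, &namespace); branches that scrub generated fields first, such as the secrets case, would still need their extra step before comparing.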