Example No. 1
// checkClaims compares all expectedClaims with set of claims at the end of the
// test and reports differences.
func (r *volumeReactor) checkClaims(t *testing.T, expectedClaims []*api.PersistentVolumeClaim) error {
	r.lock.Lock()
	defer r.lock.Unlock()

	expectedMap := make(map[string]*api.PersistentVolumeClaim)
	gotMap := make(map[string]*api.PersistentVolumeClaim)
	for _, c := range expectedClaims {
		c.ResourceVersion = ""
		expectedMap[c.Name] = c
	}
	for _, c := range r.claims {
		// We must clone the claim because of the golang race detector - it was
		// written by the controller without holding any locks on it.
		clone, _ := conversion.NewCloner().DeepCopy(c)
		c = clone.(*api.PersistentVolumeClaim)
		c.ResourceVersion = ""
		gotMap[c.Name] = c
	}
	if !reflect.DeepEqual(expectedMap, gotMap) {
		// Print ugly but useful diff of expected and received objects for
		// easier debugging.
		return fmt.Errorf("Claim check failed [A-expected, B-got result]: %s", diff.ObjectDiff(expectedMap, gotMap))
	}
	return nil
}
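
The "golang race detector" mentioned above is what go test -race runs: reading an object that another goroutine may still be writing is a data race even when the reads look harmless, which is why the reactor clones before comparing. A minimal standalone sketch of the kind of race the clone avoids (hypothetical test, standard library only; the detector typically flags the unsynchronized read):

func TestUnsynchronizedRead(t *testing.T) {
	pod := &struct{ Name string }{}
	// Writer goroutine, standing in for the controller.
	go func() { pod.Name = "written-by-controller" }()
	// Unsynchronized read, standing in for the test's comparison;
	// go test -race reports this read/write pair as a data race.
	_ = pod.Name
}
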
Example No. 2
// setClaimProvisioner saves
// claim.Annotations[annStorageProvisioner] = class.Provisioner
func (ctrl *PersistentVolumeController) setClaimProvisioner(claim *api.PersistentVolumeClaim, class *storage.StorageClass) (*api.PersistentVolumeClaim, error) {
	if val, ok := claim.Annotations[annStorageProvisioner]; ok && val == class.Provisioner {
		// annotation is already set, nothing to do
		return claim, nil
	}

	// The volume from method args can be pointing to watcher cache. We must not
	// modify these, therefore create a copy.
	clone, err := conversion.NewCloner().DeepCopy(claim)
	if err != nil {
		return nil, fmt.Errorf("Error cloning pv: %v", err)
	}
	claimClone, ok := clone.(*api.PersistentVolumeClaim)
	if !ok {
		return nil, fmt.Errorf("Unexpected claim cast error : %v", claimClone)
	}
	api.SetMetaDataAnnotation(&claimClone.ObjectMeta, annStorageProvisioner, class.Provisioner)
	newClaim, err := ctrl.kubeClient.Core().PersistentVolumeClaims(claim.Namespace).Update(claimClone)
	if err != nil {
		return newClaim, err
	}
	_, err = ctrl.storeClaimUpdate(newClaim)
	if err != nil {
		return newClaim, err
	}
	return newClaim, nil
}
Example No. 3
// checkVolumes compares all expectedVolumes with set of volumes at the end of
// the test and reports differences.
func (r *volumeReactor) checkVolumes(t *testing.T, expectedVolumes []*api.PersistentVolume) error {
	r.lock.Lock()
	defer r.lock.Unlock()

	expectedMap := make(map[string]*api.PersistentVolume)
	gotMap := make(map[string]*api.PersistentVolume)
	// Clear any ResourceVersion from both sets
	for _, v := range expectedVolumes {
		v.ResourceVersion = ""
		expectedMap[v.Name] = v
	}
	for _, v := range r.volumes {
		// We must clone the volume because of the golang race detector - it was
		// written by the controller without holding any locks on it.
		clone, _ := conversion.NewCloner().DeepCopy(v)
		v = clone.(*api.PersistentVolume)
		v.ResourceVersion = ""
		if v.Spec.ClaimRef != nil {
			v.Spec.ClaimRef.ResourceVersion = ""
		}
		gotMap[v.Name] = v
	}
	if !reflect.DeepEqual(expectedMap, gotMap) {
		// Print ugly but useful diff of expected and received objects for
		// easier debugging.
		return fmt.Errorf("Volume check failed [A-expected, B-got]: %s", diff.ObjectDiff(expectedMap, gotMap))
	}
	return nil
}
Example No. 4
func generateNodeAndTaintedNode(oldTaints []api.Taint, newTaints []api.Taint) (*api.Node, *api.Node) {
	var taintedNode *api.Node

	oldTaintsData, _ := json.Marshal(oldTaints)
	// Create a node.
	node := &api.Node{
		ObjectMeta: api.ObjectMeta{
			Name:              "node-name",
			CreationTimestamp: unversioned.Time{Time: time.Now()},
			Annotations: map[string]string{
				api.TaintsAnnotationKey: string(oldTaintsData),
			},
		},
		Spec: api.NodeSpec{
			ExternalID: "node-name",
		},
		Status: api.NodeStatus{},
	}
	clone, _ := conversion.NewCloner().DeepCopy(node)

	newTaintsData, _ := json.Marshal(newTaints)
	// A copy of the same node, but tainted.
	taintedNode = clone.(*api.Node)
	taintedNode.Annotations = map[string]string{
		api.TaintsAnnotationKey: string(newTaintsData),
	}

	return node, taintedNode
}
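
A hypothetical call site for the helper above; the taint keys and values are illustrative:

oldTaints := []api.Taint{{Key: "dedicated", Value: "db", Effect: api.TaintEffectNoSchedule}}
newTaints := []api.Taint{{Key: "dedicated", Value: "web", Effect: api.TaintEffectNoSchedule}}
node, taintedNode := generateNodeAndTaintedNode(oldTaints, newTaints)
// node carries the old taints annotation and taintedNode the new one;
// because of the deep copy, mutating one does not affect the other.
_, _ = node, taintedNode
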
Example No. 5
/*
  updateClusterIngressUIDToMasters takes the ingress UID annotation on the master cluster and applies it to the given cluster.
  If there is no master cluster, then fallbackUID is used (and hence this cluster becomes the master).
*/
func (ic *IngressController) updateClusterIngressUIDToMasters(cluster *federation_api.Cluster, fallbackUID string) {
	masterCluster, masterUID, err := ic.getMasterCluster()
	clusterObj, clusterErr := conversion.NewCloner().DeepCopy(cluster) // Make a clone so that we don't clobber our input param
	clusterClone, ok := clusterObj.(*federation_api.Cluster)
	if clusterErr != nil || !ok {
		glog.Errorf("Internal error: failed to clone cluster resource while attempting to add master ingress UID annotation (%q = %q) to cluster %q, will try again later: %v", uidAnnotationKey, masterUID, cluster.Name, clusterErr)
		return
	}
	cluster = clusterClone
	if err == nil {
		if masterCluster.Name != cluster.Name { // We're not the master, need to get in sync
			cluster.ObjectMeta.Annotations[uidAnnotationKey] = masterUID
			if _, err = ic.federatedApiClient.Federation().Clusters().Update(cluster); err != nil {
				glog.Errorf("Failed to add master ingress UID annotation (%q = %q) from master cluster %q to cluster %q, will try again later: %v", uidAnnotationKey, masterUID, masterCluster.Name, cluster.Name, err)
				return
			} else {
				glog.V(4).Infof("Successfully added master ingress UID annotation (%q = %q) from master cluster %q to cluster %q.", uidAnnotationKey, masterUID, masterCluster.Name, cluster.Name)
			}
		} else {
			glog.V(4).Infof("Cluster %q with ingress UID is already the master with annotation (%q = %q), no need to update.", cluster.Name, uidAnnotationKey, cluster.ObjectMeta.Annotations[uidAnnotationKey])
		}
	} else {
		glog.V(2).Infof("No master cluster found to source an ingress UID from for cluster %q.  Attempting to elect new master cluster %q with ingress UID %q = %q", cluster.Name, cluster.Name, uidAnnotationKey, fallbackUID)
		if fallbackUID != "" {
			cluster.ObjectMeta.Annotations[uidAnnotationKey] = fallbackUID
			if _, err = ic.federatedApiClient.Federation().Clusters().Update(cluster); err != nil {
				glog.Errorf("Failed to add ingress UID annotation (%q = %q) to cluster %q. No master elected. Will try again later: %v", uidAnnotationKey, fallbackUID, cluster.Name, err)
			} else {
				glog.V(4).Infof("Successfully added ingress UID annotation (%q = %q) to cluster %q.", uidAnnotationKey, fallbackUID, cluster.Name)
			}
		} else {
			glog.Errorf("No master cluster exists, and fallbackUID for cluster %q is invalid (%q).  This probably means that no clusters have an ingress controller configmap with key %q.  Federated Ingress currently supports clusters running Google Loadbalancer Controller (\"GLBC\")", cluster.Name, fallbackUID, uidKey)
		}
	}
}
Example No. 6
// NewScheme creates a new Scheme. This scheme is pluggable by default.
func NewScheme() *Scheme {
	s := &Scheme{
		gvkToType:        map[unversioned.GroupVersionKind]reflect.Type{},
		typeToGVK:        map[reflect.Type][]unversioned.GroupVersionKind{},
		unversionedTypes: map[reflect.Type]unversioned.GroupVersionKind{},
		unversionedKinds: map[string]reflect.Type{},
		cloner:           conversion.NewCloner(),
		fieldLabelConversionFuncs: map[string]map[string]FieldLabelConversionFunc{},
	}
	s.converter = conversion.NewConverter(s.nameFunc)

	s.AddConversionFuncs(DefaultEmbeddedConversions()...)

	// Enable map[string][]string conversions by default
	if err := s.AddConversionFuncs(DefaultStringConversions...); err != nil {
		panic(err)
	}
	if err := s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields); err != nil {
		panic(err)
	}
	if err := s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields); err != nil {
		panic(err)
	}
	return s
}
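
A minimal usage sketch for the scheme, assuming a hypothetical TestType that implements runtime.Object (exact registration APIs vary across versions):

scheme := runtime.NewScheme()
gv := unversioned.GroupVersion{Group: "example.com", Version: "v1"}
scheme.AddKnownTypes(gv, &TestType{}) // TestType is a hypothetical runtime.Object
obj, err := scheme.New(gv.WithKind("TestType"))
if err != nil {
	panic(err)
}
_ = obj // a freshly allocated *TestType
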
Example No. 7
func GetPodFromTemplate(template *v1.PodTemplateSpec, parentObject runtime.Object, controllerRef *v1.OwnerReference) (*v1.Pod, error) {
	desiredLabels := getPodsLabelSet(template)
	desiredFinalizers := getPodsFinalizers(template)
	desiredAnnotations, err := getPodsAnnotationSet(template, parentObject)
	if err != nil {
		return nil, err
	}
	accessor, err := meta.Accessor(parentObject)
	if err != nil {
		return nil, fmt.Errorf("parentObject does not have ObjectMeta, %v", err)
	}
	prefix := getPodsPrefix(accessor.GetName())

	pod := &v1.Pod{
		ObjectMeta: v1.ObjectMeta{
			Labels:       desiredLabels,
			Annotations:  desiredAnnotations,
			GenerateName: prefix,
			Finalizers:   desiredFinalizers,
		},
	}
	if controllerRef != nil {
		pod.OwnerReferences = append(pod.OwnerReferences, *controllerRef)
	}
	clone, err := conversion.NewCloner().DeepCopy(&template.Spec)
	if err != nil {
		return nil, err
	}
	pod.Spec = *clone.(*v1.PodSpec)
	return pod, nil
}
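
A hypothetical call site, sketching how a controller could stamp out a pod from its template; rs stands in for a ReplicaSet being synced, and the OwnerReference field values are illustrative:

isController := true
ref := &v1.OwnerReference{
	APIVersion: "extensions/v1beta1",
	Kind:       "ReplicaSet",
	Name:       rs.Name,
	UID:        rs.UID,
	Controller: &isController,
}
pod, err := GetPodFromTemplate(&rs.Spec.Template, rs, ref)
if err != nil {
	return err
}
// Because the Spec was deep-copied above, later edits to rs.Spec.Template
// cannot leak into pod (and vice versa).
_ = pod
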
Example No. 8
// updateFederationService returns whatever error occurred along with a boolean indicating whether it
// should be retried.
func (s *ServiceController) updateFederationService(key string, cachedService *cachedService) (error, bool) {
	// Clone federation service, and create them in underlying k8s cluster
	clone, err := conversion.NewCloner().DeepCopy(cachedService.lastState)
	if err != nil {
		return err, !retryable
	}
	service, ok := clone.(*v1.Service)
	if !ok {
		return fmt.Errorf("Unexpected service cast error : %v\n", service), !retryable
	}

	// handle available clusters one by one; the per-cluster goroutines share
	// hasErr, so guard it with a mutex and wait for them before reading it.
	var (
		hasErr bool
		mu     sync.Mutex
		wg     sync.WaitGroup
	)
	for clusterName, cache := range s.clusterCache.clientMap {
		wg.Add(1)
		go func(cache *clusterCache, clusterName string) {
			defer wg.Done()
			if err := s.processServiceForCluster(cachedService, clusterName, service, cache.clientset); err != nil {
				mu.Lock()
				hasErr = true
				mu.Unlock()
			}
		}(cache, clusterName)
	}
	wg.Wait()
	if hasErr {
		// detail error has been dumped inside the loop
		return fmt.Errorf("Service %s/%s was not successfully updated to all clusters", service.Namespace, service.Name), retryable
	}
	return nil, !retryable
}
Example No. 9
func TestPodUpdateAnnotations(t *testing.T) {
	channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)

	pod := CreateValidPod("foo2", "new")
	pod.Annotations = make(map[string]string, 0)
	pod.Annotations["kubernetes.io/blah"] = "blah"

	clone, err := conversion.NewCloner().DeepCopy(pod)
	if err != nil {
		t.Fatalf("%v", err)
	}

	podUpdate := CreatePodUpdate(kubelet.SET, TestSource, CreateValidPod("foo1", "new"), clone.(*api.Pod), CreateValidPod("foo3", "new"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, TestSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new")))

	pod.Annotations["kubenetes.io/blah"] = "superblah"
	podUpdate = CreatePodUpdate(kubelet.SET, TestSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, TestSource, pod))

	pod.Annotations["kubernetes.io/otherblah"] = "doh"
	podUpdate = CreatePodUpdate(kubelet.SET, TestSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, TestSource, pod))

	delete(pod.Annotations, "kubernetes.io/blah")
	podUpdate = CreatePodUpdate(kubelet.SET, TestSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new"))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, TestSource, pod))
}
Example No. 10
func DeepCopyApiTypeOrPanic(item interface{}) interface{} {
	result, err := conversion.NewCloner().DeepCopy(item)
	if err != nil {
		panic(err)
	}
	return result
}
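
Hypothetical usage: copy an object taken from a shared cache before mutating it, so the cached original stays untouched.

podCopy := DeepCopyApiTypeOrPanic(pod).(*api.Pod)
podCopy.Labels = map[string]string{"mutated": "true"}
// pod, and whatever cache it came from, is unchanged.
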
Example No. 11
// initializeCaches fills all controller caches with initial data from etcd so
// that the caches are already filled when the first addClaim/addVolume events
// arrive to perform the initial synchronization of the controller.
func (ctrl *PersistentVolumeController) initializeCaches(volumeSource, claimSource cache.ListerWatcher) {
	volumeListObj, err := volumeSource.List(api.ListOptions{})
	if err != nil {
		glog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
		return
	}
	volumeList, ok := volumeListObj.(*api.PersistentVolumeList)
	if !ok {
		glog.Errorf("PersistentVolumeController can't initialize caches, expected list of volumes, got: %#v", volumeListObj)
		return
	}
	for _, volume := range volumeList.Items {
		// Ignore template volumes from kubernetes 1.2
		deleted := ctrl.upgradeVolumeFrom1_2(&volume)
		if !deleted {
			clone, err := conversion.NewCloner().DeepCopy(&volume)
			if err != nil {
				glog.Errorf("error cloning volume %q: %v", volume.Name, err)
				continue
			}
			volumeClone := clone.(*api.PersistentVolume)
			ctrl.storeVolumeUpdate(volumeClone)
		}
	}

	claimListObj, err := claimSource.List(api.ListOptions{})
	if err != nil {
		glog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
		return
	}
	claimList, ok := claimListObj.(*api.PersistentVolumeClaimList)
	if !ok {
		glog.Errorf("PersistentVolumeController can't initialize caches, expected list of claims, got: %#v", claimListObj)
		return
	}
	for _, claim := range claimList.Items {
		clone, err := conversion.NewCloner().DeepCopy(&claim)
		if err != nil {
			glog.Errorf("error cloning claim %q: %v", claimToClaimKey(&claim), err)
			continue
		}
		claimClone := clone.(*api.PersistentVolumeClaim)
		ctrl.storeClaimUpdate(claimClone)
	}
	glog.V(4).Infof("controller initialized")
}
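
The per-item DeepCopy in the loops above matters for a second reason besides cache safety: a Go range loop reuses its iteration variable, so &volume and &claim point at the same memory on every pass. A standalone demonstration of the aliasing (behavior of Go versions before 1.22, which changed loop-variable scoping):

items := []string{"a", "b"}
ptrs := []*string{}
for _, item := range items {
	ptrs = append(ptrs, &item) // every entry points at the same variable
}
fmt.Println(*ptrs[0], *ptrs[1]) // prints "b b" on Go < 1.22
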
Example No. 12
// modifyVolumeEvent simulates that a volume has been modified in etcd and the
// controller receives 'volume modified' event.
func (r *volumeReactor) modifyVolumeEvent(volume *api.PersistentVolume) {
	r.lock.Lock()
	defer r.lock.Unlock()

	r.volumes[volume.Name] = volume
	// Generate modification event. Cloned volume is needed to prevent races (and we
	// would get a clone from etcd too).
	clone, _ := conversion.NewCloner().DeepCopy(volume)
	volumeClone := clone.(*api.PersistentVolume)
	r.volumeSource.Modify(volumeClone)
}
Example No. 13
// tryUpdateNodeStatus tries to update node status to master. If ReconcileCBR0
// is set, this function will also confirm that cbr0 is configured correctly.
func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
	// In large clusters, GET and PUT operations on Node objects coming
	// from here are the majority of load on apiserver and etcd.
	// To reduce the load on etcd, we are serving GET operations from
	// apiserver cache (the data might be slightly delayed but it doesn't
	// seem to cause more conflict - the delays are pretty small).
	// If it results in a conflict, all retries are served directly from etcd.
	// TODO: Currently apiserver doesn't support serving GET operations
	// from its cache. Thus we are hacking it by issuing LIST with
	// field selector for the name of the node (field selectors with
	// specified name are handled efficiently by apiserver). Once
	// apiserver supports GET from cache, change it here.
	opts := v1.ListOptions{
		FieldSelector: fields.Set{"metadata.name": string(kl.nodeName)}.AsSelector().String(),
	}
	if tryNumber == 0 {
		opts.ResourceVersion = "0"
	}
	nodes, err := kl.kubeClient.Core().Nodes().List(opts)
	if err != nil {
		return fmt.Errorf("error getting node %q: %v", kl.nodeName, err)
	}
	if len(nodes.Items) != 1 {
		return fmt.Errorf("no node instance returned for %q", kl.nodeName)
	}
	node := &nodes.Items[0]

	clonedNode, err := conversion.NewCloner().DeepCopy(node)
	if err != nil {
		return fmt.Errorf("error clone node %q: %v", kl.nodeName, err)
	}

	originalNode, ok := clonedNode.(*v1.Node)
	if !ok || originalNode == nil {
		return fmt.Errorf("failed to cast %q node object %#v to v1.Node", kl.nodeName, clonedNode)
	}

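	// originalNode is kept pristine: setNodeStatus below mutates node, and
	// PatchNodeStatus diffs the two to build the status patch.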
	kl.updatePodCIDR(node.Spec.PodCIDR)

	if err := kl.setNodeStatus(node); err != nil {
		return err
	}
	// Patch the current status on the API server
	updatedNode, err := nodeutil.PatchNodeStatus(kl.kubeClient, types.NodeName(kl.nodeName), originalNode, node)
	if err != nil {
		return err
	}
	// If update finishes successfully, mark the volumeInUse as reportedInUse to indicate
	// those volumes are already updated in the node's status
	kl.volumeManager.MarkVolumesAsReportedInUse(updatedNode.Status.VolumesInUse)
	return nil
}
Example No. 14
// deleteVolumeEvent simulates that a volume has been deleted in etcd and
// the controller receives 'volume deleted' event.
func (r *volumeReactor) deleteVolumeEvent(volume *api.PersistentVolume) {
	r.lock.Lock()
	defer r.lock.Unlock()

	// Remove the volume from list of resulting volumes.
	delete(r.volumes, volume.Name)

	// Generate deletion event. Cloned volume is needed to prevent races (and we
	// would get a clone from etcd too).
	clone, _ := conversion.NewCloner().DeepCopy(volume)
	volumeClone := clone.(*api.PersistentVolume)
	r.volumeSource.Delete(volumeClone)
}
Example No. 15
// deleteClaimEvent simulates that a claim has been deleted in etcd and the
// controller receives 'claim deleted' event.
func (r *volumeReactor) deleteClaimEvent(claim *api.PersistentVolumeClaim) {
	r.lock.Lock()
	defer r.lock.Unlock()

	// Remove the claim from list of resulting claims.
	delete(r.claims, claim.Name)

	// Generate deletion event. Cloned volume is needed to prevent races (and we
	// would get a clone from etcd too).
	clone, _ := conversion.NewCloner().DeepCopy(claim)
	claimClone := clone.(*api.PersistentVolumeClaim)
	r.claimSource.Delete(claimClone)
}
Example No. 16
// Recycle recycles/scrubs clean a HostPath volume.
// Recycle blocks until the pod has completed or any error occurs.
// HostPath recycling only works in single node clusters and is meant for testing purposes only.
func (r *hostPathRecycler) Recycle() error {
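	// Clone the shared pod template so the per-volume overrides below do not
	// mutate r.config.RecyclerPodTemplate, which is reused for every recycle.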
	templateClone, err := conversion.NewCloner().DeepCopy(r.config.RecyclerPodTemplate)
	if err != nil {
		return err
	}
	pod := templateClone.(*v1.Pod)
	// overrides
	pod.Spec.ActiveDeadlineSeconds = &r.timeout
	pod.Spec.Volumes[0].VolumeSource = v1.VolumeSource{
		HostPath: &v1.HostPathVolumeSource{
			Path: r.path,
		},
	}
	return volume.RecycleVolumeByWatchingPodUntilCompletion(r.pvName, pod, r.host.GetKubeClient(), r.eventRecorder)
}
Example No. 17
// provisionVolume provisions a volume that has been created in the cluster but not yet fulfilled by
// the storage provider.
func provisionVolume(pv *api.PersistentVolume, controller *PersistentVolumeProvisionerController) error {
	if isProvisioningComplete(pv) {
		return fmt.Errorf("PersistentVolume[%s] is already provisioned", pv.Name)
	}

	if _, exists := pv.Annotations[qosProvisioningKey]; !exists {
		return fmt.Errorf("PersistentVolume[%s] does not contain a provisioning request.  Provisioning not required.", pv.Name)
	}

	if controller.provisioner == nil {
		return fmt.Errorf("No provisioner found for volume: %s", pv.Name)
	}

	// Find the claim in local cache
	obj, exists, _ := controller.claimStore.GetByKey(fmt.Sprintf("%s/%s", pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name))
	if !exists {
		return fmt.Errorf("Could not find PersistentVolumeClaim[%s/%s] in local cache", pv.Spec.ClaimRef.Name, pv.Name)
	}
	claim := obj.(*api.PersistentVolumeClaim)

	provisioner, err := newProvisioner(controller.provisioner, claim)
	if err != nil {
		return fmt.Errorf("Unexpected error creating provisioner for claim %s/%s: %v", claim.Namespace, claim.Name, err)
	}
	err = provisioner.Provision(pv)
	if err != nil {
		glog.Errorf("Could not provision %s", pv.Name)
		pv.Status.Phase = api.VolumeFailed
		pv.Status.Message = err.Error()
		if pv, apiErr := controller.client.UpdatePersistentVolumeStatus(pv); apiErr != nil {
			return fmt.Errorf("PersistentVolume[%s] failed provisioning and also failed status update: %v  -  %v", pv.Name, err, apiErr)
		}
		return fmt.Errorf("PersistentVolume[%s] failed provisioning : %v", pv.Name, err, err)
	}

	clone, err := conversion.NewCloner().DeepCopy(pv)
	if err != nil {
		return fmt.Errorf("Error cloning pv: %v", err)
	}
	volumeClone, ok := clone.(*api.PersistentVolume)
	if !ok {
		return fmt.Errorf("Unexpected pv cast error : %v\n", volumeClone)
	}
	volumeClone.Annotations[pvProvisioningRequiredAnnotationKey] = pvProvisioningCompletedAnnotationValue

	pv, err = controller.client.UpdatePersistentVolume(volumeClone)
	if err != nil {
		// TODO:  https://github.com/kubernetes/kubernetes/issues/14443
		// the volume was created in the infrastructure and likely has a PV name on it,
		// but we failed to save the annotation that marks the volume as provisioned.
		return fmt.Errorf("Error updating PersistentVolume[%s] with provisioning completed annotation. There is a potential for dupes and orphans.", volumeClone.Name)
	}
	return nil
}
Example No. 18
// Recycle recycles/scrubs clean an NFS volume.
// Recycle blocks until the pod has completed or any error occurs.
func (r *nfsRecycler) Recycle() error {
	templateClone, err := conversion.NewCloner().DeepCopy(r.config.RecyclerPodTemplate)
	if err != nil {
		return err
	}
	pod := templateClone.(*api.Pod)
	// overrides
	pod.Spec.ActiveDeadlineSeconds = &r.timeout
	pod.GenerateName = "pv-recycler-nfs-"
	pod.Spec.Volumes[0].VolumeSource = api.VolumeSource{
		NFS: &api.NFSVolumeSource{
			Server: r.server,
			Path:   r.path,
		},
	}
	return volume.RecycleVolumeByWatchingPodUntilCompletion(r.pvName, pod, r.host.GetKubeClient(), r.eventRecorder)
}
Example No. 19
func TestMain(m *testing.M) {
	// Create a node.
	node = &api.Node{
		ObjectMeta: api.ObjectMeta{
			Name:              "node",
			CreationTimestamp: unversioned.Time{Time: time.Now()},
		},
		Spec: api.NodeSpec{
			ExternalID: "node",
		},
		Status: api.NodeStatus{},
	}
	clone, _ := conversion.NewCloner().DeepCopy(node)

	// A copy of the same node, but cordoned.
	cordoned_node = clone.(*api.Node)
	cordoned_node.Spec.Unschedulable = true
	os.Exit(m.Run())
}
Example No. 20
func TestPodUpdateLabels(t *testing.T) {
	channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)

	pod := CreateValidPod("foo2", "new")
	pod.Labels = make(map[string]string, 0)
	pod.Labels["key"] = "value"

	clone, err := conversion.NewCloner().DeepCopy(pod)
	if err != nil {
		t.Fatalf("%v", err)
	}

	podUpdate := CreatePodUpdate(kubelet.SET, TestSource, clone.(*api.Pod))
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, TestSource, pod))

	pod.Labels["key"] = "newValue"
	podUpdate = CreatePodUpdate(kubelet.SET, TestSource, pod)
	channel <- podUpdate
	expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, TestSource, pod))

}
Example No. 21
// tryUpdateNodeStatus tries to update node status to master. If ReconcileCBR0
// is set, this function will also confirm that cbr0 is configured correctly.
func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
	// In large clusters, GET and PUT operations on Node objects coming
	// from here are the majority of load on apiserver and etcd.
	// To reduce the load on etcd, we are serving GET operations from
	// apiserver cache (the data might be slightly delayed but it doesn't
	// seem to cause more conflict - the delays are pretty small).
	// If it results in a conflict, all retries are served directly from etcd.
	opts := metav1.GetOptions{}
	if tryNumber == 0 {
		opts.ResourceVersion = "0"
	}
	node, err := kl.kubeClient.Core().Nodes().Get(string(kl.nodeName), opts)
	if err != nil {
		return fmt.Errorf("error getting node %q: %v", kl.nodeName, err)
	}

	clonedNode, err := conversion.NewCloner().DeepCopy(node)
	if err != nil {
		return fmt.Errorf("error clone node %q: %v", kl.nodeName, err)
	}

	originalNode, ok := clonedNode.(*v1.Node)
	if !ok || originalNode == nil {
		return fmt.Errorf("failed to cast %q node object %#v to v1.Node", kl.nodeName, clonedNode)
	}

	kl.updatePodCIDR(node.Spec.PodCIDR)

	kl.setNodeStatus(node)
	// Patch the current status on the API server
	updatedNode, err := nodeutil.PatchNodeStatus(kl.kubeClient, types.NodeName(kl.nodeName), originalNode, node)
	if err != nil {
		return err
	}
	// If update finishes successfully, mark the volumeInUse as reportedInUse to indicate
	// those volumes are already updated in the node's status
	kl.volumeManager.MarkVolumesAsReportedInUse(updatedNode.Status.VolumesInUse)
	return nil
}
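
Conceptually, PatchNodeStatus sends only the delta between the pristine clone and the mutated node. A sketch of the underlying mechanism, using the same strategic-merge-patch helper that the nodeStatusUpdater example further below calls explicitly (error handling elided; treat the exact signatures as assumptions):

oldData, _ := json.Marshal(originalNode)
newData, _ := json.Marshal(node)
patchBytes, _ := strategicpatch.CreateStrategicMergePatch(oldData, newData, v1.Node{})
// patchBytes now contains only the status fields that setNodeStatus changed.
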
Example No. 22
func TestPersistentVolumeRecycler(t *testing.T) {
	_, s := framework.RunAMaster(t)
	defer s.Close()

	deleteAllEtcdKeys()
	binderClient := client.NewOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	recyclerClient := client.NewOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	testClient := client.NewOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	host := volume.NewFakeVolumeHost("/tmp/fake", nil, nil)

	plugins := []volume.VolumePlugin{&volume.FakeVolumePlugin{"plugin-name", host, volume.VolumeConfig{}, volume.VolumeOptions{}}}
	cloud := &fake_cloud.FakeCloud{}

	binder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(binderClient, 10*time.Second)
	binder.Run()
	defer binder.Stop()

	recycler, _ := persistentvolumecontroller.NewPersistentVolumeRecycler(recyclerClient, 30*time.Second, plugins, cloud)
	recycler.Run()
	defer recycler.Stop()

	// This PV will be claimed, released, and recycled.
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{Name: "fake-pv"},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource:        api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/tmp/foo"}},
			Capacity:                      api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")},
			AccessModes:                   []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle,
		},
	}

	pvc := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{Name: "fake-pvc"},
		Spec: api.PersistentVolumeClaimSpec{
			Resources:   api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("5G")}},
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
		},
	}

	w, _ := testClient.PersistentVolumes().Watch(api.ListOptions{})
	defer w.Stop()

	_, _ = testClient.PersistentVolumes().Create(pv)
	_, _ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)

	// wait until the binder pairs the volume and claim
	waitForPersistentVolumePhase(w, api.VolumeBound)

	// deleting a claim releases the volume, after which it can be recycled
	if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
		t.Errorf("error deleting claim %s", pvc.Name)
	}

	waitForPersistentVolumePhase(w, api.VolumeReleased)
	waitForPersistentVolumePhase(w, api.VolumeAvailable)

	// end of Recycler test.
	// Deleter test begins now.
	// tests are serial because running masters concurrently that delete keys may cause similar tests to time out

	deleteAllEtcdKeys()

	// change the reclamation policy of the PV for the next test
	pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimDelete

	w, _ = testClient.PersistentVolumes().Watch(api.ListOptions{})
	defer w.Stop()

	_, _ = testClient.PersistentVolumes().Create(pv)
	_, _ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)

	waitForPersistentVolumePhase(w, api.VolumeBound)

	// deleting a claim releases the volume, after which it can be recycled
	if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
		t.Errorf("error deleting claim %s", pvc.Name)
	}

	waitForPersistentVolumePhase(w, api.VolumeReleased)

	for {
		event := <-w.ResultChan()
		if event.Type == watch.Deleted {
			break
		}
	}

	// test the race between claims and volumes: ensure a volume binds to only a single claim.
	deleteAllEtcdKeys()
	counter := 0
	maxClaims := 100
	claims := []*api.PersistentVolumeClaim{}
	for counter <= maxClaims {
		counter += 1
		clone, _ := conversion.NewCloner().DeepCopy(pvc)
		newPvc, _ := clone.(*api.PersistentVolumeClaim)
		newPvc.ObjectMeta = api.ObjectMeta{Name: fmt.Sprintf("fake-pvc-%d", counter)}
		claim, err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(newPvc)
		if err != nil {
			t.Fatal("Error creating newPvc: %v", err)
		}
		claims = append(claims, claim)
	}

	// putting a bind manually on a pv should only match the claim it is bound to
	rand.Seed(time.Now().Unix())
	claim := claims[rand.Intn(maxClaims-1)]
	claimRef, err := api.GetReference(claim)
	if err != nil {
		t.Fatalf("Unexpected error getting claimRef: %v", err)
	}
	pv.Spec.ClaimRef = claimRef

	pv, err = testClient.PersistentVolumes().Create(pv)
	if err != nil {
		t.Fatalf("Unexpected error creating pv: %v", err)
	}

	waitForPersistentVolumePhase(w, api.VolumeBound)

	pv, err = testClient.PersistentVolumes().Get(pv.Name)
	if err != nil {
		t.Fatalf("Unexpected error getting pv: %v", err)
	}
	if pv.Spec.ClaimRef == nil {
		t.Fatalf("Unexpected nil claimRef")
	}
	if pv.Spec.ClaimRef.Namespace != claimRef.Namespace || pv.Spec.ClaimRef.Name != claimRef.Name {
		t.Fatalf("Bind mismatch! Expected %s/%s but got %s/%s", claimRef.Namespace, claimRef.Name, pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name)
	}
}
Example No. 23
func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) {
	glog.V(4).Infof("Reconciling ingress %q for all clusters", ingress)
	if !ic.isSynced() {
		ic.deliverIngress(ingress, ic.clusterAvailableDelay, false)
		return
	}

	key := ingress.String()
	baseIngressObjFromStore, exist, err := ic.ingressInformerStore.GetByKey(key)
	if err != nil {
		glog.Errorf("Failed to query main ingress store for %v: %v", ingress, err)
		ic.deliverIngress(ingress, 0, true)
		return
	}
	if !exist {
		// Not federated ingress, ignoring.
		glog.V(4).Infof("Ingress %q is not federated.  Ignoring.", ingress)
		return
	}
	baseIngressObj, err := conversion.NewCloner().DeepCopy(baseIngressObjFromStore)
	baseIngress, ok := baseIngressObj.(*extensionsv1beta1.Ingress)
	if err != nil || !ok {
		glog.Errorf("Internal error %v: object retrieved from ingressInformerStore with key %q is not of correct type *extensionsv1beta1.Ingress: %v", err, key, baseIngressObj)
		ic.deliverIngress(ingress, 0, true)
		return
	}
	glog.V(4).Infof("Base (federated) ingress: %v", baseIngress)

	if baseIngress.DeletionTimestamp != nil {
		if err := ic.delete(baseIngress); err != nil {
			glog.Errorf("Failed to delete %s: %v", ingress, err)
			ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "DeleteFailed",
				"Ingress delete failed: %v", err)
			ic.deliverIngress(ingress, 0, true)
		}
		return
	}

	glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for ingress: %s",
		baseIngress.Name)
	// Add the required finalizers before creating an ingress in underlying clusters.
	updatedIngressObj, err := ic.deletionHelper.EnsureFinalizers(baseIngress)
	if err != nil {
		glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in ingress %s: %v",
			baseIngress.Name, err)
		ic.deliverIngress(ingress, 0, true)
		return
	}
	baseIngress = updatedIngressObj.(*extensionsv1beta1.Ingress)

	glog.V(3).Infof("Syncing ingress %s in underlying clusters", baseIngress.Name)

	clusters, err := ic.ingressFederatedInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get cluster list: %v", err)
		ic.deliverIngress(ingress, ic.clusterAvailableDelay, false)
		return
	} else {
		glog.V(4).Infof("Found %d ready clusters across which to reconcile ingress %q", len(clusters), ingress)
	}

	operations := make([]util.FederatedOperation, 0)

	for _, cluster := range clusters {
		baseIPName, baseIPAnnotationExists := baseIngress.ObjectMeta.Annotations[staticIPNameKeyWritable]
		firstClusterName, firstClusterExists := baseIngress.ObjectMeta.Annotations[firstClusterAnnotation]
		clusterIngressObj, clusterIngressFound, err := ic.ingressFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			glog.Errorf("Failed to get cached ingress %s for cluster %s, will retry: %v", ingress, cluster.Name, err)
			ic.deliverIngress(ingress, 0, true)
			return
		}
		desiredIngress := &extensionsv1beta1.Ingress{}
		objMeta, err := conversion.NewCloner().DeepCopy(baseIngress.ObjectMeta)
		if err != nil {
			glog.Errorf("Error deep copying ObjectMeta: %v", err)
		}
		objSpec, err := conversion.NewCloner().DeepCopy(baseIngress.Spec)
		if err != nil {
			glog.Errorf("Error deep copying Spec: %v", err)
		}
		desiredIngress.ObjectMeta, ok = objMeta.(v1.ObjectMeta)
		if !ok {
			glog.Errorf("Internal error: Failed to cast to v1.ObjectMeta: %v", objMeta)
		}
		desiredIngress.Spec, ok = objSpec.(extensionsv1beta1.IngressSpec)
		if !ok {
			glog.Errorf("Internal error: Failed to cast to extensionsv1beta1.IngressSpec: %v", objSpec)
		}
		glog.V(4).Infof("Desired Ingress: %v", desiredIngress)

		if !clusterIngressFound {
			glog.V(4).Infof("No existing Ingress %s in cluster %s - checking if appropriate to queue a create operation", ingress, cluster.Name)
			// We can't supply server-created fields when creating a new object.
			desiredIngress.ObjectMeta = util.DeepCopyRelevantObjectMeta(baseIngress.ObjectMeta)
			ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "CreateInCluster",
				"Creating ingress in cluster %s", cluster.Name)

			// We always first create an ingress in the first available cluster. Once that ingress
			// has been created and allocated a global IP (visible via an annotation),
			// we record that annotation on the federated ingress, and create all other cluster
			// ingresses with that same global IP.
			// Note: If the first cluster becomes (e.g. temporarily) unavailable, the
			// second cluster will become the first cluster, but eventually all ingresses
			// will share the single global IP recorded in the annotation of the
			// federated ingress.
			haveFirstCluster := firstClusterExists && firstClusterName != "" && ic.isClusterReady(firstClusterName)
			if !haveFirstCluster {
				glog.V(4).Infof("No cluster has been chosen as the first cluster. Electing cluster %s as the first cluster to create ingress in", cluster.Name)
				ic.updateAnnotationOnIngress(baseIngress, firstClusterAnnotation, cluster.Name)
				return
			}
			if baseIPAnnotationExists || firstClusterName == cluster.Name {
				if baseIPAnnotationExists {
					glog.V(4).Infof("No existing Ingress %s in cluster %s and static IP annotation (%q) exists on base ingress - queuing a create operation", ingress, cluster.Name, staticIPNameKeyWritable)
				} else {
					glog.V(4).Infof("No existing Ingress %s in cluster %s and no static IP annotation (%q) on base ingress - queuing a create operation in first cluster", ingress, cluster.Name, staticIPNameKeyWritable)
				}
				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeAdd,
					Obj:         desiredIngress,
					ClusterName: cluster.Name,
				})
			} else {
				glog.V(4).Infof("No annotation %q exists on ingress %q in federation and waiting for ingress in cluster %s. Not queueing create operation for ingress until annotation exists", staticIPNameKeyWritable, ingress, firstClusterName)
			}
		} else {
			clusterIngress := clusterIngressObj.(*extensionsv1beta1.Ingress)
			glog.V(4).Infof("Found existing Ingress %s in cluster %s - checking if update is required (in either direction)", ingress, cluster.Name)
			clusterIPName, clusterIPNameExists := clusterIngress.ObjectMeta.Annotations[staticIPNameKeyReadonly]
			baseLBStatusExists := len(baseIngress.Status.LoadBalancer.Ingress) > 0
			clusterLBStatusExists := len(clusterIngress.Status.LoadBalancer.Ingress) > 0
			logStr := fmt.Sprintf("Cluster ingress %q has annotation %q=%q, loadbalancer status exists? [%v], federated ingress has annotation %q=%q, loadbalancer status exists? [%v].  %%s annotation and/or loadbalancer status from cluster ingress to federated ingress.", ingress, staticIPNameKeyReadonly, clusterIPName, clusterLBStatusExists, staticIPNameKeyWritable, baseIPName, baseLBStatusExists)
			if (!baseIPAnnotationExists && clusterIPNameExists) || (!baseLBStatusExists && clusterLBStatusExists) { // copy the IP name from the readonly annotation on the cluster ingress, to the writable annotation on the federated ingress
				glog.V(4).Infof(logStr, "Transferring")
				if !baseIPAnnotationExists && clusterIPNameExists {
					ic.updateAnnotationOnIngress(baseIngress, staticIPNameKeyWritable, clusterIPName)
					return
				}
				if !baseLBStatusExists && clusterLBStatusExists {
					lbstatusObj, lbErr := conversion.NewCloner().DeepCopy(&clusterIngress.Status.LoadBalancer)
					lbstatus, ok := lbstatusObj.(*v1.LoadBalancerStatus)
					if lbErr != nil || !ok {
						glog.Errorf("Internal error: Failed to clone LoadBalancerStatus of %q in cluster %q while attempting to update master loadbalancer ingress status, will try again later. error: %v, Object to be cloned: %v", ingress, cluster.Name, lbErr, lbstatusObj)
						ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
						return
					}
					baseIngress.Status.LoadBalancer = *lbstatus
					glog.V(4).Infof("Attempting to update base federated ingress status: %v", baseIngress)
					if updatedFedIngress, err := ic.federatedApiClient.Extensions().Ingresses(baseIngress.Namespace).UpdateStatus(baseIngress); err != nil {
						glog.Errorf("Failed to update federated ingress status of %q (loadbalancer status), will try again later: %v", ingress, err)
						ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
						return
					} else {
						glog.V(4).Infof("Successfully updated federated ingress status of %q (added loadbalancer status), after update: %q", ingress, updatedFedIngress)
						ic.deliverIngress(ingress, ic.smallDelay, false)
						return
					}
				}
			} else {
				glog.V(4).Infof(logStr, "Not transferring")
			}
			// Update existing cluster ingress, if needed.
			if util.ObjectMetaAndSpecEquivalent(baseIngress, clusterIngress) {
				glog.V(4).Infof("Ingress %q in cluster %q does not need an update: cluster ingress is equivalent to federated ingress", ingress, cluster.Name)
			} else {
				glog.V(4).Infof("Ingress %s in cluster %s needs an update: cluster ingress %v is not equivalent to federated ingress %v", ingress, cluster.Name, clusterIngress, desiredIngress)
				objMeta, err := conversion.NewCloner().DeepCopy(clusterIngress.ObjectMeta)
				if err != nil {
					glog.Errorf("Error deep copying ObjectMeta: %v", err)
					ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
					return
				}
				desiredIngress.ObjectMeta, ok = objMeta.(v1.ObjectMeta)
				if !ok {
					glog.Errorf("Internal error: Failed to cast to v1.ObjectMeta: %v", objMeta)
					ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
					return
				}
				// Merge any annotations and labels on the federated ingress onto the underlying cluster ingress,
				// overwriting duplicates.
				if desiredIngress.ObjectMeta.Annotations == nil {
					desiredIngress.ObjectMeta.Annotations = make(map[string]string)
				}
				for key, val := range baseIngress.ObjectMeta.Annotations {
					desiredIngress.ObjectMeta.Annotations[key] = val
				}
				if desiredIngress.ObjectMeta.Labels == nil {
					desiredIngress.ObjectMeta.Labels = make(map[string]string)
				}
				for key, val := range baseIngress.ObjectMeta.Labels {
					desiredIngress.ObjectMeta.Labels[key] = val
				}
				ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "UpdateInCluster",
					"Updating ingress in cluster %s", cluster.Name)

				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredIngress,
					ClusterName: cluster.Name,
				})
				// TODO: Transfer any readonly (target-proxy, url-map etc) annotations from the master cluster to the federation, if this is the master cluster.
				// This is only for consistency, so that the federation ingress metadata matches the underlying clusters. It's not actually required.
			}
		}
	}

	if len(operations) == 0 {
		// Everything is in order
		glog.V(4).Infof("Ingress %q is up-to-date in all clusters - no propagation to clusters required.", ingress)
		return
	}
	glog.V(4).Infof("Calling federatedUpdater.Update() - operations: %v", operations)
	err = ic.federatedIngressUpdater.UpdateWithOnError(operations, ic.updateTimeout, func(op util.FederatedOperation, operror error) {
		ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "FailedClusterUpdate",
			"Ingress update in cluster %s failed: %v", op.ClusterName, operror)
	})
	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v", ingress, err)
		ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
		return
	}
	// Schedule another periodic reconciliation, only to account for possible bugs in watch processing.
	ic.deliverIngress(ingress, ic.ingressReviewDelay, false)
}
Example No. 24
func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderClient, volume *api.PersistentVolume) (err error) {
	glog.V(5).Infof("Synchronizing PersistentVolume[%s], current phase: %s\n", volume.Name, volume.Status.Phase)

	// The PV may have been modified by parallel call to syncVolume, load
	// the current version.
	newPv, err := binderClient.GetPersistentVolume(volume.Name)
	if err != nil {
		return fmt.Errorf("Cannot reload volume %s: %v", volume.Name, err)
	}
	volume = newPv

	// volumes can be in one of the following states:
	//
	// VolumePending -- default value -- not bound to a claim and not yet processed through this controller.
	// VolumeAvailable -- not bound to a claim, but processed at least once and found in this controller's volumeIndex.
	// VolumeBound -- bound to a claim because volume.Spec.ClaimRef != nil.   Claim status may not be correct.
	// VolumeReleased -- volume.Spec.ClaimRef != nil but the claim has been deleted by the user.
	// VolumeFailed -- volume.Spec.ClaimRef != nil and the volume failed processing in the recycler
	currentPhase := volume.Status.Phase
	nextPhase := currentPhase

	// Always store the newest volume state in local cache.
	_, exists, err := volumeIndex.Get(volume)
	if err != nil {
		return err
	}
	if !exists {
		volumeIndex.Add(volume)
	} else {
		volumeIndex.Update(volume)
	}

	if isBeingProvisioned(volume) {
		glog.V(4).Infof("Skipping PersistentVolume[%s], waiting for provisioning to finish", volume.Name)
		return nil
	}

	switch currentPhase {
	case api.VolumePending:

		// 3 possible states:
		//  1.  ClaimRef != nil and Claim exists:   Prebound to claim. Make volume available for binding (it will match PVC).
		//  2.  ClaimRef != nil and Claim !exists:  Recently recycled. Remove bind. Make volume available for new claim.
		//  3.  ClaimRef == nil: Neither recycled nor prebound.  Make volume available for binding.
		nextPhase = api.VolumeAvailable

		if volume.Spec.ClaimRef != nil {
			claim, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)
			if errors.IsNotFound(err) || (claim != nil && claim.UID != volume.Spec.ClaimRef.UID) {
				if volume.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimRecycle {
					// Pending volumes that have a ClaimRef where the claim is missing were recently recycled.
					// The Recycler set the phase to VolumePending to start the volume at the beginning of this lifecycle.
					// removing ClaimRef unbinds the volume
					clone, err := conversion.NewCloner().DeepCopy(volume)
					if err != nil {
						return fmt.Errorf("Error cloning pv: %v", err)
					}
					volumeClone, ok := clone.(*api.PersistentVolume)
					if !ok {
						return fmt.Errorf("Unexpected pv cast error : %v\n", volumeClone)
					}
					volumeClone.Spec.ClaimRef = nil

					if updatedVolume, err := binderClient.UpdatePersistentVolume(volumeClone); err != nil {
						return fmt.Errorf("Unexpected error saving PersistentVolume: %+v", err)
					} else {
						volume = updatedVolume
						volumeIndex.Update(volume)
					}
				} else {
					// Pending volumes that have a ClaimRef where the claim is missing and were not recycled.
					// It must have been freshly provisioned and the claim was deleted during the provisioning.
					// Mark the volume as Released, it will be deleted.
					nextPhase = api.VolumeReleased
				}
			} else if err != nil {
				return fmt.Errorf("Error getting PersistentVolumeClaim[%s/%s]: %v", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, err)
			}

			// Dynamically provisioned claims remain Pending until its volume is completely provisioned.
			// The provisioner updates the PV and triggers this update for the volume.  Explicitly sync'ing
			// the claim here prevents the need to wait until the next sync period when the claim would normally
			// advance to Bound phase. Otherwise, the maximum wait time for the claim to be Bound is the default sync period.
			if claim != nil && claim.Status.Phase == api.ClaimPending && keyExists(qosProvisioningKey, claim.Annotations) && isProvisioningComplete(volume) {
				syncClaim(volumeIndex, binderClient, claim)
			}
		}
		glog.V(5).Infof("PersistentVolume[%s] is available\n", volume.Name)

	// available volumes await a claim
	case api.VolumeAvailable:
		if volume.Spec.ClaimRef != nil {
			_, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)
			if err == nil {
				// change of phase will trigger an update event with the newly bound volume
				glog.V(5).Infof("PersistentVolume[%s] is now bound\n", volume.Name)
				nextPhase = api.VolumeBound
			} else {
				if errors.IsNotFound(err) {
					nextPhase = api.VolumeReleased
				}
			}
		}

	//bound volumes require verification of their bound claims
	case api.VolumeBound:
		if volume.Spec.ClaimRef == nil {
			return fmt.Errorf("PersistentVolume[%s] expected to be bound but found nil claimRef: %+v", volume.Name, volume)
		} else {
			claim, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)

			// A volume is Released when its bound claim cannot be found in the API server.
			// A claim by the same name can be found if deleted and recreated before this controller can release
			// the volume from the original claim, so a UID check is necessary.
			if err != nil {
				if errors.IsNotFound(err) {
					nextPhase = api.VolumeReleased
				} else {
					return err
				}
			} else if claim != nil && claim.UID != volume.Spec.ClaimRef.UID {
				nextPhase = api.VolumeReleased
			}
		}

	// released volumes require recycling
	case api.VolumeReleased:
		if volume.Spec.ClaimRef == nil {
			return fmt.Errorf("PersistentVolume[%s] expected to be bound but found nil claimRef: %+v", volume.Name, volume)
		} else {
			// another process is watching for released volumes.
			// PersistentVolumeReclaimPolicy is set per PersistentVolume
			//  Recycle - sets the PV to Pending and back under this controller's management
			//  Delete - delete events are handled by this controller's watch. PVs are removed from the index.
		}

	// volumes are removed by processes external to this binder and must be removed from the cluster
	case api.VolumeFailed:
		if volume.Spec.ClaimRef == nil {
			return fmt.Errorf("PersistentVolume[%s] expected to be bound but found nil claimRef: %+v", volume.Name, volume)
		} else {
			glog.V(5).Infof("PersistentVolume[%s] previously failed recycling.  Skipping.\n", volume.Name)
		}
	}

	if currentPhase != nextPhase {
		volume.Status.Phase = nextPhase

		// a change in state will trigger another update through this controller.
		// each pass through this controller evaluates current phase and decides whether or not to change to the next phase
		glog.V(5).Infof("PersistentVolume[%s] changing phase from %s to %s\n", volume.Name, currentPhase, nextPhase)
		updatedVolume, err := binderClient.UpdatePersistentVolumeStatus(volume)
		if err != nil {
			// Rollback to previous phase
			volume.Status.Phase = currentPhase
		} else {
			volume = updatedVolume
		}
		volumeIndex.Update(volume)
	}

	return nil
}
Example No. 25
func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
	nodesToUpdate := nsu.actualStateOfWorld.GetVolumesToReportAttached()
	for nodeName, attachedVolumes := range nodesToUpdate {
		nodeObj, exists, err := nsu.nodeInformer.GetStore().GetByKey(nodeName)
		if nodeObj == nil || !exists || err != nil {
			// If the node does not exist, its status cannot be updated; log the
			// error and reset the statusUpdateNeeded flag back to true to
			// indicate this node status needs to be updated again.
			glog.V(2).Infof(
				"Could not update node status. Failed to find node %q in NodeInformer cache. %v",
				nodeName,
				err)
			nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName)
			continue
		}

		clonedNode, err := conversion.NewCloner().DeepCopy(nodeObj)
		if err != nil {
			return fmt.Errorf("error cloning node %q: %v",
				nodeName,
				err)
		}

		node, ok := clonedNode.(*api.Node)
		if !ok || node == nil {
			return fmt.Errorf(
				"failed to cast %q object %#v to Node",
				nodeName,
				clonedNode)
		}

		oldData, err := json.Marshal(node)
		if err != nil {
			return fmt.Errorf(
				"failed to Marshal oldData for node %q. %v",
				nodeName,
				err)
		}

		node.Status.VolumesAttached = attachedVolumes

		newData, err := json.Marshal(node)
		if err != nil {
			return fmt.Errorf(
				"failed to Marshal newData for node %q. %v",
				nodeName,
				err)
		}

		patchBytes, err :=
			strategicpatch.CreateStrategicMergePatch(oldData, newData, node)
		if err != nil {
			return fmt.Errorf(
				"failed to CreateStrategicMergePatch for node %q. %v",
				nodeName,
				err)
		}

		_, err = nsu.kubeClient.Core().Nodes().PatchStatus(nodeName, patchBytes)
		if err != nil {
			// If update node status fails, reset flag statusUpdateNeeded back to true
			// to indicate this node status needs to be updated again
			nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName)
			return fmt.Errorf(
				"failed to kubeClient.Core().Nodes().Patch for node %q. %v",
				nodeName,
				err)
		}
		glog.V(2).Infof(
			"Updating status for node %q succeeded. patchBytes: %q VolumesAttached: %v",
			nodeName,
			string(patchBytes),
			node.Status.VolumesAttached)

	}
	return nil
}
Example No. 26
					},
					Data: map[string]string{
						"federations": federationsDomainMap,
					},
				}
				// Create this configmap in all clusters.
				for clusterName, cluster := range clusters {
					By(fmt.Sprintf("Creating kube dns config map in cluster: %s", clusterName))
					_, err := cluster.Clientset.Core().ConfigMaps(KubeDNSConfigMapNamespace).Create(&kubeDNSConfigMap)
					framework.ExpectNoError(err, fmt.Sprintf("Error in creating config map in cluster %s", clusterName))
				}

				createBackendPodsOrFail(clusters, nsName, FederatedServicePodName)

				service = createServiceOrFail(f.FederationClientset_1_5, nsName, FederatedServiceName)
				obj, err := conversion.NewCloner().DeepCopy(service)
				// Cloning shouldn't fail. On the off-chance it does, we
				// should shallow copy service to serviceShard before
				// failing; otherwise we never get a chance to clean up
				// the underlying services when the cloner fails for
				// reasons outside our control (for example, a cloner
				// bug). That would leak the resources, which in turn
				// causes the test project to run out of quota and the
				// entire suite starts failing. So we must try as hard as
				// possible to clean up the underlying services: if
				// DeepCopy fails, we fall back to a shallow copy as a
				// last resort.
				if err != nil {
					serviceCopy := *service
					serviceShard = &serviceCopy
Example No. 27
func TestPersistentVolumeBindRace(t *testing.T) {
	// Test a race binding many claims to a PV that is pre-bound to a specific
	// PVC. Only this specific PVC should get bound.
	glog.V(2).Infof("TestPersistentVolumeBindRace started")
	_, s := framework.RunAMaster(nil)
	defer s.Close()

	ns := framework.CreateTestingNamespace("pv-bind-race", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	testClient, ctrl, watchPV, watchPVC := createClients(ns, t, s, defaultSyncPeriod)
	defer watchPV.Stop()
	defer watchPVC.Stop()

	// NOTE: This test cannot run in parallel, because it is creating and deleting
	// non-namespaced objects (PersistentVolumes).
	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})

	stopCh := make(chan struct{})
	ctrl.Run(stopCh)
	defer close(stopCh)

	pv := createPV("fake-pv-race", "/tmp/foo", "10G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRetain)
	pvc := createPVC("fake-pvc-race", ns.Name, "5G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
	counter := 0
	maxClaims := 100
	claims := []*api.PersistentVolumeClaim{}
	for counter <= maxClaims {
		counter += 1
		clone, _ := conversion.NewCloner().DeepCopy(pvc)
		newPvc, _ := clone.(*api.PersistentVolumeClaim)
		newPvc.ObjectMeta = api.ObjectMeta{Name: fmt.Sprintf("fake-pvc-race-%d", counter)}
		claim, err := testClient.PersistentVolumeClaims(ns.Name).Create(newPvc)
		if err != nil {
			t.Fatalf("Error creating newPvc: %v", err)
		}
		claims = append(claims, claim)
	}
	glog.V(2).Infof("TestPersistentVolumeBindRace claims created")

	// putting a bind manually on a pv should only match the claim it is bound to
	rand.Seed(time.Now().Unix())
	claim := claims[rand.Intn(maxClaims-1)]
	claimRef, err := api.GetReference(claim)
	if err != nil {
		t.Fatalf("Unexpected error getting claimRef: %v", err)
	}
	pv.Spec.ClaimRef = claimRef
	pv.Spec.ClaimRef.UID = ""

	pv, err = testClient.PersistentVolumes().Create(pv)
	if err != nil {
		t.Fatalf("Unexpected error creating pv: %v", err)
	}
	glog.V(2).Infof("TestPersistentVolumeBindRace pv created, pre-bound to %s", claim.Name)

	waitForPersistentVolumePhase(testClient, pv.Name, watchPV, api.VolumeBound)
	glog.V(2).Infof("TestPersistentVolumeBindRace pv bound")
	waitForAnyPersistentVolumeClaimPhase(watchPVC, api.ClaimBound)
	glog.V(2).Infof("TestPersistentVolumeBindRace pvc bound")

	pv, err = testClient.PersistentVolumes().Get(pv.Name)
	if err != nil {
		t.Fatalf("Unexpected error getting pv: %v", err)
	}
	if pv.Spec.ClaimRef == nil {
		t.Fatalf("Unexpected nil claimRef")
	}
	if pv.Spec.ClaimRef.Namespace != claimRef.Namespace || pv.Spec.ClaimRef.Name != claimRef.Name {
		t.Fatalf("Bind mismatch! Expected %s/%s but got %s/%s", claimRef.Namespace, claimRef.Name, pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name)
	}
}
Example No. 28
func syncClaim(volumeIndex *persistentVolumeOrderedIndex, binderClient binderClient, claim *api.PersistentVolumeClaim) (err error) {
	glog.V(5).Infof("Synchronizing PersistentVolumeClaim[%s] for binding", claim.Name)

	// The claim may have been modified by parallel call to syncClaim, load
	// the current version.
	newClaim, err := binderClient.GetPersistentVolumeClaim(claim.Namespace, claim.Name)
	if err != nil {
		return fmt.Errorf("Cannot reload claim %s/%s: %v", claim.Namespace, claim.Name, err)
	}
	claim = newClaim

	switch claim.Status.Phase {
	case api.ClaimPending:
		// claims with a storage-class annotation for provisioning will *only* match volumes with a ClaimRef of the claim.
		volume, err := volumeIndex.findBestMatchForClaim(claim)
		if err != nil {
			return err
		}

		if volume == nil {
			glog.V(5).Infof("A volume match does not exist for persistent claim: %s", claim.Name)
			return nil
		}

		if isBeingProvisioned(volume) {
			glog.V(5).Infof("PersistentVolume[%s] for PersistentVolumeClaim[%s/%s] is still being provisioned.", volume.Name, claim.Namespace, claim.Name)
			return nil
		}

		claimRef, err := api.GetReference(claim)
		if err != nil {
			return fmt.Errorf("Unexpected error getting claim reference: %v\n", err)
		}

		// Make a binding reference to the claim by persisting claimRef on the volume.
		// The local cache must be updated with the new bind to prevent subsequent
		// claims from binding to the volume.
		if volume.Spec.ClaimRef == nil {
			clone, err := conversion.NewCloner().DeepCopy(volume)
			if err != nil {
				return fmt.Errorf("Error cloning pv: %v", err)
			}
			volumeClone, ok := clone.(*api.PersistentVolume)
			if !ok {
				return fmt.Errorf("Unexpected pv cast error : %v\n", volumeClone)
			}
			volumeClone.Spec.ClaimRef = claimRef
			if updatedVolume, err := binderClient.UpdatePersistentVolume(volumeClone); err != nil {
				return fmt.Errorf("Unexpected error saving PersistentVolume.Status: %+v", err)
			} else {
				volume = updatedVolume
				volumeIndex.Update(updatedVolume)
			}
		}

		// the bind is persisted on the volume above and will always match the claim in a search.
		// claim would remain Pending if the update fails, so processing this state is idempotent.
		// this only needs to be processed once.
		if claim.Spec.VolumeName != volume.Name {
			claim.Spec.VolumeName = volume.Name
			claim, err = binderClient.UpdatePersistentVolumeClaim(claim)
			if err != nil {
				return fmt.Errorf("Error updating claim with VolumeName %s: %+v\n", volume.Name, err)
			}
		}

		claim.Status.Phase = api.ClaimBound
		claim.Status.AccessModes = volume.Spec.AccessModes
		claim.Status.Capacity = volume.Spec.Capacity
		_, err = binderClient.UpdatePersistentVolumeClaimStatus(claim)
		if err != nil {
			return fmt.Errorf("Unexpected error saving claim status: %+v", err)
		}

	case api.ClaimBound:
		// no-op.  Claim is bound, values from PV are set.  PVCs are technically mutable in the API server
		// and we don't want to handle those changes at this time.

	default:
		return fmt.Errorf("Unknown state for PVC: %#v", claim)

	}

	glog.V(5).Infof("PersistentVolumeClaim[%s] is bound\n", claim.Name)
	return nil
}
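isBeingProvisioned is referenced above but not part of this listing. A plausible sketch, assuming the controller marks an in-flight provision with an annotation on the volume (both the annotation key and the completed value below are hypothetical):

// Hypothetical marker; the real controller may use a different key and value.
const (
	annProvisioningRequired = "volume.experimental.kubernetes.io/provisioning-required"
	provisioningCompleted   = "completed"
)

// Sketch of isBeingProvisioned: the volume counts as in-flight while the
// marker annotation is present and not yet flipped to the completed value.
func isBeingProvisioned(volume *api.PersistentVolume) bool {
	value, found := volume.Annotations[annProvisioningRequired]
	return found && value != provisioningCompleted
}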
func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderClient, volume *api.PersistentVolume) (err error) {
	glog.V(5).Infof("Synchronizing PersistentVolume[%s], current phase: %s\n", volume.Name, volume.Status.Phase)

	// volumes can be in one of the following states:
	//
	// VolumePending -- default value -- not bound to a claim and not yet processed through this controller.
	// VolumeAvailable -- not bound to a claim, but processed at least once and found in this controller's volumeIndex.
	// VolumeBound -- bound to a claim because volume.Spec.ClaimRef != nil.   Claim status may not be correct.
	// VolumeReleased -- volume.Spec.ClaimRef != nil but the claim has been deleted by the user.
	// VolumeFailed -- volume.Spec.ClaimRef != nil and the volume failed processing in the recycler
	currentPhase := volume.Status.Phase
	nextPhase := currentPhase

	_, exists, err := volumeIndex.Get(volume)
	if err != nil {
		return err
	}
	if !exists {
		volumeIndex.Add(volume)
	}

	switch currentPhase {
	case api.VolumePending:

		// 3 possible states:
		//  1.  ClaimRef != nil and Claim exists:   Prebound to claim. Make volume available for binding (it will match PVC).
		//  2.  ClaimRef != nil and Claim !exists:  Recently recycled. Remove bind. Make volume available for new claim.
		//  3.  ClaimRef == nil: Neither recycled nor prebound.  Make volume available for binding.
		nextPhase = api.VolumeAvailable

		if volume.Spec.ClaimRef != nil {
			_, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)
			if errors.IsNotFound(err) {
				// Pending volumes that have a ClaimRef where the claim is missing were recently recycled.
				// The Recycler set the phase to VolumePending to start the volume at the beginning of this lifecycle.
				// removing ClaimRef unbinds the volume
				clone, err := conversion.NewCloner().DeepCopy(volume)
				if err != nil {
					return fmt.Errorf("Error cloning pv: %v", err)
				}
				volumeClone, ok := clone.(*api.PersistentVolume)
				if !ok {
					return fmt.Errorf("Unexpected pv cast error : %v\n", volumeClone)
				}
				volumeClone.Spec.ClaimRef = nil

				if updatedVolume, err := binderClient.UpdatePersistentVolume(volumeClone); err != nil {
					return fmt.Errorf("Unexpected error saving PersistentVolume: %+v", err)
				} else {
					volume = updatedVolume
					volumeIndex.Update(volume)
				}
			} else if err != nil {
				return fmt.Errorf("Error getting PersistentVolumeClaim[%s/%s]: %v", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, err)
			}
		}
		glog.V(5).Infof("PersistentVolume[%s] is available\n", volume.Name)

	// available volumes await a claim
	case api.VolumeAvailable:
		if volume.Spec.ClaimRef != nil {
			_, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)
			if err == nil {
				// change of phase will trigger an update event with the newly bound volume
				glog.V(5).Infof("PersistentVolume[%s] is now bound\n", volume.Name)
				nextPhase = api.VolumeBound
			} else if errors.IsNotFound(err) {
				nextPhase = api.VolumeReleased
			}
		}

	// bound volumes require verification of their bound claims
	case api.VolumeBound:
		if volume.Spec.ClaimRef == nil {
			return fmt.Errorf("PersistentVolume[%s] expected to be bound but found nil claimRef: %+v", volume.Name, volume)
		} else {
			_, err := binderClient.GetPersistentVolumeClaim(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)
			if err != nil {
				if errors.IsNotFound(err) {
					nextPhase = api.VolumeReleased
				} else {
					return err
				}
			}
		}

	// released volumes require recycling
	case api.VolumeReleased:
		if volume.Spec.ClaimRef == nil {
			return fmt.Errorf("PersistentVolume[%s] expected to be bound but found nil claimRef: %+v", volume.Name, volume)
		} else {
			// another process is watching for released volumes.
			// PersistentVolumeReclaimPolicy is set per PersistentVolume
			//  Recycle - sets the PV to Pending and back under this controller's management
			//  Delete - delete events are handled by this controller's watch. PVs are removed from the index.
		}

	// volumes are removed by processes external to this binder and must be removed from the cluster
	case api.VolumeFailed:
		if volume.Spec.ClaimRef == nil {
			return fmt.Errorf("PersistentVolume[%s] expected to be bound but found nil claimRef: %+v", volume.Name, volume)
		} else {
			glog.V(5).Infof("PersistentVolume[%s] previously failed recycling.  Skipping.\n", volume.Name)
		}
	}

	if currentPhase != nextPhase {
		volume.Status.Phase = nextPhase

		// a change in state will trigger another update through this controller.
		// each pass through this controller evaluates current phase and decides whether or not to change to the next phase
		glog.V(5).Infof("PersistentVolume[%s] changing phase from %s to %s\n", volume.Name, currentPhase, nextPhase)
		if updatedVolume, err := binderClient.UpdatePersistentVolumeStatus(volume); err != nil {
			// Rollback to previous phase on the local copy; the returned
			// volume may be nil when the update fails.
			volume.Status.Phase = currentPhase
		} else {
			volume = updatedVolume
		}
		volumeIndex.Update(volume)
	}

	return nil
}
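Both sync functions above talk to the API server through a binderClient abstraction. Its method set can be reconstructed from the calls made in this section; the sketch below lists exactly those methods, with signatures inferred from the call sites rather than copied from the original source:

// binderClient as implied by the calls in syncClaim and syncVolume above.
type binderClient interface {
	GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error)
	UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error)
	UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error)
	UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error)
	UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error)
}

A test double only needs to satisfy this interface to exercise syncClaim and syncVolume without a running API server.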
func syncClaim(volumeIndex *persistentVolumeOrderedIndex, binderClient binderClient, claim *api.PersistentVolumeClaim) (err error) {
	glog.V(5).Infof("Synchronizing PersistentVolumeClaim[%s]\n", claim.Name)

	switch claim.Status.Phase {
	case api.ClaimPending:
		volume, err := volumeIndex.findBestMatchForClaim(claim)
		if err != nil {
			return err
		}
		if volume == nil {
			glog.V(5).Infof("A volume match does not exist for persistent claim: %s", claim.Name)
			return nil
		}

		// create a reference to the claim and assign it to the volume being bound.
		// the volume is a pointer, so assigning the reference here closes a race window where
		// another claim could match this volume before the claimRef is persisted below.
		claimRef, err := api.GetReference(claim)
		if err != nil {
			return fmt.Errorf("Unexpected error getting claim reference: %v\n", err)
		}

		// make a binding reference to the claim and update the local index to prevent duplicate bindings
		clone, err := conversion.NewCloner().DeepCopy(volume)
		if err != nil {
			return fmt.Errorf("Error cloning pv: %v", err)
		}
		volumeClone, ok := clone.(*api.PersistentVolume)
		if !ok {
			return fmt.Errorf("Unexpected pv cast error : %v\n", volumeClone)
		}
		volumeClone.Spec.ClaimRef = claimRef
		if updatedVolume, err := binderClient.UpdatePersistentVolume(volumeClone); err != nil {
			return fmt.Errorf("Unexpected error saving PersistentVolume.Status: %+v", err)
		} else {
			volume = updatedVolume
			volumeIndex.Update(updatedVolume)
		}

		// the bind is persisted on the volume above and will always match the claim in a search.
		// claim would remain Pending if the update fails, so processing this state is idempotent.
		// this only needs to be processed once.
		if claim.Spec.VolumeName != volume.Name {
			claim.Spec.VolumeName = volume.Name
			claim, err = binderClient.UpdatePersistentVolumeClaim(claim)
			if err != nil {
				return fmt.Errorf("Error updating claim with VolumeName %s: %+v\n", volume.Name, err)
			}
		}

		claim.Status.Phase = api.ClaimBound
		claim.Status.AccessModes = volume.Spec.AccessModes
		claim.Status.Capacity = volume.Spec.Capacity
		_, err = binderClient.UpdatePersistentVolumeClaimStatus(claim)
		if err != nil {
			return fmt.Errorf("Unexpected error saving claim status: %+v", err)
		}

	case api.ClaimBound:
		// no-op.  Claim is bound, values from PV are set.  PVCs are technically mutable in the API server
		// and we don't want to handle those changes at this time.

	default:
		return fmt.Errorf("Unknown state for PVC: %#v", claim)

	}

	glog.V(5).Infof("PersistentVolumeClaim[%s] is bound\n", claim.Name)
	return nil
}
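The clone-then-cast sequence around conversion.NewCloner().DeepCopy appears three times in this section. A small helper (hypothetical, not in the original code) that factors the pattern out:

// clonePV deep-copies a PersistentVolume so the shared watcher-cache object is
// never mutated in place. Hypothetical helper; the original inlines this.
func clonePV(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
	clone, err := conversion.NewCloner().DeepCopy(volume)
	if err != nil {
		return nil, fmt.Errorf("Error cloning pv: %v", err)
	}
	volumeClone, ok := clone.(*api.PersistentVolume)
	if !ok {
		return nil, fmt.Errorf("Unexpected pv cast error: %v", clone)
	}
	return volumeClone, nil
}

Each call site above could then read volumeClone, err := clonePV(volume) before setting or clearing Spec.ClaimRef.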