Example No. 1
func (e *HookExecutor) emitEvent(deployment *kapi.ReplicationController, eventType, reason, msg string) {
	t := unversioned.Time{Time: time.Now()}
	var ref *kapi.ObjectReference
	if config, err := deployutil.DecodeDeploymentConfig(deployment, e.decoder); err != nil {
		glog.Errorf("Unable to decode deployment %s/%s to replication contoller: %v", deployment.Namespace, deployment.Name, err)
		if ref, err = kapi.GetReference(deployment); err != nil {
			glog.Errorf("Unable to get reference for %#v: %v", deployment, err)
			return
		}
	} else {
		if ref, err = kapi.GetReference(config); err != nil {
			glog.Errorf("Unable to get reference for %#v: %v", config, err)
			return
		}
	}
	event := &kapi.Event{
		ObjectMeta: kapi.ObjectMeta{
			Name:      fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()),
			Namespace: ref.Namespace,
		},
		InvolvedObject: *ref,
		Reason:         reason,
		Message:        msg,
		FirstTimestamp: t,
		LastTimestamp:  t,
		Count:          1,
		Type:           eventType,
	}
	if _, err := e.events.Create(event); err != nil {
		glog.Errorf("Could not send event '%#v': %v", event, err)
	}
}
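Example No. 2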
func TestReconcileVolume(t *testing.T) {

	controller, mockClient := makeTestController()
	pv := makeTestVolume()
	pvc := makeTestClaim()

	err := controller.reconcileVolume(pv)
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}

	// watch adds claim to the store.
	// we need to add it to our mock client to mimic normal Get call
	controller.claimStore.Add(pvc)
	mockClient.claim = pvc

	// pretend the claim and volume are bound, no provisioning required
	claimRef, _ := api.GetReference(pvc)
	pv.Spec.ClaimRef = claimRef
	err = controller.reconcileVolume(pv)
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}

	pv.Annotations[pvProvisioningRequiredAnnotationKey] = "!pvProvisioningCompleted"
	pv.Annotations[qosProvisioningKey] = "foo"
	err = controller.reconcileVolume(pv)
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}

	if !isAnnotationMatch(pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue, mockClient.volume.Annotations) {
		t.Errorf("Expected %s but got %s", pvProvisioningCompletedAnnotationValue, mockClient.volume.Annotations[pvProvisioningRequiredAnnotationKey])
	}
}
Example No. 3
func TestWatchHTTP(t *testing.T) {
	simpleStorage := &SimpleRESTStorage{}
	handler := handle(map[string]rest.Storage{"simples": simpleStorage})
	server := httptest.NewServer(handler)
	defer server.Close()
	client := http.Client{}

	dest, _ := url.Parse(server.URL)
	dest.Path = "/" + prefix + "/" + testGroupVersion.Group + "/" + testGroupVersion.Version + "/watch/simples"
	dest.RawQuery = ""

	request, err := http.NewRequest("GET", dest.String(), nil)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	response, err := client.Do(request)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if response.StatusCode != http.StatusOK {
		t.Errorf("Unexpected response %#v", response)
	}

	decoder := json.NewDecoder(response.Body)

	for i, item := range watchTestTable {
		// Send
		simpleStorage.fakeWatch.Action(item.t, item.obj)
		// Test receive
		var got watchJSON
		err := decoder.Decode(&got)
		if err != nil {
			t.Fatalf("%d: Unexpected error: %v", i, err)
		}
		if got.Type != item.t {
			t.Errorf("%d: Unexpected type: %v", i, got.Type)
		}
		t.Logf("obj: %v", string(got.Object))
		gotObj, err := codec.Decode(got.Object)
		if err != nil {
			t.Fatalf("Decode error: %v", err)
		}
		t.Logf("obj: %#v", gotObj)
		if _, err := api.GetReference(gotObj); err != nil {
			t.Errorf("Unable to construct reference: %v", err)
		}
		if e, a := item.obj, gotObj; !reflect.DeepEqual(e, a) {
			t.Errorf("Expected %#v, got %#v", e, a)
		}
	}
	simpleStorage.fakeWatch.Stop()

	var got watchJSON
	err = decoder.Decode(&got)
	if err == nil {
		t.Errorf("Unexpected non-error")
	}
}
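The watchJSON type these watch tests decode into is not defined on this page; judging from its use above (Type compared against a watch.EventType, Object handed to a decoder as raw bytes), a plausible shape is the following sketch:

// Plausible shape of watchJSON, inferred from its use in these tests;
// the real definition may differ.
type watchJSON struct {
	Type   watch.EventType `json:"type,omitempty"`
	Object json.RawMessage `json:"object,omitempty"`
}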
Example No. 4
func (d *PodDescriber) Describe(namespace, name string) (string, error) {
	rc := d.ReplicationControllers(namespace)
	pc := d.Pods(namespace)

	pod, err := pc.Get(name)
	if err != nil {
		eventsInterface := d.Events(namespace)
		events, err2 := eventsInterface.List(
			labels.Everything(),
			eventsInterface.GetFieldSelector(&name, &namespace, nil, nil))
		if err2 == nil && len(events.Items) > 0 {
			return tabbedString(func(out io.Writer) error {
				fmt.Fprintf(out, "Pod '%v': error '%v', but found events.\n", name, err)
				DescribeEvents(events, out)
				return nil
			})
		}
		return "", err
	}

	var events *api.EventList
	if ref, err := api.GetReference(pod); err != nil {
		glog.Errorf("Unable to construct reference to '%#v': %v", pod, err)
	} else {
		ref.Kind = ""
		events, _ = d.Events(namespace).Search(ref)
	}

	rcs, err := getReplicationControllersForLabels(rc, labels.Set(pod.Labels))
	if err != nil {
		return "", err
	}

	return describePod(pod, rcs, events)
}
Example No. 5
func (d *NodeDescriber) Describe(namespace, name string) (string, error) {
	mc := d.Nodes()
	node, err := mc.Get(name)
	if err != nil {
		return "", err
	}

	var pods []*api.Pod
	allPods, err := d.Pods(namespace).List(labels.Everything(), fields.Everything())
	if err != nil {
		return "", err
	}
	for i := range allPods.Items {
		pod := &allPods.Items[i]
		if pod.Spec.NodeName != name {
			continue
		}
		pods = append(pods, pod)
	}

	var events *api.EventList
	if ref, err := api.GetReference(node); err != nil {
		glog.Errorf("Unable to construct reference to '%#v': %v", node, err)
	} else {
		// TODO: We haven't decided the namespace for Node object yet.
		ref.UID = types.UID(ref.Name)
		events, _ = d.Events("").Search(ref)
	}

	return describeNode(node, pods, events)
}
Example No. 6
// RecordConfigEvent records an event for the deployment config referenced by the
// deployment.
func RecordConfigEvent(client kclient.EventNamespacer, deployment *kapi.ReplicationController, decoder runtime.Decoder, eventType, reason, msg string) {
	t := unversioned.Time{Time: time.Now()}
	var obj runtime.Object = deployment
	if config, err := deployutil.DecodeDeploymentConfig(deployment, decoder); err == nil {
		obj = config
	} else {
		glog.Errorf("Unable to decode deployment config from %s/%s: %v", deployment.Namespace, deployment.Name, err)
	}
	ref, err := kapi.GetReference(obj)
	if err != nil {
		glog.Errorf("Unable to get reference for %#v: %v", obj, err)
		return
	}
	event := &kapi.Event{
		ObjectMeta: kapi.ObjectMeta{
			Name:      fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()),
			Namespace: ref.Namespace,
		},
		InvolvedObject: *ref,
		Reason:         reason,
		Message:        msg,
		Source: kapi.EventSource{
			Component: deployutil.DeployerPodNameFor(deployment),
		},
		FirstTimestamp: t,
		LastTimestamp:  t,
		Count:          1,
		Type:           eventType,
	}
	if _, err := client.Events(ref.Namespace).Create(event); err != nil {
		glog.Errorf("Could not create event '%#v': %v", event, err)
	}
}
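For context, a hypothetical call site for RecordConfigEvent; the wrapper name recordScaled and the reason/message strings are illustrative, not from the original source:

// Hypothetical call-site sketch for RecordConfigEvent above.
func recordScaled(client kclient.EventNamespacer, deployment *kapi.ReplicationController, decoder runtime.Decoder) {
	RecordConfigEvent(client, deployment, decoder, kapi.EventTypeNormal,
		"ScalingReplicationController", "Scaled replication controller to 3 replicas")
}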
Example No. 7
func refJSON(t *testing.T, o runtime.Object) string {
	ref, err := api.GetReference(o)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	codec := testapi.Default.Codec()
	json := runtime.EncodeOrDie(codec, &api.SerializedReference{Reference: *ref})
	return string(json)
}
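Example No. 8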
func (controller *PersistentVolumeProvisionerController) reconcileClaim(claim *api.PersistentVolumeClaim) error {
	if controller.provisioner == nil {
		return fmt.Errorf("No provisioner configured for controller")
	}

	// no provisioning requested, return Pending. Claim may be pending indefinitely without a match.
	if !keyExists(qosProvisioningKey, claim.Annotations) {
		glog.V(5).Infof("PersistentVolumeClaim[%s] no provisioning required", claim.Name)
		return nil
	}
	if len(claim.Spec.VolumeName) != 0 {
		glog.V(5).Infof("PersistentVolumeClaim[%s] already bound. No provisioning required", claim.Name)
		return nil
	}
	if isAnnotationMatch(pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue, claim.Annotations) {
		glog.V(5).Infof("PersistentVolumeClaim[%s] is already provisioned.", claim.Name)
		return nil
	}

	glog.V(5).Infof("PersistentVolumeClaim[%s] provisioning", claim.Name)
	provisioner, err := newProvisioner(controller.provisioner, claim)
	if err != nil {
		return fmt.Errorf("Unexpected error getting new provisioner for claim %s: %v\n", claim.Name, err)
	}
	newVolume, err := provisioner.NewPersistentVolumeTemplate()
	if err != nil {
		return fmt.Errorf("Unexpected error getting new volume template for claim %s: %v\n", claim.Name, err)
	}

	claimRef, err := api.GetReference(claim)
	if err != nil {
		return fmt.Errorf("Unexpected error getting claim reference for %s: %v\n", claim.Name, err)
	}

	storageClass := claim.Annotations[qosProvisioningKey]

	// the creation of this volume is the bind to the claim.
	// The claim will match the volume during the next sync period when the volume is in the local cache
	newVolume.Spec.ClaimRef = claimRef
	newVolume.Annotations[pvProvisioningRequiredAnnotationKey] = "true"
	newVolume.Annotations[qosProvisioningKey] = storageClass
	newVolume, err = controller.client.CreatePersistentVolume(newVolume)
	if err != nil {
		return fmt.Errorf("PersistentVolumeClaim[%s] failed provisioning: %+v", claim.Name, err)
	}
	glog.V(5).Infof("Unprovisioned PersistentVolume[%s] created for PVC[%s], which will be fulfilled in the storage provider", newVolume.Name, claim.Name)

	claim.Annotations[pvProvisioningRequiredAnnotationKey] = pvProvisioningCompletedAnnotationValue
	_, err = controller.client.UpdatePersistentVolumeClaim(claim)
	if err != nil {
		glog.Error("error updating persistent volume claim: %v", err)
	}

	return nil
}
Example No. 9
func TestWatchWebsocket(t *testing.T) {
	simpleStorage := &SimpleRESTStorage{}
	_ = rest.Watcher(simpleStorage) // Give compile error if this doesn't work.
	handler := handle(map[string]rest.Storage{"simples": simpleStorage})
	server := httptest.NewServer(handler)
	// TODO: Uncomment when fix #19254
	// defer server.Close()

	dest, _ := url.Parse(server.URL)
	dest.Scheme = "ws" // Required by websocket, though the server never sees it.
	dest.Path = "/" + prefix + "/" + testGroupVersion.Group + "/" + testGroupVersion.Version + "/watch/simples"
	dest.RawQuery = ""

	ws, err := websocket.Dial(dest.String(), "", "http://localhost")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	try := func(action watch.EventType, object runtime.Object) {
		// Send
		simpleStorage.fakeWatch.Action(action, object)
		// Test receive
		var got watchJSON
		err := websocket.JSON.Receive(ws, &got)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		if got.Type != action {
			t.Errorf("Unexpected type: %v", got.Type)
		}
		gotObj, err := runtime.Decode(codec, got.Object)
		if err != nil {
			t.Fatalf("Decode error: %v", err)
		}
		if _, err := api.GetReference(gotObj); err != nil {
			t.Errorf("Unable to construct reference: %v", err)
		}
		if e, a := object, gotObj; !reflect.DeepEqual(e, a) {
			t.Errorf("Expected %#v, got %#v", e, a)
		}
	}

	for _, item := range watchTestTable {
		try(item.t, item.obj)
	}
	simpleStorage.fakeWatch.Stop()

	var got watchJSON
	err = websocket.JSON.Receive(ws, &got)
	if err == nil {
		t.Errorf("Unexpected non-error")
	}
}
Example No. 10
func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp unversioned.Time, reason, message string) {
	ref, err := api.GetReference(object)
	if err != nil {
		glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v'", object, err, reason, message)
		return
	}

	event := makeEvent(ref, reason, message)
	event.Source = recorder.source

	recorder.Action(watch.Added, event)
}
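makeEvent is not shown on this page; modeled on the Event literals built by hand in Examples No. 1 and No. 6, a plausible sketch is below (the real helper may differ):

// Sketch of makeEvent, mirroring the Event literals above; not the actual helper.
func makeEvent(ref *api.ObjectReference, reason, message string) *api.Event {
	t := unversioned.Time{Time: time.Now()}
	return &api.Event{
		ObjectMeta: api.ObjectMeta{
			Name:      fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()),
			Namespace: ref.Namespace,
		},
		InvolvedObject: *ref,
		Reason:         reason,
		Message:        message,
		FirstTimestamp: t,
		LastTimestamp:  t,
		Count:          1,
	}
}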
Example No. 11
func (f *FakeRecorder) generateEvent(obj runtime.Object, timestamp unversioned.Time, eventtype, reason, message string) {
	ref, err := api.GetReference(obj)
	if err != nil {
		return
	}
	event := f.makeEvent(ref, eventtype, reason, message)
	event.Source = f.source
	if f.events != nil {
		fmt.Println("write event")
		f.events = append(f.events, event)
	}
}
Example No. 12
func refJson(t *testing.T, o runtime.Object) string {
	ref, err := api.GetReference(o)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, _, codec := NewAPIFactory()
	json, err := runtime.Encode(codec, &api.SerializedReference{Reference: *ref})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	return string(json)
}
Example No. 13
// Get gets kubernetes resources as pretty printed string
//
// Namespace will set the namespace
func (c *Client) Get(namespace string, reader io.Reader) (string, error) {
	// Since we don't know what order the objects come in, let's group them by the types, so
	// that when we print them, they come looking good (headers apply to subgroups, etc.)
	objs := make(map[string][]runtime.Object)
	err := perform(c, namespace, reader, func(info *resource.Info) error {
		log.Printf("Doing get for: '%s'", info.Name)
		obj, err := resource.NewHelper(info.Client, info.Mapping).Get(info.Namespace, info.Name, info.Export)
		if err != nil {
			return err
		}
		// We need to grab the ObjectReference so we can correctly group the objects.
		or, err := api.GetReference(obj)
		if err != nil {
			log.Printf("FAILED GetReference for: %#v\n%v", obj, err)
			return err
		}

		// Use APIVersion/Kind as grouping mechanism. I'm not sure if you can have multiple
		// versions per cluster, but this certainly won't hurt anything, so let's be safe.
		objType := or.APIVersion + "/" + or.Kind
		objs[objType] = append(objs[objType], obj)
		return nil
	})

	// Ok, now we have all the objects grouped by types (say, by v1/Pod, v1/Service, etc.), so
	// spin through them and print them. Printer is cool since it prints the header only when
	// an object type changes, so we can just rely on that. Problem is it doesn't seem to keep
	// track of tab widths
	buf := new(bytes.Buffer)
	p := kubectl.NewHumanReadablePrinter(kubectl.PrintOptions{})
	for t, ot := range objs {
		_, err = buf.WriteString("==> " + t + "\n")
		if err != nil {
			return "", err
		}
		for _, o := range ot {
			err = p.PrintObj(o, buf)
			if err != nil {
				log.Printf("failed to print object type '%s', object: '%s' :\n %v", t, o, err)
				return "", err
			}
		}
		_, err := buf.WriteString("\n")
		if err != nil {
			return "", err
		}
	}
	return buf.String(), err
}
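A hypothetical caller for Client.Get, reading manifests from a file and printing the grouped listing; printResources, the path, and the "default" namespace are illustrative:

// Hypothetical caller sketch for Client.Get above.
func printResources(c *Client, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	out, err := c.Get("default", f)
	if err != nil {
		return err
	}
	fmt.Println(out)
	return nil
}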
Example No. 14
// GetNodeEvents gets events associated with the node of the given name.
func GetNodeEvents(client client.Interface, dsQuery *dataselect.DataSelectQuery, nodeName string) (*common.EventList, error) {
	var eventList common.EventList

	mc := client.Core().Nodes()
	node, _ := mc.Get(nodeName)
	if ref, err := api.GetReference(node); err == nil {
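		// Node events are recorded with the node name as the involved object's UID,
		// so set it here for the search to match.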
		ref.UID = types.UID(ref.Name)
		events, _ := client.Core().Events(api.NamespaceAll).Search(ref)
		eventList = CreateEventList(events.Items, dsQuery)
	} else {
		log.Print(err)
	}

	return &eventList, nil
}
Example No. 15
func TestFindingPreboundVolumes(t *testing.T) {
	claim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "claim01",
			Namespace: "myns",
			SelfLink:  testapi.Default.SelfLink("pvc", ""),
		},
		Spec: api.PersistentVolumeClaimSpec{
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			Resources:   api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("1Gi")}},
		},
	}
	claimRef, err := api.GetReference(claim)
	if err != nil {
		t.Errorf("error getting claimRef: %v", err)
	}

	pv1 := testVolume("pv1", "1Gi")
	pv5 := testVolume("pv5", "5Gi")
	pv8 := testVolume("pv8", "8Gi")

	index := newPersistentVolumeOrderedIndex()
	index.store.Add(pv1)
	index.store.Add(pv5)
	index.store.Add(pv8)

	// expected exact match on size
	volume, _ := index.findBestMatchForClaim(claim)
	if volume.Name != pv1.Name {
		t.Errorf("Expected %s but got volume %s instead", pv1.Name, volume.Name)
	}

	// pretend the exact match is pre-bound.  should get the next size up.
	pv1.Spec.ClaimRef = &api.ObjectReference{Name: "foo", Namespace: "bar"}
	volume, _ = index.findBestMatchForClaim(claim)
	if volume.Name != pv5.Name {
		t.Errorf("Expected %s but got volume %s instead", pv5.Name, volume.Name)
	}

	// pretend the exact match is available but the largest volume is pre-bound to the claim.
	pv1.Spec.ClaimRef = nil
	pv8.Spec.ClaimRef = claimRef
	volume, _ = index.findBestMatchForClaim(claim)
	if volume.Name != pv8.Name {
		t.Errorf("Expected %s but got volume %s instead", pv8.Name, volume.Name)
	}
}
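Example No. 16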
// GetNodeEvents gets events associated with the node of the given name.
func GetNodeEvents(client client.Interface, nodeName string) (common.EventList, error) {
	eventList := common.EventList{
		Namespace: api.NamespaceAll,
		Events:    make([]common.Event, 0),
	}

	mc := client.Nodes()
	node, _ := mc.Get(nodeName)
	if ref, err := api.GetReference(node); err == nil {
		ref.UID = types.UID(ref.Name)
		events, _ := client.Events(api.NamespaceAll).Search(ref)
		AppendEvents(events.Items, eventList)
	} else {
		log.Print(err)
	}

	return eventList, nil
}
Example No. 17
func getPodsAnnotationSet(template *api.PodTemplateSpec, object runtime.Object) (labels.Set, error) {
	desiredAnnotations := make(labels.Set)
	for k, v := range template.Annotations {
		desiredAnnotations[k] = v
	}
	createdByRef, err := api.GetReference(object)
	if err != nil {
		return desiredAnnotations, fmt.Errorf("unable to get controller reference: %v", err)
	}
	createdByRefJson, err := registered.GroupOrDie(api.GroupName).Codec.Encode(&api.SerializedReference{
		Reference: *createdByRef,
	})
	if err != nil {
		return desiredAnnotations, fmt.Errorf("unable to serialize controller reference: %v", err)
	}
	desiredAnnotations[CreatedByAnnotation] = string(createdByRefJson)
	return desiredAnnotations, nil
}
Example No. 18
// makeCreatedByRefJson makes a json string with an object reference for use in "created-by" annotation value
func makeCreatedByRefJson(object runtime.Object) (string, error) {
	createdByRef, err := api.GetReference(object)
	if err != nil {
		return "", fmt.Errorf("unable to get controller reference: %v", err)
	}

	// TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients
	//   would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment.
	//   We need to consistently handle this case of annotation versioning.
	codec := api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: api.GroupName, Version: "v1"})

	createdByRefJson, err := runtime.Encode(codec, &api.SerializedReference{
		Reference: *createdByRef,
	})
	if err != nil {
		return "", fmt.Errorf("unable to serialize controller reference: %v", err)
	}
	return string(createdByRefJson), nil
}
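A hypothetical caller of makeCreatedByRefJson, stamping the created-by annotation on a new pod; stampCreatedBy, pod, and owner are illustrative names, and CreatedByAnnotation is the key used elsewhere on this page:

// Hypothetical caller sketch for makeCreatedByRefJson above.
func stampCreatedBy(pod *api.Pod, owner runtime.Object) error {
	refJson, err := makeCreatedByRefJson(owner)
	if err != nil {
		return err
	}
	if pod.Annotations == nil {
		pod.Annotations = map[string]string{}
	}
	pod.Annotations[CreatedByAnnotation] = refJson
	return nil
}
Example No. 19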
func TestReconcileVolume(t *testing.T) {

	controller, mockClient, mockVolumePlugin := makeTestController()
	pv := makeTestVolume()
	pvc := makeTestClaim()
	mockClient.volume = pv

	err := controller.reconcileVolume(pv)
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}

	// watch adds claim to the store.
	// we need to add it to our mock client to mimic normal Get call
	controller.claimStore.Add(pvc)
	mockClient.claim = pvc

	// pretend the claim and volume are bound, no provisioning required
	claimRef, _ := api.GetReference(pvc)
	pv.Spec.ClaimRef = claimRef
	mockClient.volume = pv
	err = controller.reconcileVolume(pv)
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}

	pv.Annotations[pvProvisioningRequiredAnnotationKey] = "!pvProvisioningCompleted"
	pv.Annotations[qosProvisioningKey] = "foo"
	mockClient.volume = pv
	err = controller.reconcileVolume(pv)
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}

	if !isAnnotationMatch(pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue, mockClient.volume.Annotations) {
		t.Errorf("Expected %s but got %s", pvProvisioningCompletedAnnotationValue, mockClient.volume.Annotations[pvProvisioningRequiredAnnotationKey])
	}

	// Check that the volume plugin was called with correct tags
	tags := *mockVolumePlugin.LastProvisionerOptions.CloudTags
	checkTagValue(t, tags, cloudVolumeCreatedForClaimNamespaceTag, pvc.Namespace)
	checkTagValue(t, tags, cloudVolumeCreatedForClaimNameTag, pvc.Name)
	checkTagValue(t, tags, cloudVolumeCreatedForVolumeNameTag, pv.Name)

}
Example No. 20
func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp unversioned.Time, eventtype, reason, message string) {
	ref, err := api.GetReference(object)
	if err != nil {
		glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message)
		return
	}

	if !validateEventType(eventtype) {
		glog.Errorf("Unsupported event type: '%v'", eventtype)
		return
	}

	event := recorder.makeEvent(ref, eventtype, reason, message)
	event.Source = recorder.source

	go func() {
		// NOTE: events should be a non-blocking operation
		recorder.Action(watch.Added, event)
	}()
}
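validateEventType is not shown here; since Kubernetes events use the two standard types api.EventTypeNormal and api.EventTypeWarning, a plausible sketch is:

// Sketch of validateEventType; the real helper may differ.
func validateEventType(eventtype string) bool {
	switch eventtype {
	case api.EventTypeNormal, api.EventTypeWarning:
		return true
	}
	return false
}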
Example No. 21
// Search finds events about the specified object. The namespace of the
// object must match this event's client namespace unless the event client
// was made with the "" namespace.
func (e *events) Search(objOrRef runtime.Object) (*api.EventList, error) {
	ref, err := api.GetReference(objOrRef)
	if err != nil {
		return nil, err
	}
	if e.ns != "" && ref.Namespace != e.ns {
		return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns)
	}
	stringRefKind := string(ref.Kind)
	var refKind *string
	if stringRefKind != "" {
		refKind = &stringRefKind
	}
	stringRefUID := string(ref.UID)
	var refUID *string
	if stringRefUID != "" {
		refUID = &stringRefUID
	}
	fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID)
	return e.List(api.ListOptions{FieldSelector: fieldSelector})
}
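A hypothetical helper built on Search, listing recent events for a pod; printPodEvents is an illustrative name, and Search accepts the object directly since it calls api.GetReference internally:

// Hypothetical usage sketch for events.Search above.
func printPodEvents(c client.Interface, pod *api.Pod) error {
	events, err := c.Events(pod.Namespace).Search(pod)
	if err != nil {
		return err
	}
	for _, ev := range events.Items {
		fmt.Printf("%s\t%s\t%s\n", ev.LastTimestamp, ev.Reason, ev.Message)
	}
	return nil
}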
Example No. 22
// FillPodSecurityPolicySubjectReviewStatus fills PodSecurityPolicySubjectReviewStatus, assigning the SecurityContextConstraints to the PodSpec
func FillPodSecurityPolicySubjectReviewStatus(s *securityapi.PodSecurityPolicySubjectReviewStatus, provider kscc.SecurityContextConstraintsProvider, spec kapi.PodSpec, constraint *kapi.SecurityContextConstraints) (bool, error) {
	pod := &kapi.Pod{
		Spec: spec,
	}
	if errs := oscc.AssignSecurityContext(provider, pod, field.NewPath(fmt.Sprintf("provider %s: ", provider.GetSCCName()))); len(errs) > 0 {
		glog.Errorf("unable to assign SecurityContextConstraints provider: %v", errs)
		s.Reason = "CantAssignSecurityContextConstraintProvider"
		return false, fmt.Errorf("unable to assign SecurityContextConstraints provider: %v", errs.ToAggregate())
	}
	ref, err := kapi.GetReference(constraint)
	if err != nil {
		s.Reason = "CantObtainReference"
		return false, fmt.Errorf("unable to get SecurityContextConstraints reference: %v", err)
	}
	s.AllowedBy = ref

	if len(spec.ServiceAccountName) > 0 {
		s.Template.Spec = pod.Spec
	}
	return true, nil
}
Example No. 23
func getPodsAnnotationSet(template *api.PodTemplateSpec, object runtime.Object) (labels.Set, error) {
	desiredAnnotations := make(labels.Set)
	for k, v := range template.Annotations {
		desiredAnnotations[k] = v
	}
	createdByRef, err := api.GetReference(object)
	if err != nil {
		return desiredAnnotations, fmt.Errorf("unable to get controller reference: %v", err)
	}

	// TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients
	//   would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment.
	//   We need to consistently handle this case of annotation versioning.
	codec := api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: api.GroupName, Version: "v1"})

	createdByRefJson, err := runtime.Encode(codec, &api.SerializedReference{
		Reference: *createdByRef,
	})
	if err != nil {
		return desiredAnnotations, fmt.Errorf("unable to serialize controller reference: %v", err)
	}
	desiredAnnotations[CreatedByAnnotation] = string(createdByRefJson)
	return desiredAnnotations, nil
}
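Example No. 24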
// TestPersistentVolumeControllerStartup tests startup of the controller.
// The controller should not unbind any volumes when it starts.
func TestPersistentVolumeControllerStartup(t *testing.T) {
	_, s := framework.RunAMaster(nil)
	defer s.Close()

	ns := framework.CreateTestingNamespace("controller-startup", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	objCount := getObjectCount()

	const shortSyncPeriod = 2 * time.Second
	syncPeriod := getSyncPeriod(shortSyncPeriod)

	testClient, binder, watchPV, watchPVC := createClients(ns, t, s, shortSyncPeriod)
	defer watchPV.Stop()
	defer watchPVC.Stop()

	// Create *bound* volumes and PVCs
	pvs := make([]*api.PersistentVolume, objCount)
	pvcs := make([]*api.PersistentVolumeClaim, objCount)
	for i := 0; i < objCount; i++ {
		pvName := "pv-startup-" + strconv.Itoa(i)
		pvcName := "pvc-startup-" + strconv.Itoa(i)

		pvc := createPVC(pvcName, ns.Name, "1G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
		pvc.Annotations = map[string]string{"annBindCompleted": ""}
		pvc.Spec.VolumeName = pvName
		newPVC, err := testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
		if err != nil {
			t.Fatalf("Cannot create claim %q: %v", pvc.Name, err)
		}
		// Save Bound status as a separate transaction
		newPVC.Status.Phase = api.ClaimBound
		newPVC, err = testClient.PersistentVolumeClaims(ns.Name).UpdateStatus(newPVC)
		if err != nil {
			t.Fatalf("Cannot update claim status %q: %v", pvc.Name, err)
		}
		pvcs[i] = newPVC
		// Drain watchPVC with all events generated by the PVC until it's bound
		// We don't want to catch "PVC craated with Status.Phase == Pending"
		// later in this test.
		waitForAnyPersistentVolumeClaimPhase(watchPVC, api.ClaimBound)

		pv := createPV(pvName, "/tmp/foo"+strconv.Itoa(i), "1G",
			[]api.PersistentVolumeAccessMode{api.ReadWriteOnce}, api.PersistentVolumeReclaimRetain)
		claimRef, err := api.GetReference(newPVC)
		if err != nil {
			glog.V(3).Infof("unexpected error getting claim reference: %v", err)
			return
		}
		pv.Spec.ClaimRef = claimRef
		newPV, err := testClient.PersistentVolumes().Create(pv)
		if err != nil {
			t.Fatalf("Cannot create volume %q: %v", pv.Name, err)
		}
		// Save Bound status as a separate transaction
		newPV.Status.Phase = api.VolumeBound
		newPV, err = testClient.PersistentVolumes().UpdateStatus(newPV)
		if err != nil {
			t.Fatalf("Cannot update volume status %q: %v", pv.Name, err)
		}
		pvs[i] = newPV
		// Drain watchPV with all events generated by the PV until it's bound
		// We don't want to catch "PV craated with Status.Phase == Pending"
		// later in this test.
		waitForAnyPersistentVolumePhase(watchPV, api.VolumeBound)
	}

	// Start the controller when all PVs and PVCs are already saved in etcd
	stopCh := make(chan struct{})
	binder.Run(stopCh)
	defer close(stopCh)

	// wait for at least two sync periods for changes. No volume should be
	// Released and no claim should be Lost during this time.
	timer := time.NewTimer(2 * syncPeriod)
	defer timer.Stop()
	finished := false
	for !finished {
		select {
		case volumeEvent := <-watchPV.ResultChan():
			volume, ok := volumeEvent.Object.(*api.PersistentVolume)
			if !ok {
				continue
			}
			if volume.Status.Phase != api.VolumeBound {
				t.Errorf("volume %s unexpectedly changed state to %s", volume.Name, volume.Status.Phase)
			}

		case claimEvent := <-watchPVC.ResultChan():
			claim, ok := claimEvent.Object.(*api.PersistentVolumeClaim)
			if !ok {
				continue
			}
			if claim.Status.Phase != api.ClaimBound {
				t.Errorf("claim %s unexpectedly changed state to %s", claim.Name, claim.Status.Phase)
			}

		case <-timer.C:
			// Wait finished
			glog.V(2).Infof("Wait finished")
			finished = true
		}
	}

	// check that everything is bound to something
	for i := 0; i < objCount; i++ {
		pv, err := testClient.PersistentVolumes().Get(pvs[i].Name)
		if err != nil {
			t.Fatalf("Unexpected error getting pv: %v", err)
		}
		if pv.Spec.ClaimRef == nil {
			t.Fatalf("PV %q is not bound", pv.Name)
		}
		glog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name)

		pvc, err := testClient.PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name)
		if err != nil {
			t.Fatalf("Unexpected error getting pvc: %v", err)
		}
		if pvc.Spec.VolumeName == "" {
			t.Fatalf("PVC %q is not bound", pvc.Name)
		}
		glog.V(2).Infof("PVC %q is bound to PV %q", pvc.Name, pvc.Spec.VolumeName)
	}
}
Example No. 25
func TestWatchRead(t *testing.T) {
	simpleStorage := &SimpleRESTStorage{}
	_ = rest.Watcher(simpleStorage) // Give compile error if this doesn't work.
	handler := handle(map[string]rest.Storage{"simples": simpleStorage})
	server := httptest.NewServer(handler)
	defer server.Close()

	dest, _ := url.Parse(server.URL)
	dest.Path = "/" + prefix + "/" + testGroupVersion.Group + "/" + testGroupVersion.Version + "/simples"
	dest.RawQuery = "watch=1"

	connectHTTP := func(accept string) (io.ReadCloser, string) {
		client := http.Client{}
		request, err := http.NewRequest("GET", dest.String(), nil)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		request.Header.Add("Accept", accept)

		response, err := client.Do(request)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}

		if response.StatusCode != http.StatusOK {
			t.Fatalf("Unexpected response %#v", response)
		}
		return response.Body, response.Header.Get("Content-Type")
	}

	connectWebSocket := func(accept string) (io.ReadCloser, string) {
		dest := *dest
		dest.Scheme = "ws" // Required by websocket, though the server never sees it.
		config, err := websocket.NewConfig(dest.String(), "http://localhost")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		config.Header.Add("Accept", accept)
		ws, err := websocket.DialConfig(config)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		return ws, "__default__"
	}

	testCases := []struct {
		Accept              string
		ExpectedContentType string
		MediaType           string
	}{
		{
			Accept:              "application/json",
			ExpectedContentType: "application/json",
			MediaType:           "application/json",
		},
		// TODO: yaml stream serialization requires that RawExtension.MarshalJSON
		// be able to understand nested encoding (since yaml calls json.Marshal
		// rather than yaml.Marshal, which results in the raw bytes being in yaml).
		// Same problem as thirdparty object.
		/*{
			Accept:              "application/yaml",
			ExpectedContentType: "application/yaml;stream=watch",
			MediaType:           "application/yaml",
		},*/
		{
			Accept:              "application/vnd.kubernetes.protobuf",
			ExpectedContentType: "application/vnd.kubernetes.protobuf;stream=watch",
			MediaType:           "application/vnd.kubernetes.protobuf",
		},
		{
			Accept:              "application/vnd.kubernetes.protobuf;stream=watch",
			ExpectedContentType: "application/vnd.kubernetes.protobuf;stream=watch",
			MediaType:           "application/vnd.kubernetes.protobuf",
		},
	}
	protocols := []struct {
		name        string
		selfFraming bool
		fn          func(string) (io.ReadCloser, string)
	}{
		{name: "http", fn: connectHTTP},
		{name: "websocket", selfFraming: true, fn: connectWebSocket},
	}

	for _, protocol := range protocols {
		for _, test := range testCases {
			serializer, ok := api.Codecs.StreamingSerializerForMediaType(test.MediaType, nil)
			if !ok {
				t.Fatal(serializer)
			}

			r, contentType := protocol.fn(test.Accept)
			defer r.Close()

			if contentType != "__default__" && contentType != test.ExpectedContentType {
				t.Errorf("Unexpected content type: %#v", contentType)
			}
			objectSerializer, ok := api.Codecs.SerializerForMediaType(test.MediaType, nil)
			if !ok {
				t.Fatal(objectSerializer)
			}
			objectCodec := api.Codecs.DecoderToVersion(objectSerializer, testInternalGroupVersion)

			var fr io.ReadCloser = r
			if !protocol.selfFraming {
				fr = serializer.Framer.NewFrameReader(r)
			}
			d := streaming.NewDecoder(fr, serializer)

			var w *watch.FakeWatcher
			for w == nil {
				w = simpleStorage.Watcher()
				time.Sleep(time.Millisecond)
			}

			for i, item := range podWatchTestTable {
				action, object := item.t, item.obj
				name := fmt.Sprintf("%s-%s-%d", protocol.name, test.MediaType, i)

				// Send
				w.Action(action, object)
				// Test receive
				var got versioned.Event
				_, _, err := d.Decode(nil, &got)
				if err != nil {
					t.Fatalf("%s: Unexpected error: %v", name, err)
				}
				if got.Type != string(action) {
					t.Errorf("%s: Unexpected type: %v", name, got.Type)
				}

				gotObj, err := runtime.Decode(objectCodec, got.Object.Raw)
				if err != nil {
					t.Fatalf("%s: Decode error: %v", name, err)
				}
				if _, err := api.GetReference(gotObj); err != nil {
					t.Errorf("%s: Unable to construct reference: %v", name, err)
				}
				if e, a := object, gotObj; !api.Semantic.DeepEqual(e, a) {
					t.Errorf("%s: different: %s", name, diff.ObjectDiff(e, a))
				}
			}
			w.Stop()

			var got versioned.Event
			_, _, err := d.Decode(nil, &got)
			if err == nil {
				t.Errorf("Unexpected non-error")
			}

			r.Close()
		}
	}
}
Example No. 26
func TestWatchWebsocketClientClose(t *testing.T) {
	simpleStorage := &SimpleRESTStorage{}
	_ = rest.Watcher(simpleStorage) // Give compile error if this doesn't work.
	handler := handle(map[string]rest.Storage{"simples": simpleStorage})
	server := httptest.NewServer(handler)
	defer server.Close()

	dest, _ := url.Parse(server.URL)
	dest.Scheme = "ws" // Required by websocket, though the server never sees it.
	dest.Path = "/" + prefix + "/" + testGroupVersion.Group + "/" + testGroupVersion.Version + "/watch/simples"
	dest.RawQuery = ""

	ws, err := websocket.Dial(dest.String(), "", "http://localhost")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	try := func(action watch.EventType, object runtime.Object) {
		// Send
		simpleStorage.fakeWatch.Action(action, object)
		// Test receive
		var got watchJSON
		err := websocket.JSON.Receive(ws, &got)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		if got.Type != action {
			t.Errorf("Unexpected type: %v", got.Type)
		}
		gotObj, err := runtime.Decode(codec, got.Object)
		if err != nil {
			t.Fatalf("Decode error: %v\n%v", err, got)
		}
		if _, err := api.GetReference(gotObj); err != nil {
			t.Errorf("Unable to construct reference: %v", err)
		}
		if e, a := object, gotObj; !reflect.DeepEqual(e, a) {
			t.Errorf("Expected %#v, got %#v", e, a)
		}
	}

	// Send/receive should work
	for _, item := range watchTestTable {
		try(item.t, item.obj)
	}

	// Sending normal data should be ignored
	websocket.JSON.Send(ws, map[string]interface{}{"test": "data"})

	// Send/receive should still work
	for _, item := range watchTestTable {
		try(item.t, item.obj)
	}

	// Client requests a close
	ws.Close()

	select {
	case data, ok := <-simpleStorage.fakeWatch.ResultChan():
		if ok {
			t.Errorf("expected a closed result channel, but got watch result %#v", data)
		}
	case <-time.After(5 * time.Second):
		t.Errorf("watcher did not close when client closed")
	}

	var got watchJSON
	err = websocket.JSON.Receive(ws, &got)
	if err == nil {
		t.Errorf("Unexpected non-error")
	}
}
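Example No. 27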
func TestPersistentVolumeRecycler(t *testing.T) {
	_, s := framework.RunAMaster(t)
	defer s.Close()

	deleteAllEtcdKeys()
	binderClient := client.NewOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	recyclerClient := client.NewOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	testClient := client.NewOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	host := volume.NewFakeVolumeHost("/tmp/fake", nil, nil)

	plugins := []volume.VolumePlugin{&volume.FakeVolumePlugin{"plugin-name", host, volume.VolumeConfig{}, volume.VolumeOptions{}}}
	cloud := &fake_cloud.FakeCloud{}

	binder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(binderClient, 10*time.Second)
	binder.Run()
	defer binder.Stop()

	recycler, _ := persistentvolumecontroller.NewPersistentVolumeRecycler(recyclerClient, 30*time.Second, plugins, cloud)
	recycler.Run()
	defer recycler.Stop()

	// This PV will be claimed, released, and recycled.
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{Name: "fake-pv"},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource:        api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/tmp/foo"}},
			Capacity:                      api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")},
			AccessModes:                   []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle,
		},
	}

	pvc := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{Name: "fake-pvc"},
		Spec: api.PersistentVolumeClaimSpec{
			Resources:   api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("5G")}},
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
		},
	}

	w, _ := testClient.PersistentVolumes().Watch(api.ListOptions{})
	defer w.Stop()

	_, _ = testClient.PersistentVolumes().Create(pv)
	_, _ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)

	// wait until the binder pairs the volume and claim
	waitForPersistentVolumePhase(w, api.VolumeBound)

	// deleting a claim releases the volume, after which it can be recycled
	if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
		t.Errorf("error deleting claim %s", pvc.Name)
	}

	waitForPersistentVolumePhase(w, api.VolumeReleased)
	waitForPersistentVolumePhase(w, api.VolumeAvailable)

	// end of Recycler test.
	// Deleter test begins now.
	// tests are serial because running masters concurrently that delete keys may cause similar tests to time out

	deleteAllEtcdKeys()

	// change the reclamation policy of the PV for the next test
	pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimDelete

	w, _ = testClient.PersistentVolumes().Watch(api.ListOptions{})
	defer w.Stop()

	_, _ = testClient.PersistentVolumes().Create(pv)
	_, _ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)

	waitForPersistentVolumePhase(w, api.VolumeBound)

	// deleting a claim releases the volume, after which it can be recycled
	if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
		t.Errorf("error deleting claim %s", pvc.Name)
	}

	waitForPersistentVolumePhase(w, api.VolumeReleased)

	for {
		event := <-w.ResultChan()
		if event.Type == watch.Deleted {
			break
		}
	}

	// test the race between claims and volumes. Ensure a volume binds to only a single claim.
	deleteAllEtcdKeys()
	counter := 0
	maxClaims := 100
	claims := []*api.PersistentVolumeClaim{}
	for counter <= maxClaims {
		counter += 1
		clone, _ := conversion.NewCloner().DeepCopy(pvc)
		newPvc, _ := clone.(*api.PersistentVolumeClaim)
		newPvc.ObjectMeta = api.ObjectMeta{Name: fmt.Sprintf("fake-pvc-%d", counter)}
		claim, err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(newPvc)
		if err != nil {
			t.Fatal("Error creating newPvc: %v", err)
		}
		claims = append(claims, claim)
	}

	// putting a bind manually on a pv should only match the claim it is bound to
	rand.Seed(time.Now().Unix())
	claim := claims[rand.Intn(maxClaims-1)]
	claimRef, err := api.GetReference(claim)
	if err != nil {
		t.Fatalf("Unexpected error getting claimRef: %v", err)
	}
	pv.Spec.ClaimRef = claimRef

	pv, err = testClient.PersistentVolumes().Create(pv)
	if err != nil {
		t.Fatalf("Unexpected error creating pv: %v", err)
	}

	waitForPersistentVolumePhase(w, api.VolumeBound)

	pv, err = testClient.PersistentVolumes().Get(pv.Name)
	if err != nil {
		t.Fatalf("Unexpected error getting pv: %v", err)
	}
	if pv.Spec.ClaimRef == nil {
		t.Fatalf("Unexpected nil claimRef")
	}
	if pv.Spec.ClaimRef.Namespace != claimRef.Namespace || pv.Spec.ClaimRef.Name != claimRef.Name {
		t.Fatalf("Bind mismatch! Expected %s/%s but got %s/%s", claimRef.Namespace, claimRef.Name, pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name)
	}
}
Example No. 28
func getRef(object runtime.Object) (*api.ObjectReference, error) {
	return api.GetReference(object)
}
Example No. 29
func describeNode(c *gin.Context) {
	nodename := c.Param("no")

	node, err := kubeclient.Get().Nodes().Get(nodename)
	if err != nil {
		c.HTML(http.StatusInternalServerError, "error", gin.H{"error": err.Error()})
		return
	}

	d := page.NodeDetail{
		Name:              node.Name,
		Labels:            node.Labels,
		CreationTimestamp: node.CreationTimestamp.Time.Format(time.RFC1123Z),
		Conditions:        node.Status.Conditions,
		Capacity:          kube.TranslateResourseList(node.Status.Capacity),
		SystemInfo:        node.Status.NodeInfo,
	}
	allPods, err := kubeclient.GetAllPods()
	if err != nil {
		c.HTML(http.StatusInternalServerError, "error", gin.H{"error": err.Error()})
		return
	}
	d.Pods = kube.FilterNodePods(allPods, node)
	d.TerminatedPods, d.NonTerminatedPods = kube.FilterTerminatedPods(d.Pods)
	d.NonTerminatedPodsResources = computePodsResources(d.NonTerminatedPods, node)
	d.TerminatedPodsResources = computePodsResources(d.TerminatedPods, node)
	d.AllocatedResources, err = computeNodeResources(d.NonTerminatedPods, node)
	if err != nil {
		c.HTML(http.StatusInternalServerError, "error", gin.H{"error": err.Error()})
		return
	}

	var pods []page.Pod
	for _, pod := range d.Pods {
		pods = append(pods, genOnePod(pod))
	}

	var nodeEvents []page.Event
	var nodeEventList *api.EventList
	if ref, err := api.GetReference(node); err != nil {
		glog.Errorf("Unable to construct reference to '%#v': %v", node, err)
	} else {
		ref.UID = types.UID(ref.Name)
		if nodeEventList, err = kubeclient.Get().Events("").Search(ref); err != nil {
			glog.Errorf("Unable to search events for '%#v': %v", node, err)
		}
	}
	if nodeEventList != nil {
		nodeEvents = genEvents(nodeEventList)
	}

	var events []page.Event
	var eventList *api.EventList
	if eventList, err = kubeclient.Get().Events("").List(labels.Everything(), fields.Everything()); err != nil {
		glog.Errorf("Unable to search events for '%#v': %v", node, err)
	}
	if eventList != nil {
		events = genEvents(&api.EventList{Items: kube.FilterEventsFromNode(eventList.Items, node)})
	}

	c.HTML(http.StatusOK, "nodeDetail", gin.H{
		"title":      nodename,
		"node":       d,
		"pods":       pods,
		"events":     events,
		"nodeEvents": nodeEvents,
	})
}
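Example No. 30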
func syncClaim(volumeIndex *persistentVolumeOrderedIndex, binderClient binderClient, claim *api.PersistentVolumeClaim) (err error) {
	glog.V(5).Infof("Synchronizing PersistentVolumeClaim[%s]\n", claim.Name)

	// claims can be in one of the following states:
	//
	// ClaimPending -- default value -- not yet bound to a volume. A volume that matches the claim may not exist.
	// ClaimBound -- bound to a volume. claim.Spec.VolumeName is set.
	currentPhase := claim.Status.Phase
	nextPhase := currentPhase

	switch currentPhase {
	// pending claims await a matching volume
	case api.ClaimPending:
		volume, err := volumeIndex.FindBestMatchForClaim(claim)
		if err != nil {
			return err
		}
		if volume == nil {
			return fmt.Errorf("A volume match does not exist for persistent claim: %s", claim.Name)
		}

		// make a binding reference to the claim.
		// triggers update of the claim in this controller, which builds claim status
		claim.Spec.VolumeName = volume.Name
		// TODO: make this similar to Pod's binding both with BindingREST subresource and GuaranteedUpdate helper in etcd.go
		claim, err = binderClient.UpdatePersistentVolumeClaim(claim)
		if err == nil {
			nextPhase = api.ClaimBound
			glog.V(5).Infof("PersistentVolumeClaim[%s] is bound\n", claim.Name)
		} else {
			// Roll back by unsetting the VolumeName on the claim pointer;
			// the volume in the index will be unbound again and ready to be matched.
			claim.Spec.VolumeName = ""
			// Roll back by restoring the original phase to the claim pointer.
			nextPhase = api.ClaimPending
			return fmt.Errorf("Error updating claim: %+v\n", err)
		}

	case api.ClaimBound:
		volume, err := binderClient.GetPersistentVolume(claim.Spec.VolumeName)
		if err != nil {
			return fmt.Errorf("Unexpected error getting persistent volume: %v\n", err)
		}

		if volume.Spec.ClaimRef == nil {
			glog.V(5).Infof("Rebuilding bind on pv.Spec.ClaimRef\n")
			claimRef, err := api.GetReference(claim)
			if err != nil {
				return fmt.Errorf("Unexpected error getting claim reference: %v\n", err)
			}
			volume.Spec.ClaimRef = claimRef
			_, err = binderClient.UpdatePersistentVolume(volume)
			if err != nil {
				return fmt.Errorf("Unexpected error saving PersistentVolume.Status: %+v", err)
			}
		}

		// all "actuals" are transferred from PV to PVC so the user knows what
		// type of volume they actually got for their claim.
		// Volumes cannot have zero AccessModes, so checking that a claim has access modes
		// is sufficient to tell us if these values have already been set.
		if len(claim.Status.AccessModes) == 0 {
			claim.Status.Phase = api.ClaimBound
			claim.Status.AccessModes = volume.Spec.AccessModes
			claim.Status.Capacity = volume.Spec.Capacity
			_, err := binderClient.UpdatePersistentVolumeClaimStatus(claim)
			if err != nil {
				return fmt.Errorf("Unexpected error saving claim status: %+v", err)
			}
		}
	}

	if currentPhase != nextPhase {
		claim.Status.Phase = nextPhase
		binderClient.UpdatePersistentVolumeClaimStatus(claim)
	}
	return nil
}