// updateVolume is callback from framework.Controller watching PersistentVolume // events. func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) { newVolume, ok := newObj.(*api.PersistentVolume) if !ok { glog.Errorf("Expected PersistentVolume but handler received %+v", newObj) return } if ctrl.upgradeVolumeFrom1_2(newVolume) { // volume deleted return } // Store the new volume version in the cache and do not process it if this // is an old version. new, err := ctrl.storeVolumeUpdate(newVolume) if err != nil { glog.Errorf("%v", err) } if !new { return } if err := ctrl.syncVolume(newVolume); err != nil { if errors.IsConflict(err) { // Version conflict error happens quite often and the controller // recovers from it easily. glog.V(3).Infof("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err) } else { glog.Errorf("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err) } } }
// This test the fast-fail path. We test that the precondition gets verified // again before deleting the object in tests of pkg/storage/etcd. func (t *Tester) testDeleteWithUID(obj runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc) { ctx := t.TestContext() foo := copyOrDie(obj) t.setObjectMeta(foo, t.namer(1)) objectMeta := t.getObjectMetaOrFail(foo) objectMeta.UID = types.UID("UID0000") if err := createFn(ctx, foo); err != nil { t.Errorf("unexpected error: %v", err) } obj, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewPreconditionDeleteOptions("UID1111")) if err == nil || !errors.IsConflict(err) { t.Errorf("unexpected error: %v", err) } obj, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewPreconditionDeleteOptions("UID0000")) if err != nil { t.Errorf("unexpected error: %v", err) } if !t.returnDeletedObject { if status, ok := obj.(*unversioned.Status); !ok { t.Errorf("expected status of delete, got %v", status) } else if status.Status != unversioned.StatusSuccess { t.Errorf("expected success, got: %v", status.Status) } } _, err = getFn(ctx, foo) if err == nil || !isNotFoundFn(err) { t.Errorf("unexpected error: %v", err) } }
func (t *Tester) testUpdateFailsOnVersionTooOld(obj runtime.Object, createFn CreateFunc, getFn GetFunc) { ctx := t.TestContext() foo := copyOrDie(obj) t.setObjectMeta(foo, t.namer(3)) if err := createFn(ctx, foo); err != nil { t.Errorf("unexpected error: %v", err) } storedFoo, err := getFn(ctx, foo) if err != nil { t.Errorf("unexpected error: %v", err) } older := copyOrDie(storedFoo) olderMeta := t.getObjectMetaOrFail(older) olderMeta.ResourceVersion = "1" _, _, err = t.storage.(rest.Updater).Update(t.TestContext(), olderMeta.Name, rest.DefaultUpdatedObjectInfo(older, api.Scheme)) if err == nil { t.Errorf("Expected an error, but we didn't get one") } else if !errors.IsConflict(err) { t.Errorf("Expected Conflict error, got '%v'", err) } }
// updateClaim is callback from framework.Controller watching PersistentVolumeClaim // events. func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) { // Store the new claim version in the cache and do not process it if this is // an old version. newClaim, ok := newObj.(*api.PersistentVolumeClaim) if !ok { glog.Errorf("Expected PersistentVolumeClaim but updateClaim received %+v", newObj) return } new, err := ctrl.storeClaimUpdate(newClaim) if err != nil { glog.Errorf("%v", err) } if !new { return } if err := ctrl.syncClaim(newClaim); err != nil { if errors.IsConflict(err) { // Version conflict error happens quite often and the controller // recovers from it easily. glog.V(3).Infof("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err) } else { glog.Errorf("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err) } } }
func (s *ServiceController) persistUpdate(service *api.Service) error { var err error for i := 0; i < clientRetryCount; i++ { _, err = s.kubeClient.Core().Services(service.Namespace).UpdateStatus(service) if err == nil { return nil } // If the object no longer exists, we don't want to recreate it. Just bail // out so that we can process the delete, which we should soon be receiving // if we haven't already. if errors.IsNotFound(err) { glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v", service.Namespace, service.Name, err) return nil } // TODO: Try to resolve the conflict if the change was unrelated to load // balancer status. For now, just rely on the fact that we'll // also process the update that caused the resource version to change. if errors.IsConflict(err) { glog.V(4).Infof("Not persisting update to service '%s/%s' that has been changed since we received it: %v", service.Namespace, service.Name, err) return nil } glog.Warningf("Failed to persist updated LoadBalancerStatus to service '%s/%s' after creating its load balancer: %v", service.Namespace, service.Name, err) time.Sleep(clientRetryInterval) } return err }
func TestStoreUpdate(t *testing.T) { podA := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test"}, Spec: api.PodSpec{NodeName: "machine"}, } podB := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test"}, Spec: api.PodSpec{NodeName: "machine2"}, } podAWithResourceVersion := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test", ResourceVersion: "7"}, Spec: api.PodSpec{NodeName: "machine"}, } testContext := api.WithNamespace(api.NewContext(), "test") server, registry := NewTestGenericStoreRegistry(t) defer server.Terminate(t) // Test1 try to update a non-existing node _, _, err := registry.Update(testContext, podA.Name, rest.DefaultUpdatedObjectInfo(podA, api.Scheme)) if !errors.IsNotFound(err) { t.Errorf("Unexpected error: %v", err) } // Test2 createIfNotFound and verify registry.UpdateStrategy.(*testRESTStrategy).allowCreateOnUpdate = true if !updateAndVerify(t, testContext, registry, podA) { t.Errorf("Unexpected error updating podA") } registry.UpdateStrategy.(*testRESTStrategy).allowCreateOnUpdate = false // Test3 outofDate _, _, err = registry.Update(testContext, podAWithResourceVersion.Name, rest.DefaultUpdatedObjectInfo(podAWithResourceVersion, api.Scheme)) if !errors.IsConflict(err) { t.Errorf("Unexpected error updating podAWithResourceVersion: %v", err) } // Test4 normal update and verify if !updateAndVerify(t, testContext, registry, podB) { t.Errorf("Unexpected error updating podB") } // Test5 unconditional update // NOTE: The logic for unconditional updates doesn't make sense to me, and imho should be removed. // doUnconditionalUpdate := resourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate() // ^^ That condition can *never be true due to the creation of root objects. // // registry.UpdateStrategy.(*testRESTStrategy).allowUnconditionalUpdate = true // updateAndVerify(t, testContext, registry, podAWithResourceVersion) }
// deleteClaim is callback from framework.Controller watching PersistentVolumeClaim // events. func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) { _ = ctrl.claims.Delete(obj) var volume *api.PersistentVolume var claim *api.PersistentVolumeClaim var ok bool claim, ok = obj.(*api.PersistentVolumeClaim) if !ok { if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { claim, ok = unknown.Obj.(*api.PersistentVolumeClaim) if !ok { glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", unknown.Obj) return } } else { glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", obj) return } } if !ok || claim == nil { return } glog.V(4).Infof("claim %q deleted", claimToClaimKey(claim)) if pvObj, exists, _ := ctrl.volumes.store.GetByKey(claim.Spec.VolumeName); exists { if volume, ok = pvObj.(*api.PersistentVolume); ok { // sync the volume when its claim is deleted. Explicitly sync'ing the // volume here in response to claim deletion prevents the volume from // waiting until the next sync period for its Release. if volume != nil { err := ctrl.syncVolume(volume) if err != nil { if errors.IsConflict(err) { // Version conflict error happens quite often and the // controller recovers from it easily. glog.V(3).Infof("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", volume.Name, err) } else { glog.Errorf("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", volume.Name, err) } } } } else { glog.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, pvObj) } } }
// retryOnConflictError retries the specified fn if there was a conflict error // TODO RetryOnConflict should be a generic concept in client code func retryOnConflictError(kubeClient clientset.Interface, namespace *api.Namespace, fn updateNamespaceFunc) (result *api.Namespace, err error) { latestNamespace := namespace for { result, err = fn(kubeClient, latestNamespace) if err == nil { return result, nil } if !errors.IsConflict(err) { return nil, err } latestNamespace, err = kubeClient.Core().Namespaces().Get(latestNamespace.Name) if err != nil { return nil, err } } }
func (p *patcher) patch(current runtime.Object, modified []byte, source, namespace, name string) ([]byte, error) { var getErr error patchBytes, err := p.patchSimple(current, modified, source, namespace, name) for i := 1; i <= maxPatchRetry && errors.IsConflict(err); i++ { if i > triesBeforeBackOff { p.backOff.Sleep(backOffPeriod) } current, getErr = p.helper.Get(namespace, name, false) if getErr != nil { return nil, getErr } patchBytes, err = p.patchSimple(current, modified, source, namespace, name) } return patchBytes, err }
// deleteVolume is callback from framework.Controller watching PersistentVolume // events. func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) { _ = ctrl.volumes.store.Delete(obj) var volume *api.PersistentVolume var ok bool volume, ok = obj.(*api.PersistentVolume) if !ok { if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { volume, ok = unknown.Obj.(*api.PersistentVolume) if !ok { glog.Errorf("Expected PersistentVolume but deleteVolume received %+v", unknown.Obj) return } } else { glog.Errorf("Expected PersistentVolume but deleteVolume received %+v", obj) return } } if !ok || volume == nil || volume.Spec.ClaimRef == nil { return } glog.V(4).Infof("volume %q deleted", volume.Name) if claimObj, exists, _ := ctrl.claims.GetByKey(claimrefToClaimKey(volume.Spec.ClaimRef)); exists { if claim, ok := claimObj.(*api.PersistentVolumeClaim); ok && claim != nil { // sync the claim when its volume is deleted. Explicitly syncing the // claim here in response to volume deletion prevents the claim from // waiting until the next sync period for its Lost status. err := ctrl.syncClaim(claim) if err != nil { if errors.IsConflict(err) { // Version conflict error happens quite often and the // controller recovers from it easily. glog.V(3).Infof("PersistentVolumeController could not update volume %q from deleteVolume handler: %+v", claimToClaimKey(claim), err) } else { glog.Errorf("PersistentVolumeController could not update volume %q from deleteVolume handler: %+v", claimToClaimKey(claim), err) } } } else { glog.Errorf("Cannot convert object from claim cache to claim %q!?: %+v", claimrefToClaimKey(volume.Spec.ClaimRef), claimObj) } } }
func (t *Tester) testUpdateWithWrongUID(obj runtime.Object, createFn CreateFunc, getFn GetFunc) { ctx := t.TestContext() foo := copyOrDie(obj) t.setObjectMeta(foo, t.namer(5)) objectMeta := t.getObjectMetaOrFail(foo) objectMeta.UID = types.UID("UID0000") if err := createFn(ctx, foo); err != nil { t.Errorf("unexpected error: %v", err) } objectMeta.UID = types.UID("UID1111") obj, created, err := t.storage.(rest.Updater).Update(ctx, objectMeta.Name, rest.DefaultUpdatedObjectInfo(foo, api.Scheme)) if created || obj != nil { t.Errorf("expected nil object and no creation for object: %v", foo) } if err == nil || !errors.IsConflict(err) { t.Errorf("unexpected error: %v", err) } }
// RetryConflict executes the provided function repeatedly, retrying if the server returns a conflicting // write. Callers should preserve previous executions if they wish to retry changes. It performs an // exponential backoff. // // var pod *api.Pod // err := RetryOnConflict(DefaultBackoff, func() (err error) { // pod, err = c.Pods("mynamespace").UpdateStatus(podStatus) // return // }) // if err != nil { // // may be conflict if max retries were hit // return err // } // ... // // TODO: Make Backoff an interface? func RetryOnConflict(backoff wait.Backoff, fn func() error) error { var lastConflictErr error err := wait.ExponentialBackoff(backoff, func() (bool, error) { err := fn() switch { case err == nil: return true, nil case errors.IsConflict(err): lastConflictErr = err return false, nil default: return false, err } }) if err == wait.ErrWaitTimeout { err = lastConflictErr } return err }
// patchResource divides PatchResource for easier unit testing
//
// It drives an optimistic-concurrency patch: applyPatch is invoked by the
// storage layer (GuaranteedUpdate) with the currently persisted object, and
// the update is retried up to MaxPatchConflicts times on version conflicts.
func patchResource(
	ctx api.Context,
	admit updateAdmissionFunc,
	timeout time.Duration,
	versionedObj runtime.Object,
	patcher rest.Patcher,
	name string,
	patchType api.PatchType,
	patchJS []byte,
	namer ScopeNamer,
	copier runtime.ObjectCopier,
	resource unversioned.GroupVersionResource,
	codec runtime.Codec,
) (runtime.Object, error) {
	namespace := api.NamespaceValue(ctx)

	var (
		// originalObjJS / originalPatchedObjJS are captured on the first pass
		// through applyPatch and reused on conflict retries to detect whether
		// the concurrent change overlaps with the requested patch.
		originalObjJS        []byte
		originalPatchedObjJS []byte
		lastConflictErr      error
	)

	// applyPatch is called every time GuaranteedUpdate asks for the updated object,
	// and is given the currently persisted object as input.
	applyPatch := func(_ api.Context, _, currentObject runtime.Object) (runtime.Object, error) {
		// Make sure we actually have a persisted currentObject
		if hasUID, err := hasUID(currentObject); err != nil {
			return nil, err
		} else if !hasUID {
			return nil, errors.NewNotFound(resource.GroupResource(), name)
		}

		switch {
		case len(originalObjJS) == 0 || len(originalPatchedObjJS) == 0:
			// first time through,
			// 1. apply the patch
			// 2. save the originalJS and patchedJS to detect whether there were conflicting changes on retries
			if js, err := runtime.Encode(codec, currentObject); err != nil {
				return nil, err
			} else {
				originalObjJS = js
			}
			if js, err := getPatchedJS(patchType, originalObjJS, patchJS, versionedObj); err != nil {
				return nil, err
			} else {
				originalPatchedObjJS = js
			}
			objToUpdate := patcher.New()
			if err := runtime.DecodeInto(codec, originalPatchedObjJS, objToUpdate); err != nil {
				return nil, err
			}
			// Reject patches that try to rename or move the object.
			if err := checkName(objToUpdate, name, namespace, namer); err != nil {
				return nil, err
			}
			return objToUpdate, nil
		default:
			// on a conflict,
			// 1. build a strategic merge patch from originalJS and the patchedJS. Different patch types can
			//    be specified, but a strategic merge patch should be expressive enough handle them. Build the
			//    patch with this type to handle those cases.
			// 2. build a strategic merge patch from originalJS and the currentJS
			// 3. ensure no conflicts between the two patches
			// 4. apply the #1 patch to the currentJS object
			currentObjectJS, err := runtime.Encode(codec, currentObject)
			if err != nil {
				return nil, err
			}
			currentPatch, err := strategicpatch.CreateStrategicMergePatch(originalObjJS, currentObjectJS, versionedObj)
			if err != nil {
				return nil, err
			}
			originalPatch, err := strategicpatch.CreateStrategicMergePatch(originalObjJS, originalPatchedObjJS, versionedObj)
			if err != nil {
				return nil, err
			}

			// Unmarshal both patches into generic maps so they can be compared
			// for overlapping (conflicting) changes.
			diff1 := make(map[string]interface{})
			if err := json.Unmarshal(originalPatch, &diff1); err != nil {
				return nil, err
			}
			diff2 := make(map[string]interface{})
			if err := json.Unmarshal(currentPatch, &diff2); err != nil {
				return nil, err
			}
			hasConflicts, err := strategicpatch.HasConflicts(diff1, diff2)
			if err != nil {
				return nil, err
			}
			if hasConflicts {
				glog.V(4).Infof("patchResource failed for resource %s, because there is a meaningful conflict.\n diff1=%v\n, diff2=%v\n", name, diff1, diff2)
				// Return the last conflict error we got if we have one
				if lastConflictErr != nil {
					return nil, lastConflictErr
				}
				// Otherwise manufacture one of our own
				return nil, errors.NewConflict(resource.GroupResource(), name, nil)
			}

			// No overlap: re-apply the caller's change on top of the current
			// object as a strategic merge.
			newlyPatchedObjJS, err := getPatchedJS(api.StrategicMergePatchType, currentObjectJS, originalPatch, versionedObj)
			if err != nil {
				return nil, err
			}
			objToUpdate := patcher.New()
			if err := runtime.DecodeInto(codec, newlyPatchedObjJS, objToUpdate); err != nil {
				return nil, err
			}
			return objToUpdate, nil
		}
	}

	// applyAdmission is called every time GuaranteedUpdate asks for the updated object,
	// and is given the currently persisted object and the patched object as input.
	applyAdmission := func(ctx api.Context, patchedObject runtime.Object, currentObject runtime.Object) (runtime.Object, error) {
		return patchedObject, admit(patchedObject, currentObject)
	}

	updatedObjectInfo := rest.DefaultUpdatedObjectInfo(nil, copier, applyPatch, applyAdmission)

	return finishRequest(timeout, func() (runtime.Object, error) {
		updateObject, _, updateErr := patcher.Update(ctx, name, updatedObjectInfo)
		// Retry bounded by MaxPatchConflicts; lastConflictErr feeds back into
		// applyPatch so a meaningful conflict can be surfaced to the caller.
		for i := 0; i < MaxPatchConflicts && (errors.IsConflict(updateErr)); i++ {
			lastConflictErr = updateErr
			updateObject, _, updateErr = patcher.Update(ctx, name, updatedObjectInfo)
		}
		return updateObject, updateErr
	})
}
func TestScaleUpdate(t *testing.T) { storage, server := newStorage(t) defer server.Terminate(t) name := "foo" var rs extensions.ReplicaSet ctx := api.WithNamespace(api.NewContext(), api.NamespaceDefault) key := etcdtest.AddPrefix("/replicasets/" + api.NamespaceDefault + "/" + name) if err := storage.ReplicaSet.Storage.Create(ctx, key, &validReplicaSet, &rs, 0); err != nil { t.Fatalf("error setting new replica set (key: %s) %v: %v", key, validReplicaSet, err) } replicas := 12 update := extensions.Scale{ ObjectMeta: api.ObjectMeta{ Name: name, Namespace: api.NamespaceDefault, }, Spec: extensions.ScaleSpec{ Replicas: int32(replicas), }, } if _, _, err := storage.Scale.Update(ctx, update.Name, rest.DefaultUpdatedObjectInfo(&update, api.Scheme)); err != nil { t.Fatalf("error updating scale %v: %v", update, err) } obj, err := storage.Scale.Get(ctx, name) if err != nil { t.Fatalf("error fetching scale for %s: %v", name, err) } scale := obj.(*extensions.Scale) if scale.Spec.Replicas != int32(replicas) { t.Errorf("wrong replicas count expected: %d got: %d", replicas, scale.Spec.Replicas) } update.ResourceVersion = rs.ResourceVersion update.Spec.Replicas = 15 if _, _, err = storage.Scale.Update(ctx, update.Name, rest.DefaultUpdatedObjectInfo(&update, api.Scheme)); err != nil && !errors.IsConflict(err) { t.Fatalf("unexpected error, expecting an update conflict but got %v", err) } }