// CreateOrUpdate attempts to update the current etcd state with the provided
// allocation.
func (e *Etcd) CreateOrUpdate(snapshot *api.RangeAllocation) error {
	e.lock.Lock()
	defer e.lock.Unlock()

	last := ""
	err := e.storage.GuaranteedUpdate(context.TODO(), e.baseKey, &api.RangeAllocation{}, true, nil,
		storage.SimpleUpdate(func(input runtime.Object) (output runtime.Object, err error) {
			existing := input.(*api.RangeAllocation)
			switch {
			case len(snapshot.ResourceVersion) != 0 && len(existing.ResourceVersion) != 0:
				if snapshot.ResourceVersion != existing.ResourceVersion {
					return nil, k8serr.NewConflict(e.resource, "", fmt.Errorf("the provided resource version does not match"))
				}
			case len(existing.ResourceVersion) != 0:
				return nil, k8serr.NewConflict(e.resource, "", fmt.Errorf("another caller has already initialized the resource"))
			}
			last = snapshot.ResourceVersion
			return snapshot, nil
		}),
	)
	if err != nil {
		return storeerr.InterpretUpdateError(err, e.resource, "")
	}
	err = e.alloc.Restore(snapshot.Range, snapshot.Data)
	if err == nil {
		e.last = last
	}
	return err
}
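// Illustrative sketch, not from the original source: one way a caller might
// drive CreateOrUpdate. Leaving ResourceVersion empty asks for one-time
// initialization (it conflicts if another caller got there first), while a
// non-empty ResourceVersion overwrites only when it matches the stored state.
// The range value below is hypothetical.
func exampleCreateOrUpdate(e *Etcd) error {
	snapshot := &api.RangeAllocation{
		Range: "10.0.0.0/24", // hypothetical service IP range
		// ResourceVersion left empty: succeed only if the resource is uninitialized.
	}
	// A conflict error here means another caller initialized or changed the
	// stored allocation first.
	return e.CreateOrUpdate(snapshot)
}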
// assignPod assigns the given pod to the given machine.
func (r *BindingREST) assignPod(ctx api.Context, podID string, machine string, annotations map[string]string) (err error) {
	if _, err = r.setPodHostAndAnnotations(ctx, podID, "", machine, annotations); err != nil {
		err = storeerr.InterpretGetError(err, api.Resource("pods"), podID)
		err = storeerr.InterpretUpdateError(err, api.Resource("pods"), podID)
		if _, ok := err.(*errors.StatusError); !ok {
			err = errors.NewConflict(api.Resource("pods/binding"), podID, err)
		}
	}
	return
}
// rollbackDeployment applies the given rollback config and annotations to the
// named deployment, translating storage errors into API status errors.
func (r *RollbackREST) rollbackDeployment(ctx api.Context, deploymentID string, config *extensions.RollbackConfig, annotations map[string]string) (err error) {
	if _, err = r.setDeploymentRollback(ctx, deploymentID, config, annotations); err != nil {
		err = storeerr.InterpretGetError(err, extensions.Resource("deployments"), deploymentID)
		err = storeerr.InterpretUpdateError(err, extensions.Resource("deployments"), deploymentID)
		if _, ok := err.(*errors.StatusError); !ok {
			err = errors.NewConflict(extensions.Resource("deployments/rollback"), deploymentID, err)
		}
	}
	return
}
// rollbackDeployment is a later variant of the function above; unrecognized
// errors are surfaced as internal errors rather than conflicts.
func (r *RollbackREST) rollbackDeployment(ctx genericapirequest.Context, deploymentID string, config *extensions.RollbackConfig, annotations map[string]string) error {
	if _, err := r.setDeploymentRollback(ctx, deploymentID, config, annotations); err != nil {
		err = storeerr.InterpretGetError(err, extensions.Resource("deployments"), deploymentID)
		err = storeerr.InterpretUpdateError(err, extensions.Resource("deployments"), deploymentID)
		if _, ok := err.(*errors.StatusError); !ok {
			err = errors.NewInternalError(err)
		}
		return err
	}
	return nil
}
// this function needs to be kept in sync with updateForGracefulDeletionAndFinalizers.
func (e *Store) updateForGracefulDeletion(ctx api.Context, name, key string, options *api.DeleteOptions, preconditions storage.Preconditions, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {
	lastGraceful := int64(0)
	out = e.NewFunc()
	err = e.Storage.GuaranteedUpdate(
		ctx, key, out, false, &preconditions,
		storage.SimpleUpdate(func(existing runtime.Object) (runtime.Object, error) {
			graceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, existing, options)
			if err != nil {
				return nil, err
			}
			if pendingGraceful {
				return nil, errAlreadyDeleting
			}
			if !graceful {
				return nil, errDeleteNow
			}
			lastGraceful = *options.GracePeriodSeconds
			lastExisting = existing
			return existing, nil
		}),
	)
	switch err {
	case nil:
		if lastGraceful > 0 {
			return nil, false, false, out, lastExisting
		}
		// If we are here, the registry supports the grace period mechanism and
		// we are intentionally deleting gracelessly. In this case, we may
		// enter a race with other k8s components. If another component wins
		// the race, the object will not be found, and we should tolerate
		// the NotFound error. See
		// https://github.com/kubernetes/kubernetes/issues/19403 for
		// details.
		return nil, true, true, out, lastExisting
	case errDeleteNow:
		// we've updated the object to have a zero grace period, or it's already at 0, so
		// we should fall through and truly delete the object.
		return nil, false, true, out, lastExisting
	case errAlreadyDeleting:
		out, err = e.finalizeDelete(in, true)
		return err, false, false, out, lastExisting
	default:
		return storeerr.InterpretUpdateError(err, e.QualifiedResource, name), false, false, out, lastExisting
	}
}
// tryUpdate performs a read-update to persist the latest snapshot state of allocation.
func (e *Etcd) tryUpdate(fn func() error) error {
	err := e.storage.GuaranteedUpdate(context.TODO(), e.baseKey, &api.RangeAllocation{}, true, nil,
		storage.SimpleUpdate(func(input runtime.Object) (output runtime.Object, err error) {
			existing := input.(*api.RangeAllocation)
			if len(existing.ResourceVersion) == 0 {
				return nil, fmt.Errorf("cannot allocate resources of type %s at this time", e.resource.String())
			}
			if existing.ResourceVersion != e.last {
				if err := e.alloc.Restore(existing.Range, existing.Data); err != nil {
					return nil, err
				}
				if err := fn(); err != nil {
					return nil, err
				}
			}
			e.last = existing.ResourceVersion
			rangeSpec, data := e.alloc.Snapshot()
			existing.Range = rangeSpec
			existing.Data = data
			return existing, nil
		}),
	)
	return storeerr.InterpretUpdateError(err, e.resource, "")
}
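// Illustrative sketch, not from the original source: tryUpdate is typically
// driven from an allocator method. When the stored ResourceVersion differs
// from e.last, the closure re-applies the local change on top of the
// refreshed state before the snapshot is written back. The sentinel error
// below is hypothetical.
var errExampleUnableToAllocate = fmt.Errorf("cannot allocate: already in use or out of range")

func (e *Etcd) allocateExample(offset int) error {
	e.lock.Lock()
	defer e.lock.Unlock()
	return e.tryUpdate(func() error {
		ok, err := e.alloc.Allocate(offset)
		if err != nil {
			return err
		}
		if !ok {
			return errExampleUnableToAllocate
		}
		return nil
	})
}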
// this function needs to be kept in sync with updateForGracefulDeletion.
func (e *Store) updateForGracefulDeletionAndFinalizers(ctx api.Context, name, key string, options *api.DeleteOptions, preconditions storage.Preconditions, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {
	lastGraceful := int64(0)
	var pendingFinalizers bool
	out = e.NewFunc()
	err = e.Storage.GuaranteedUpdate(
		ctx, key, out, false, &preconditions,
		storage.SimpleUpdate(func(existing runtime.Object) (runtime.Object, error) {
			graceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, existing, options)
			if err != nil {
				return nil, err
			}
			if pendingGraceful {
				return nil, errAlreadyDeleting
			}

			// Add/remove the orphan finalizer as the options dictate.
			// Note that this occurs after checking pendingGraceful, so
			// finalizers cannot be updated via DeleteOptions if deletion has
			// started.
			existingAccessor, err := meta.Accessor(existing)
			if err != nil {
				return nil, err
			}
			shouldUpdate, newFinalizers := shouldUpdateFinalizers(existingAccessor, options)
			if shouldUpdate {
				existingAccessor.SetFinalizers(newFinalizers)
			}

			if !graceful {
				// set DeletionGracePeriodSeconds to 0 if the object has pending
				// finalizers but does not support graceful deletion
				pendingFinalizers = len(existingAccessor.GetFinalizers()) != 0
				if pendingFinalizers {
					glog.V(6).Infof("update the DeletionTimestamp to \"now\" and GracePeriodSeconds to 0 for object %s, because it has pending finalizers", name)
					err = markAsDeleting(existing)
					if err != nil {
						return nil, err
					}
					return existing, nil
				}
				return nil, errDeleteNow
			}
			lastGraceful = *options.GracePeriodSeconds
			lastExisting = existing
			return existing, nil
		}),
	)
	switch err {
	case nil:
		// If there are pending finalizers, we never delete the object immediately.
		if pendingFinalizers {
			return nil, false, false, out, lastExisting
		}
		if lastGraceful > 0 {
			return nil, false, false, out, lastExisting
		}
		// If we are here, the registry supports the grace period mechanism and
		// we are intentionally deleting gracelessly. In this case, we may
		// enter a race with other k8s components. If another component wins
		// the race, the object will not be found, and we should tolerate
		// the NotFound error. See
		// https://github.com/kubernetes/kubernetes/issues/19403 for
		// details.
		return nil, true, true, out, lastExisting
	case errDeleteNow:
		// we've updated the object to have a zero grace period, or it's already at 0, so
		// we should fall through and truly delete the object.
		return nil, false, true, out, lastExisting
	case errAlreadyDeleting:
		out, err = e.finalizeDelete(in, true)
		return err, false, false, out, lastExisting
	default:
		return storeerr.InterpretUpdateError(err, e.QualifiedResource, name), false, false, out, lastExisting
	}
}
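// Illustrative sketch, not from the original source: the finalizer handling
// above is driven by the caller's DeleteOptions. Assuming a DeleteOptions
// type with an OrphanDependents field (as in this API version), a caller
// requests orphaning like this, and shouldUpdateFinalizers translates it into
// adding or removing the orphan finalizer on the stored object.
func exampleOrphaningDeleteOptions() *api.DeleteOptions {
	orphan := true
	return &api.DeleteOptions{
		OrphanDependents: &orphan, // keep/add the orphan finalizer before deletion proceeds
	}
}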
// Update performs an atomic update and set of the object. Returns the result of the update
// or an error. If the registry allows create-on-update, the create flow will be executed.
// A bool is returned along with the object and any errors, to indicate object creation.
func (e *Store) Update(ctx api.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) {
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, false, err
	}

	var (
		creatingObj runtime.Object
		creating    = false
	)

	storagePreconditions := &storage.Preconditions{}
	if preconditions := objInfo.Preconditions(); preconditions != nil {
		storagePreconditions.UID = preconditions.UID
	}

	out := e.NewFunc()
	// deleteObj is only used in case a deletion is carried out
	var deleteObj runtime.Object
	err = e.Storage.GuaranteedUpdate(ctx, key, out, true, storagePreconditions, func(existing runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) {
		// Given the existing object, get the new object
		obj, err := objInfo.UpdatedObject(ctx, existing)
		if err != nil {
			return nil, nil, err
		}

		// If AllowUnconditionalUpdate() is true and the object specified by
		// the user does not have a resource version, then we populate it with
		// the latest version. Else, we check that the version specified by
		// the user matches the version of the latest storage object.
		resourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
		if err != nil {
			return nil, nil, err
		}
		doUnconditionalUpdate := resourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate()

		version, err := e.Storage.Versioner().ObjectResourceVersion(existing)
		if err != nil {
			return nil, nil, err
		}
		if version == 0 {
			if !e.UpdateStrategy.AllowCreateOnUpdate() {
				return nil, nil, kubeerr.NewNotFound(e.QualifiedResource, name)
			}
			creating = true
			creatingObj = obj
			if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
				return nil, nil, err
			}
			ttl, err := e.calculateTTL(obj, 0, false)
			if err != nil {
				return nil, nil, err
			}
			return obj, &ttl, nil
		}

		creating = false
		creatingObj = nil
		if doUnconditionalUpdate {
			// Update the object's resource version to match the latest storage object's resource version.
			err = e.Storage.Versioner().UpdateObject(obj, res.ResourceVersion)
			if err != nil {
				return nil, nil, err
			}
		} else {
			// Check if the object's resource version matches the latest resource version.
			newVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
			if err != nil {
				return nil, nil, err
			}
			if newVersion == 0 {
				// TODO: The Invalid error should have a field for Resource.
				// After that field is added, we should fill the Resource and
				// leave the Kind field empty. See the discussion in #18526.
				qualifiedKind := unversioned.GroupKind{Group: e.QualifiedResource.Group, Kind: e.QualifiedResource.Resource}
				fieldErrList := field.ErrorList{field.Invalid(field.NewPath("metadata").Child("resourceVersion"), newVersion, "must be specified for an update")}
				return nil, nil, kubeerr.NewInvalid(qualifiedKind, name, fieldErrList)
			}
			if newVersion != version {
				return nil, nil, kubeerr.NewConflict(e.QualifiedResource, name, fmt.Errorf(OptimisticLockErrorMsg))
			}
		}
		if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil {
			return nil, nil, err
		}
		if e.shouldDelete(ctx, key, obj, existing) {
			deleteObj = obj
			return nil, nil, errEmptiedFinalizers
		}
		ttl, err := e.calculateTTL(obj, res.TTL, true)
		if err != nil {
			return nil, nil, err
		}
		if int64(ttl) != res.TTL {
			return obj, &ttl, nil
		}
		return obj, nil, nil
	})

	if err != nil {
		// delete the object
		if err == errEmptiedFinalizers {
			return e.deleteForEmptyFinalizers(ctx, name, key, deleteObj, storagePreconditions)
		}
		if creating {
			err = storeerr.InterpretCreateError(err, e.QualifiedResource, name)
			err = rest.CheckGeneratedNameError(e.CreateStrategy, err, creatingObj)
		} else {
			err = storeerr.InterpretUpdateError(err, e.QualifiedResource, name)
		}
		return nil, false, err
	}

	if creating {
		if e.AfterCreate != nil {
			if err := e.AfterCreate(out); err != nil {
				return nil, false, err
			}
		}
	} else {
		if e.AfterUpdate != nil {
			if err := e.AfterUpdate(out); err != nil {
				return nil, false, err
			}
		}
	}
	if e.Decorator != nil {
		if err := e.Decorator(out); err != nil {
			return nil, false, err
		}
	}
	return out, creating, nil
}
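// Illustrative sketch, not from the original source: the two strategy flags
// consulted in Update above, shown on a hypothetical partial strategy (the
// real rest.RESTUpdateStrategy interface has additional methods).
// AllowCreateOnUpdate selects the create flow when the object does not yet
// exist (version == 0); AllowUnconditionalUpdate lets callers omit
// resourceVersion and overwrite whatever is stored.
type exampleUpdateStrategy struct{}

func (exampleUpdateStrategy) AllowCreateOnUpdate() bool { return false } // updates must target an existing object

func (exampleUpdateStrategy) AllowUnconditionalUpdate() bool { return true } // missing resourceVersion means "overwrite latest"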
// Delete enforces life-cycle rules for namespace termination
func (r *REST) Delete(ctx api.Context, name string, options *api.DeleteOptions) (runtime.Object, error) {
	nsObj, err := r.Get(ctx, name)
	if err != nil {
		return nil, err
	}

	namespace := nsObj.(*api.Namespace)

	// Ensure we have a UID precondition
	if options == nil {
		options = api.NewDeleteOptions(0)
	}
	if options.Preconditions == nil {
		options.Preconditions = &api.Preconditions{}
	}
	if options.Preconditions.UID == nil {
		options.Preconditions.UID = &namespace.UID
	} else if *options.Preconditions.UID != namespace.UID {
		err = apierrors.NewConflict(
			api.Resource("namespaces"),
			name,
			fmt.Errorf("Precondition failed: UID in precondition: %v, UID in object meta: %v", *options.Preconditions.UID, namespace.UID),
		)
		return nil, err
	}

	// upon first request to delete, we switch the phase to start namespace termination
	// TODO: enhance graceful deletion's calls to DeleteStrategy to allow phase change and finalizer patterns
	if namespace.DeletionTimestamp.IsZero() {
		key, err := r.Store.KeyFunc(ctx, name)
		if err != nil {
			return nil, err
		}

		preconditions := storage.Preconditions{UID: options.Preconditions.UID}

		out := r.Store.NewFunc()
		err = r.Store.Storage.GuaranteedUpdate(
			ctx, key, out, false, &preconditions,
			storage.SimpleUpdate(func(existing runtime.Object) (runtime.Object, error) {
				existingNamespace, ok := existing.(*api.Namespace)
				if !ok {
					// wrong type
					return nil, fmt.Errorf("expected *api.Namespace, got %v", existing)
				}
				// Set the deletion timestamp if needed
				if existingNamespace.DeletionTimestamp.IsZero() {
					now := unversioned.Now()
					existingNamespace.DeletionTimestamp = &now
				}
				// Set the namespace phase to terminating, if needed
				if existingNamespace.Status.Phase != api.NamespaceTerminating {
					existingNamespace.Status.Phase = api.NamespaceTerminating
				}
				return existingNamespace, nil
			}),
		)

		if err != nil {
			err = storageerr.InterpretGetError(err, api.Resource("namespaces"), name)
			err = storageerr.InterpretUpdateError(err, api.Resource("namespaces"), name)
			if _, ok := err.(*apierrors.StatusError); !ok {
				err = apierrors.NewInternalError(err)
			}
			return nil, err
		}

		return out, nil
	}

	// prior to final deletion, we must ensure that finalizers is empty
	if len(namespace.Spec.Finalizers) != 0 {
		err = apierrors.NewConflict(api.Resource("namespaces"), namespace.Name, fmt.Errorf("The system is ensuring all content is removed from this namespace. Upon completion, this namespace will automatically be purged by the system."))
		return nil, err
	}
	return r.Store.Delete(ctx, name, options)
}
// Delete removes the item from storage.
func (e *Store) Delete(ctx api.Context, name string, options *api.DeleteOptions) (runtime.Object, error) {
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, err
	}
	obj := e.NewFunc()
	if err := e.Storage.Get(ctx, key, obj, false); err != nil {
		return nil, storeerr.InterpretDeleteError(err, e.QualifiedResource, name)
	}
	// support older consumers of delete by treating "nil" as delete immediately
	if options == nil {
		options = api.NewDeleteOptions(0)
	}
	var preconditions storage.Preconditions
	if options.Preconditions != nil {
		preconditions.UID = options.Preconditions.UID
	}
	graceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, obj, options)
	if err != nil {
		return nil, err
	}
	if pendingGraceful {
		return e.finalizeDelete(obj, false)
	}
	ignoreNotFound := false
	var lastExisting runtime.Object
	if graceful {
		out := e.NewFunc()
		lastGraceful := int64(0)
		err := e.Storage.GuaranteedUpdate(
			ctx, key, out, false, &preconditions,
			storage.SimpleUpdate(func(existing runtime.Object) (runtime.Object, error) {
				graceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, existing, options)
				if err != nil {
					return nil, err
				}
				if pendingGraceful {
					return nil, errAlreadyDeleting
				}
				if !graceful {
					return nil, errDeleteNow
				}
				lastGraceful = *options.GracePeriodSeconds
				lastExisting = existing
				return existing, nil
			}),
		)
		switch err {
		case nil:
			if lastGraceful > 0 {
				return out, nil
			}
			// If we are here, the registry supports the grace period mechanism and
			// we are intentionally deleting gracelessly. In this case, we may
			// enter a race with other k8s components. If another component wins
			// the race, the object will not be found, and we should tolerate
			// the NotFound error. See
			// https://github.com/kubernetes/kubernetes/issues/19403 for
			// details.
			ignoreNotFound = true
			// exit the switch and delete immediately
		case errDeleteNow:
			// we've updated the object to have a zero grace period, or it's already at 0, so
			// we should fall through and truly delete the object.
		case errAlreadyDeleting:
			return e.finalizeDelete(obj, true)
		default:
			return nil, storeerr.InterpretUpdateError(err, e.QualifiedResource, name)
		}
	}

	// delete immediately, or no graceful deletion supported
	out := e.NewFunc()
	if err := e.Storage.Delete(ctx, key, out, &preconditions); err != nil {
		// Please refer to the place where we set ignoreNotFound for the reason
		// why we ignore the NotFound error.
		if storage.IsNotFound(err) && ignoreNotFound && lastExisting != nil {
			// The lastExisting object may not be the last state of the object
			// before its deletion, but it's the best approximation.
			return e.finalizeDelete(lastExisting, true)
		}
		return nil, storeerr.InterpretDeleteError(err, e.QualifiedResource, name)
	}
	return e.finalizeDelete(out, true)
}
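// Illustrative sketch, not from the original source: the caller's
// DeleteOptions select between the two paths through Delete above. A zero
// grace period (or nil options) deletes immediately; a positive grace period
// takes the GuaranteedUpdate branch, which persists the deletion timestamp
// and leaves the actual removal for later.
func exampleDeleteOptions() (immediate, graceful *api.DeleteOptions) {
	immediate = api.NewDeleteOptions(0) // remove from storage now
	graceful = api.NewDeleteOptions(30) // mark as deleting; removal after a 30s grace period
	return immediate, graceful
}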
// Update performs an atomic update and set of the object. Returns the result of the update
// or an error. If the registry allows create-on-update, the create flow will be executed.
// A bool is returned along with the object and any errors, to indicate object creation.
func (e *Store) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) {
	name, err := e.ObjectNameFunc(obj)
	if err != nil {
		return nil, false, err
	}
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, false, err
	}

	// If AllowUnconditionalUpdate() is true and the object specified by the
	// user does not have a resource version, then we populate it with the
	// latest version. Else, we check that the version specified by the user
	// matches the version of the latest storage object.
	resourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
	if err != nil {
		return nil, false, err
	}
	doUnconditionalUpdate := resourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate()
	// TODO: expose TTL
	creating := false
	out := e.NewFunc()
	meta, err := api.ObjectMetaFor(obj)
	if err != nil {
		return nil, false, kubeerr.NewInternalError(err)
	}
	var preconditions *storage.Preconditions
	// If the UID of the new object is specified, we use it as an Update precondition.
	if len(meta.UID) != 0 {
		UIDCopy := meta.UID
		preconditions = &storage.Preconditions{UID: &UIDCopy}
	}
	err = e.Storage.GuaranteedUpdate(ctx, key, out, true, preconditions, func(existing runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) {
		// Since we return 'obj' from this function and it can be modified outside this
		// function, we are resetting resourceVersion to the initial value here.
		//
		// TODO: In fact, we should probably return a DeepCopy of obj in all places.
		err := e.Storage.Versioner().UpdateObject(obj, resourceVersion)
		if err != nil {
			return nil, nil, err
		}

		version, err := e.Storage.Versioner().ObjectResourceVersion(existing)
		if err != nil {
			return nil, nil, err
		}
		if version == 0 {
			if !e.UpdateStrategy.AllowCreateOnUpdate() {
				return nil, nil, kubeerr.NewNotFound(e.QualifiedResource, name)
			}
			creating = true
			if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
				return nil, nil, err
			}
			ttl, err := e.calculateTTL(obj, 0, false)
			if err != nil {
				return nil, nil, err
			}
			return obj, &ttl, nil
		}

		creating = false
		if doUnconditionalUpdate {
			// Update the object's resource version to match the latest storage object's resource version.
			err = e.Storage.Versioner().UpdateObject(obj, res.ResourceVersion)
			if err != nil {
				return nil, nil, err
			}
		} else {
			// Check if the object's resource version matches the latest resource version.
			newVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
			if err != nil {
				return nil, nil, err
			}
			if newVersion == 0 {
				// TODO: The Invalid error should have a field for Resource.
				// After that field is added, we should fill the Resource and
				// leave the Kind field empty. See the discussion in #18526.
				qualifiedKind := unversioned.GroupKind{Group: e.QualifiedResource.Group, Kind: e.QualifiedResource.Resource}
				fieldErrList := field.ErrorList{field.Invalid(field.NewPath("metadata").Child("resourceVersion"), newVersion, "must be specified for an update")}
				return nil, nil, kubeerr.NewInvalid(qualifiedKind, name, fieldErrList)
			}
			if newVersion != version {
				return nil, nil, kubeerr.NewConflict(e.QualifiedResource, name, fmt.Errorf("the object has been modified; please apply your changes to the latest version and try again"))
			}
		}
		if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil {
			return nil, nil, err
		}
		ttl, err := e.calculateTTL(obj, res.TTL, true)
		if err != nil {
			return nil, nil, err
		}
		if int64(ttl) != res.TTL {
			return obj, &ttl, nil
		}
		return obj, nil, nil
	})

	if err != nil {
		if creating {
			err = storeerr.InterpretCreateError(err, e.QualifiedResource, name)
			err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)
		} else {
			err = storeerr.InterpretUpdateError(err, e.QualifiedResource, name)
		}
		return nil, false, err
	}
	if creating {
		if e.AfterCreate != nil {
			if err := e.AfterCreate(out); err != nil {
				return nil, false, err
			}
		}
	} else {
		if e.AfterUpdate != nil {
			if err := e.AfterUpdate(out); err != nil {
				return nil, false, err
			}
		}
	}
	if e.Decorator != nil {
		if err := e.Decorator(obj); err != nil {
			return nil, false, err
		}
	}
	return out, creating, nil
}