func TestCheckGeneratedNameError(t *testing.T) {
	expect := errors.NewNotFound(api.Resource("foos"), "bar")
	if err := rest.CheckGeneratedNameError(Strategy, expect, &api.Pod{}); err != expect {
		t.Errorf("NotFoundError should be ignored: %v", err)
	}

	expect = errors.NewAlreadyExists(api.Resource("foos"), "bar")
	if err := rest.CheckGeneratedNameError(Strategy, expect, &api.Pod{}); err != expect {
		t.Errorf("AlreadyExists should be returned when no GenerateName field: %v", err)
	}

	expect = errors.NewAlreadyExists(api.Resource("foos"), "bar")
	if err := rest.CheckGeneratedNameError(Strategy, expect, &api.Pod{ObjectMeta: metav1.ObjectMeta{GenerateName: "foo"}}); err == nil || !errors.IsServerTimeout(err) {
		t.Errorf("expected try again later error: %v", err)
	}
}
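// The conversion the test above exercises can be summarized as: an
// AlreadyExists error is rewritten into a retryable ServerTimeout only when
// the object used GenerateName, because the server (not the client) picked
// the colliding name. Below is a minimal sketch of that decision in a
// hypothetical helper, under the assumption that only the GenerateName field
// matters (the real implementation lives in k8s.io/apiserver/pkg/registry/rest
// and also consults the create strategy to resolve the object's metadata):
func checkGeneratedNameErrorSketch(err error, objectMeta metav1.Object) error {
	if !errors.IsAlreadyExists(err) {
		// Anything other than a name collision passes through untouched.
		return err
	}
	if len(objectMeta.GetGenerateName()) == 0 {
		// The client chose this name explicitly; the conflict is theirs to resolve.
		return err
	}
	// The server generated the name, so a retry will draw a fresh one.
	return errors.NewServerTimeout(api.Resource("foos"), "POST", 0)
}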
// Create inserts a new item according to the unique key from the object.
func (e *Store) Create(ctx genericapirequest.Context, obj runtime.Object) (runtime.Object, error) {
	if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}
	name, err := e.ObjectNameFunc(obj)
	if err != nil {
		return nil, err
	}
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, err
	}
	ttl, err := e.calculateTTL(obj, 0, false)
	if err != nil {
		return nil, err
	}
	out := e.NewFunc()
	if err := e.Storage.Create(ctx, key, obj, out, ttl); err != nil {
		err = storeerr.InterpretCreateError(err, e.QualifiedResource, name)
		err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)
		if !kubeerr.IsAlreadyExists(err) {
			return nil, err
		}
		// On a name collision, fetch the stored object; if the fetch fails,
		// fall back to returning the original AlreadyExists error.
		if errGet := e.Storage.Get(ctx, key, "", out, false); errGet != nil {
			return nil, err
		}
		accessor, errGetAcc := meta.Accessor(out)
		if errGetAcc != nil {
			return nil, err
		}
		if accessor.GetDeletionTimestamp() != nil {
			msg := &err.(*kubeerr.StatusError).ErrStatus.Message
			*msg = fmt.Sprintf("object is being deleted: %s", *msg)
		}
		return nil, err
	}
	if e.AfterCreate != nil {
		if err := e.AfterCreate(out); err != nil {
			return nil, err
		}
	}
	if e.Decorator != nil {
		// Decorate the object that is actually returned, not the input.
		if err := e.Decorator(out); err != nil {
			return nil, err
		}
	}
	return out, nil
}
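// One subtlety above is the AlreadyExists branch: Create keeps returning the
// original structured error, but first fetches the stored object so it can
// tell the caller when the name is only taken by an object that is already
// being deleted. A hedged sketch of that rewrite in isolation, with the
// hypothetical helper annotateIfBeingDeleted standing in for the inline code
// (kubeerr is k8s.io/apimachinery/pkg/api/errors, as above):
func annotateIfBeingDeleted(err error, existing metav1.Object) error {
	statusErr, ok := err.(*kubeerr.StatusError)
	if !ok || !kubeerr.IsAlreadyExists(err) {
		return err
	}
	if existing.GetDeletionTimestamp() != nil {
		// A retry can succeed once the pending deletion completes, so say so
		// in the message while keeping the AlreadyExists status code intact.
		statusErr.ErrStatus.Message = fmt.Sprintf("object is being deleted: %s", statusErr.ErrStatus.Message)
	}
	return err
}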
// Update performs an atomic update and set of the object. Returns the result of the update
// or an error. If the registry allows create-on-update, the create flow will be executed.
// A bool is returned along with the object and any errors, to indicate object creation.
func (e *Store) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) {
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, false, err
	}

	var (
		creatingObj runtime.Object
		creating    = false
	)

	storagePreconditions := &storage.Preconditions{}
	if preconditions := objInfo.Preconditions(); preconditions != nil {
		storagePreconditions.UID = preconditions.UID
	}

	out := e.NewFunc()
	// deleteObj is only used in case a deletion is carried out
	var deleteObj runtime.Object
	err = e.Storage.GuaranteedUpdate(ctx, key, out, true, storagePreconditions, func(existing runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) {
		// Given the existing object, get the new object
		obj, err := objInfo.UpdatedObject(ctx, existing)
		if err != nil {
			return nil, nil, err
		}

		// If AllowUnconditionalUpdate() is true and the object specified by
		// the user does not have a resource version, then we populate it with
		// the latest version. Otherwise, we check that the version specified
		// by the user matches the version of the latest storage object.
		resourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
		if err != nil {
			return nil, nil, err
		}
		doUnconditionalUpdate := resourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate()

		version, err := e.Storage.Versioner().ObjectResourceVersion(existing)
		if err != nil {
			return nil, nil, err
		}
		if version == 0 {
			if !e.UpdateStrategy.AllowCreateOnUpdate() {
				return nil, nil, kubeerr.NewNotFound(e.QualifiedResource, name)
			}
			creating = true
			creatingObj = obj
			if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
				return nil, nil, err
			}
			ttl, err := e.calculateTTL(obj, 0, false)
			if err != nil {
				return nil, nil, err
			}
			return obj, &ttl, nil
		}

		creating = false
		creatingObj = nil
		if doUnconditionalUpdate {
			// Update the object's resource version to match the latest
			// storage object's resource version.
			err = e.Storage.Versioner().UpdateObject(obj, res.ResourceVersion)
			if err != nil {
				return nil, nil, err
			}
		} else {
			// Check if the object's resource version matches the latest
			// resource version.
			newVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
			if err != nil {
				return nil, nil, err
			}
			if newVersion == 0 {
				// TODO: The Invalid error should have a field for Resource.
				// After that field is added, we should fill the Resource and
				// leave the Kind field empty. See the discussion in #18526.
				qualifiedKind := schema.GroupKind{Group: e.QualifiedResource.Group, Kind: e.QualifiedResource.Resource}
				fieldErrList := field.ErrorList{field.Invalid(field.NewPath("metadata").Child("resourceVersion"), newVersion, "must be specified for an update")}
				return nil, nil, kubeerr.NewInvalid(qualifiedKind, name, fieldErrList)
			}
			if newVersion != version {
				return nil, nil, kubeerr.NewConflict(e.QualifiedResource, name, fmt.Errorf(OptimisticLockErrorMsg))
			}
		}
		if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil {
			return nil, nil, err
		}
		delete := e.shouldDelete(ctx, key, obj, existing)
		if delete {
			deleteObj = obj
			return nil, nil, errEmptiedFinalizers
		}
		ttl, err := e.calculateTTL(obj, res.TTL, true)
		if err != nil {
			return nil, nil, err
		}
		if int64(ttl) != res.TTL {
			return obj, &ttl, nil
		}
		return obj, nil, nil
	})

	if err != nil {
		// delete the object
		if err == errEmptiedFinalizers {
			return e.deleteForEmptyFinalizers(ctx, name, key, deleteObj, storagePreconditions)
		}
		if creating {
			err = storeerr.InterpretCreateError(err, e.QualifiedResource, name)
			err = rest.CheckGeneratedNameError(e.CreateStrategy, err, creatingObj)
		} else {
			err = storeerr.InterpretUpdateError(err, e.QualifiedResource, name)
		}
		return nil, false, err
	}
	if creating {
		if e.AfterCreate != nil {
			if err := e.AfterCreate(out); err != nil {
				return nil, false, err
			}
		}
	} else {
		if e.AfterUpdate != nil {
			if err := e.AfterUpdate(out); err != nil {
				return nil, false, err
			}
		}
	}
	if e.Decorator != nil {
		if err := e.Decorator(out); err != nil {
			return nil, false, err
		}
	}
	return out, creating, nil
}
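// The resource-version handling inside the GuaranteedUpdate closure above
// reduces to a small decision table, which is easier to see stripped of the
// storage plumbing. A hedged sketch in a hypothetical helper (resource
// versions are uint64 values where 0 means "unset" on the incoming object
// and "does not exist" for the stored one):
func updateDecisionSketch(newRV, existingRV uint64, allowUnconditional, allowCreateOnUpdate bool) (string, error) {
	switch {
	case existingRV == 0 && !allowCreateOnUpdate:
		return "", fmt.Errorf("not found") // nothing stored and create-on-update is disallowed
	case existingRV == 0:
		return "create", nil // create-on-update path: BeforeCreate runs instead of BeforeUpdate
	case newRV == 0 && allowUnconditional:
		return "update", nil // adopt the latest stored resource version and overwrite
	case newRV == 0:
		return "", fmt.Errorf("metadata.resourceVersion: must be specified for an update")
	case newRV != existingRV:
		return "", fmt.Errorf("conflict") // optimistic lock failure; the client must re-read and retry
	default:
		return "update", nil
	}
}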
func (rs *REST) Create(ctx genericapirequest.Context, obj runtime.Object) (runtime.Object, error) {
	service := obj.(*api.Service)

	if err := rest.BeforeCreate(Strategy, ctx, obj); err != nil {
		return nil, err
	}

	// TODO: this should probably move to strategy.PrepareForCreate()
	releaseServiceIP := false
	defer func() {
		if releaseServiceIP {
			if api.IsServiceIPSet(service) {
				rs.serviceIPs.Release(net.ParseIP(service.Spec.ClusterIP))
			}
		}
	}()

	nodePortOp := portallocator.StartOperation(rs.serviceNodePorts)
	defer nodePortOp.Finish()

	if api.IsServiceIPRequested(service) {
		// Allocate next available.
		ip, err := rs.serviceIPs.AllocateNext()
		if err != nil {
			// TODO: what error should be returned here? It's not a
			// field-level validation failure (the field is valid), and it's
			// not really an internal error.
			return nil, errors.NewInternalError(fmt.Errorf("failed to allocate a serviceIP: %v", err))
		}
		service.Spec.ClusterIP = ip.String()
		releaseServiceIP = true
	} else if api.IsServiceIPSet(service) {
		// Try to respect the requested IP.
		if err := rs.serviceIPs.Allocate(net.ParseIP(service.Spec.ClusterIP)); err != nil {
			// TODO: when validation becomes versioned, this gets more complicated.
			el := field.ErrorList{field.Invalid(field.NewPath("spec", "clusterIP"), service.Spec.ClusterIP, err.Error())}
			return nil, errors.NewInvalid(api.Kind("Service"), service.Name, el)
		}
		releaseServiceIP = true
	}

	assignNodePorts := shouldAssignNodePorts(service)
	svcPortToNodePort := map[int]int{}
	for i := range service.Spec.Ports {
		servicePort := &service.Spec.Ports[i]
		allocatedNodePort := svcPortToNodePort[int(servicePort.Port)]
		if allocatedNodePort == 0 {
			// This will only scan forward in the service.Spec.Ports list because any matches
			// before the current port would have been found in svcPortToNodePort. This is really
			// looking for any user provided values.
			np := findRequestedNodePort(int(servicePort.Port), service.Spec.Ports)
			if np != 0 {
				err := nodePortOp.Allocate(np)
				if err != nil {
					// TODO: when validation becomes versioned, this gets more complicated.
					el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), np, err.Error())}
					return nil, errors.NewInvalid(api.Kind("Service"), service.Name, el)
				}
				servicePort.NodePort = int32(np)
				svcPortToNodePort[int(servicePort.Port)] = np
			} else if assignNodePorts {
				nodePort, err := nodePortOp.AllocateNext()
				if err != nil {
					// TODO: what error should be returned here? It's not a
					// field-level validation failure (the field is valid), and it's
					// not really an internal error.
					return nil, errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err))
				}
				servicePort.NodePort = int32(nodePort)
				svcPortToNodePort[int(servicePort.Port)] = nodePort
			}
		} else if int(servicePort.NodePort) != allocatedNodePort {
			if servicePort.NodePort == 0 {
				servicePort.NodePort = int32(allocatedNodePort)
			} else {
				err := nodePortOp.Allocate(int(servicePort.NodePort))
				if err != nil {
					// TODO: when validation becomes versioned, this gets more complicated.
					el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), servicePort.NodePort, err.Error())}
					return nil, errors.NewInvalid(api.Kind("Service"), service.Name, el)
				}
			}
		}
	}

	if shouldCheckOrAssignHealthCheckNodePort(service) {
		var healthCheckNodePort int
		var err error
		if l, ok := service.Annotations[apiservice.BetaAnnotationHealthCheckNodePort]; ok {
			healthCheckNodePort, err = strconv.Atoi(l)
			if err != nil || healthCheckNodePort <= 0 {
				return nil, errors.NewInternalError(fmt.Errorf("Failed to parse annotation %v: %v", apiservice.BetaAnnotationHealthCheckNodePort, err))
			}
		}
		if healthCheckNodePort > 0 {
			// If the request has a health check nodePort in mind, attempt to reserve it
			err := nodePortOp.Allocate(int(healthCheckNodePort))
			if err != nil {
				return nil, errors.NewInternalError(fmt.Errorf("Failed to allocate requested HealthCheck nodePort %v: %v", healthCheckNodePort, err))
			}
		} else {
			// If the request has no health check nodePort specified, allocate any
			healthCheckNodePort, err = nodePortOp.AllocateNext()
			if err != nil {
				// TODO: what error should be returned here? It's not a
				// field-level validation failure (the field is valid), and it's
				// not really an internal error.
				return nil, errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err))
			}
			// Insert the newly allocated health check port as an annotation (plan of record for Alpha)
			service.Annotations[apiservice.BetaAnnotationHealthCheckNodePort] = fmt.Sprintf("%d", healthCheckNodePort)
		}
	}

	out, err := rs.registry.CreateService(ctx, service)
	if err != nil {
		err = rest.CheckGeneratedNameError(Strategy, err, service)
	}

	if err == nil {
		el := nodePortOp.Commit()
		if el != nil {
			// these should be caught by an eventual reconciliation / restart
			glog.Errorf("error(s) committing service node-ports changes: %v", el)
		}

		releaseServiceIP = false
	}

	return out, err
}
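// The releaseServiceIP flag above implements a defer-based rollback: allocate
// the ClusterIP eagerly, arm a release in a deferred function, and disarm it
// only after CreateService and the node-port commit succeed, so every early
// return frees the IP. A hedged sketch of that pattern in isolation (the
// function parameters are hypothetical stand-ins for rs.serviceIPs and the
// body of Create):
func allocateWithRollback(allocate func() error, release func(), work func() error) error {
	if err := allocate(); err != nil {
		return err
	}
	armed := true
	defer func() {
		if armed {
			release() // any failure path below rolls the allocation back
		}
	}()
	if err := work(); err != nil {
		return err
	}
	armed = false // commit: the allocation now belongs to the stored object
	return nil
}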