// CreateOrUpdate attempts to update the current etcd state with the provided
// allocation.
func (e *Etcd) CreateOrUpdate(snapshot *api.RangeAllocation) error {
	e.lock.Lock()
	defer e.lock.Unlock()

	last := ""
	err := e.storage.GuaranteedUpdate(context.TODO(), e.baseKey, &api.RangeAllocation{}, true, nil,
		storage.SimpleUpdate(func(input runtime.Object) (output runtime.Object, err error) {
			existing := input.(*api.RangeAllocation)
			switch {
			case len(snapshot.ResourceVersion) != 0 && len(existing.ResourceVersion) != 0:
				if snapshot.ResourceVersion != existing.ResourceVersion {
					return nil, k8serr.NewConflict(e.resource, "", fmt.Errorf("the provided resource version does not match"))
				}
			case len(existing.ResourceVersion) != 0:
				return nil, k8serr.NewConflict(e.resource, "", fmt.Errorf("another caller has already initialized the resource"))
			}
			last = snapshot.ResourceVersion
			return snapshot, nil
		}),
	)
	if err != nil {
		return storeerr.InterpretUpdateError(err, e.resource, "")
	}
	err = e.alloc.Restore(snapshot.Range, snapshot.Data)
	if err == nil {
		e.last = last
	}
	return err
}
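// A hedged caller-side sketch, not from the original source: how the conflict
// returned by CreateOrUpdate above might be consumed. The names syncAllocation,
// etcdAlloc, and snap are hypothetical; only k8serr.IsConflict is assumed from
// the real API errors package.
func syncAllocation(etcdAlloc *Etcd, snap *api.RangeAllocation) error {
	if err := etcdAlloc.CreateOrUpdate(snap); err != nil {
		if k8serr.IsConflict(err) {
			// Another writer initialized or moved the range; refetch the live
			// state and retry rather than overwriting it.
			return fmt.Errorf("allocation state changed concurrently, retry: %v", err)
		}
		return err
	}
	return nil
}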
// Update replaces a given Route instance with an existing instance in rs.registry.
func (rs *REST) Update(ctx kapi.Context, obj runtime.Object) (runtime.Object, bool, error) {
	route, ok := obj.(*api.Route)
	if !ok {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("not a route: %#v", obj))
	}
	if !kapi.ValidNamespace(ctx, &route.ObjectMeta) {
		return nil, false, errors.NewConflict("route", route.Namespace, fmt.Errorf("Route.Namespace does not match the provided context"))
	}

	old, err := rs.Get(ctx, route.Name)
	if err != nil {
		return nil, false, err
	}
	if errs := validation.ValidateRouteUpdate(route, old.(*api.Route)); len(errs) > 0 {
		return nil, false, errors.NewInvalid("route", route.Name, errs)
	}

	// TODO: Convert to generic etcd
	// TODO: Call ValidateRouteUpdate->ValidateObjectMetaUpdate
	// TODO: In the UpdateStrategy.PrepareForUpdate, set the HostGeneratedAnnotationKey
	// annotation to "false" if the updated route object modifies the host

	err = rs.registry.UpdateRoute(ctx, route)
	if err != nil {
		return nil, false, err
	}
	out, err := rs.registry.GetRoute(ctx, route.Name)
	return out, false, err
}
// ResolveImageID returns the latest TagEvent for the specified imageID, and an
// error if no image matches the ID or more than one image matches it.
func ResolveImageID(stream *ImageStream, imageID string) (*TagEvent, error) {
	var event *TagEvent
	set := sets.NewString()
	for _, history := range stream.Status.Tags {
		for i := range history.Items {
			tagging := &history.Items[i]
			if d, err := digest.ParseDigest(tagging.Image); err == nil {
				if strings.HasPrefix(d.Hex(), imageID) || strings.HasPrefix(tagging.Image, imageID) {
					event = tagging
					set.Insert(tagging.Image)
				}
				continue
			}
			if strings.HasPrefix(tagging.Image, imageID) {
				event = tagging
				set.Insert(tagging.Image)
			}
		}
	}
	switch len(set) {
	case 1:
		return &TagEvent{
			Created:              unversioned.Now(),
			DockerImageReference: event.DockerImageReference,
			Image:                event.Image,
		}, nil
	case 0:
		return nil, errors.NewNotFound(Resource("imagestreamimage"), imageID)
	default:
		return nil, errors.NewConflict(Resource("imagestreamimage"), imageID, fmt.Errorf("multiple images match the prefix %q: %s", imageID, strings.Join(set.List(), ", ")))
	}
}
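// A hedged usage sketch for ResolveImageID, not from the original source: the
// stream variable and the digest prefix are hypothetical test data; the error
// checks use the same errors package helpers the function pairs with.
func exampleResolve(stream *ImageStream) {
	event, err := ResolveImageID(stream, "4e38e38c8ce0")
	switch {
	case errors.IsNotFound(err):
		fmt.Println("no image matches the prefix")
	case errors.IsConflict(err):
		fmt.Println("prefix is ambiguous; supply more characters")
	case err == nil:
		fmt.Println("resolved to", event.DockerImageReference)
	}
}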
func TestImageStreamImportUnsupported(t *testing.T) {
	testCases := []struct {
		status unversioned.Status
		errFn  func(err error) bool
	}{
		{
			status: errors.NewNotFound("", "").(kclient.APIStatus).Status(),
			errFn:  func(err error) bool { return err == ErrImageStreamImportUnsupported },
		},
		{
			status: errors.NewNotFound("Other", "").(kclient.APIStatus).Status(),
			errFn: func(err error) bool {
				return err != ErrImageStreamImportUnsupported && errors.IsNotFound(err)
			},
		},
		{
			status: errors.NewConflict("Other", "", nil).(kclient.APIStatus).Status(),
			errFn: func(err error) bool {
				return err != ErrImageStreamImportUnsupported && errors.IsConflict(err)
			},
		},
	}
	for i, test := range testCases {
		c, err := New(&kclient.Config{
			Transport: roundTripFunc(func(req *http.Request) (*http.Response, error) {
				buf := bytes.NewBuffer([]byte(runtime.EncodeOrDie(latest.GroupOrDie("").Codec, &test.status)))
				return &http.Response{StatusCode: http.StatusNotFound, Body: ioutil.NopCloser(buf)}, nil
			}),
		})
		if err != nil {
			t.Fatal(err)
		}
		if _, err := c.ImageStreams("test").Import(&api.ImageStreamImport{}); !test.errFn(err) {
			t.Errorf("%d: error: %v", i, err)
		}
	}
}
// Update updates a LimitRange object.
func (rs *REST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) {
	limitRange, ok := obj.(*api.LimitRange)
	if !ok {
		return nil, false, fmt.Errorf("invalid object type")
	}

	if !api.ValidNamespace(ctx, &limitRange.ObjectMeta) {
		return nil, false, errors.NewConflict("limitRange", limitRange.Namespace, fmt.Errorf("LimitRange.Namespace does not match the provided context"))
	}

	oldObj, err := rs.registry.Get(ctx, limitRange.Name)
	if err != nil {
		return nil, false, err
	}

	editLimitRange := oldObj.(*api.LimitRange)

	// set the editable fields on the existing object
	editLimitRange.Labels = limitRange.Labels
	editLimitRange.ResourceVersion = limitRange.ResourceVersion
	editLimitRange.Annotations = limitRange.Annotations
	editLimitRange.Spec = limitRange.Spec

	if errs := validation.ValidateLimitRange(editLimitRange); len(errs) > 0 {
		return nil, false, errors.NewInvalid("limitRange", editLimitRange.Name, errs)
	}

	if err := rs.registry.UpdateWithName(ctx, editLimitRange.Name, editLimitRange); err != nil {
		return nil, false, err
	}
	out, err := rs.registry.Get(ctx, editLimitRange.Name)
	return out, false, err
}
func TestStatusBackoffOnConflict(t *testing.T) {
	now := nowFn()
	nowFn = func() unversioned.Time { return now }
	touched := unversioned.Time{Time: now.Add(-time.Minute)}
	p := &fakePlugin{}
	c := testclient.NewSimpleFake(&(errors.NewConflict(kapi.Resource("Route"), "route1", nil).ErrStatus))
	admitter := NewStatusAdmitter(p, c, "test")
	err := admitter.HandleRoute(watch.Added, &routeapi.Route{
		ObjectMeta: kapi.ObjectMeta{Name: "route1", Namespace: "default", UID: types.UID("uid1")},
		Spec:       routeapi.RouteSpec{Host: "route1.test.local"},
		Status: routeapi.RouteStatus{
			Ingress: []routeapi.RouteIngress{
				{
					Host:       "route2.test.local",
					RouterName: "test",
					Conditions: []routeapi.RouteIngressCondition{
						{
							Type:               routeapi.RouteAdmitted,
							Status:             kapi.ConditionFalse,
							LastTransitionTime: &touched,
						},
					},
				},
			},
		},
	})
	checkResult(t, err, c, admitter, "route1.test.local", now, nil, 0, 0)
}
func TestControllerError(t *testing.T) {
	testCases := map[string]struct {
		err     func() error
		errFn   func(err error) bool
		reactFn testclient.ReactionFunc
		actions int
	}{
		"not found": {
			err:     func() error { return errors.NewNotFound("namespace", "test") },
			errFn:   func(err error) bool { return err == nil },
			actions: 1,
		},
		"unknown": {
			err:     func() error { return fmt.Errorf("unknown") },
			errFn:   func(err error) bool { return err.Error() == "unknown" },
			actions: 1,
		},
		"conflict": {
			actions: 4,
			reactFn: func(a testclient.Action) (runtime.Object, error) {
				if a.Matches("get", "namespaces") {
					return &kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: "test"}}, nil
				}
				return (*kapi.Namespace)(nil), errors.NewConflict("namespace", "test", fmt.Errorf("test conflict"))
			},
			errFn: func(err error) bool {
				return err != nil && strings.Contains(err.Error(), "unable to allocate security info")
			},
		},
	}

	for s, testCase := range testCases {
		client := &testclient.Fake{ReactFn: testCase.reactFn}
		if client.ReactFn == nil {
			client.ReactFn = func(a testclient.Action) (runtime.Object, error) {
				return (*kapi.Namespace)(nil), testCase.err()
			}
		}

		uidr, _ := uid.NewRange(10, 19, 2)
		mcsr, _ := mcs.NewRange("s0:", 10, 2)
		uida := uidallocator.NewInMemory(uidr)
		c := Allocation{
			uid:    uida,
			mcs:    DefaultMCSAllocation(uidr, mcsr, 5),
			client: client.Namespaces(),
		}

		err := c.Next(&kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: "test"}})
		if !testCase.errFn(err) {
			t.Errorf("%s: unexpected error: %v", s, err)
		}
		if len(client.Actions()) != testCase.actions {
			t.Errorf("%s: expected %d actions: %v", s, testCase.actions, client.Actions())
		}
		if uida.Free() != 5 {
			t.Errorf("%s: should not have allocated uid: %d/%d", s, uida.Free(), uidr.Size())
		}
	}
}
func TestStatusBackoffOnConflict(t *testing.T) {
	now := nowFn()
	nowFn = func() unversioned.Time { return now }
	touched := unversioned.Time{Time: now.Add(-time.Minute)}
	p := &fakePlugin{}
	c := testclient.NewSimpleFake(&routeapi.Route{ObjectMeta: kapi.ObjectMeta{Name: "route1", Namespace: "default", UID: types.UID("uid1")}})
	c.PrependReactor("update", "routes", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		if action.GetSubresource() != "status" {
			return false, nil, nil
		}
		return true, nil, errors.NewConflict(kapi.Resource("Route"), "route1", nil)
	})
	admitter := NewStatusAdmitter(p, c, "test")
	err := admitter.HandleRoute(watch.Added, &routeapi.Route{
		ObjectMeta: kapi.ObjectMeta{Name: "route1", Namespace: "default", UID: types.UID("uid1")},
		Spec:       routeapi.RouteSpec{Host: "route1.test.local"},
		Status: routeapi.RouteStatus{
			Ingress: []routeapi.RouteIngress{
				{
					Host:       "route2.test.local",
					RouterName: "test",
					Conditions: []routeapi.RouteIngressCondition{
						{
							Type:               routeapi.RouteAdmitted,
							Status:             kapi.ConditionFalse,
							LastTransitionTime: &touched,
						},
					},
				},
			},
		},
	})
	checkResult(t, err, c, admitter, "route1.test.local", now, nil, 0, 0)
}
func (r *ScaleREST) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) {
	if obj == nil {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("nil update passed to Scale"))
	}
	scale, ok := obj.(*experimental.Scale)
	if !ok {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("wrong object passed to Scale update: %v", obj))
	}
	rc, err := (*r.registry).GetController(ctx, scale.Name)
	if err != nil {
		return nil, false, errors.NewNotFound("scale", scale.Name)
	}
	rc.Spec.Replicas = scale.Spec.Replicas
	rc, err = (*r.registry).UpdateController(ctx, rc)
	if err != nil {
		return nil, false, errors.NewConflict("scale", scale.Name, err)
	}
	return &experimental.Scale{
		ObjectMeta: api.ObjectMeta{
			Name:              rc.Name,
			Namespace:         rc.Namespace,
			CreationTimestamp: rc.CreationTimestamp,
		},
		Spec: experimental.ScaleSpec{
			Replicas: rc.Spec.Replicas,
		},
		Status: experimental.ScaleStatus{
			Replicas: rc.Status.Replicas,
			Selector: rc.Spec.Selector,
		},
	}, false, nil
}
func (r *ScaleREST) Update(ctx api.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) {
	rc, err := (*r.registry).GetController(ctx, name)
	if err != nil {
		return nil, false, errors.NewNotFound(extensions.Resource("replicationcontrollers/scale"), name)
	}
	oldScale := scaleFromRC(rc)

	obj, err := objInfo.UpdatedObject(ctx, oldScale)
	if obj == nil {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("nil update passed to Scale"))
	}
	scale, ok := obj.(*extensions.Scale)
	if !ok {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("wrong object passed to Scale update: %v", obj))
	}

	if errs := extvalidation.ValidateScale(scale); len(errs) > 0 {
		return nil, false, errors.NewInvalid(extensions.Kind("Scale"), scale.Name, errs)
	}

	rc.Spec.Replicas = scale.Spec.Replicas
	rc.ResourceVersion = scale.ResourceVersion
	rc, err = (*r.registry).UpdateController(ctx, rc)
	if err != nil {
		return nil, false, errors.NewConflict(extensions.Resource("replicationcontrollers/scale"), scale.Name, err)
	}
	return scaleFromRC(rc), false, nil
}
// Create creates a LimitRange object.
func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
	limitRange, ok := obj.(*api.LimitRange)
	if !ok {
		return nil, fmt.Errorf("invalid object type")
	}
	if !api.ValidNamespace(ctx, &limitRange.ObjectMeta) {
		return nil, errors.NewConflict("limitRange", limitRange.Namespace, fmt.Errorf("LimitRange.Namespace does not match the provided context"))
	}
	if len(limitRange.Name) == 0 {
		limitRange.Name = string(util.NewUUID())
	}
	if errs := validation.ValidateLimitRange(limitRange); len(errs) > 0 {
		return nil, errors.NewInvalid("limitRange", limitRange.Name, errs)
	}
	api.FillObjectMetaSystemFields(ctx, &limitRange.ObjectMeta)

	err := rs.registry.CreateWithName(ctx, limitRange.Name, limitRange)
	if err != nil {
		return nil, err
	}
	return rs.registry.Get(ctx, limitRange.Name)
}
// createDockerPullSecretReference updates a service account to reference the
// dockercfgSecret as a Secret and an ImagePullSecret.
func (e *DockercfgController) createDockerPullSecretReference(staleServiceAccount *api.ServiceAccount, dockercfgSecretName string) error {
	liveServiceAccount, err := e.client.ServiceAccounts(staleServiceAccount.Namespace).Get(staleServiceAccount.Name)
	if err != nil {
		return err
	}

	mountableDockercfgSecrets, imageDockercfgPullSecrets := getGeneratedDockercfgSecretNames(liveServiceAccount)
	staleDockercfgMountableSecrets, staleImageDockercfgPullSecrets := getGeneratedDockercfgSecretNames(staleServiceAccount)

	// if we're trying to create a reference based on stale lists of dockercfg secrets, let the caller know
	if !reflect.DeepEqual(staleDockercfgMountableSecrets.List(), mountableDockercfgSecrets.List()) ||
		!reflect.DeepEqual(staleImageDockercfgPullSecrets.List(), imageDockercfgPullSecrets.List()) {
		return kapierrors.NewConflict(api.Resource("serviceaccount"), staleServiceAccount.Name,
			fmt.Errorf("cannot add reference to %s based on stale data. decision made for %v,%v, but live version is %v,%v",
				dockercfgSecretName, staleDockercfgMountableSecrets.List(), staleImageDockercfgPullSecrets.List(),
				mountableDockercfgSecrets.List(), imageDockercfgPullSecrets.List()))
	}

	changed := false
	if !mountableDockercfgSecrets.Has(dockercfgSecretName) {
		liveServiceAccount.Secrets = append(liveServiceAccount.Secrets, api.ObjectReference{Name: dockercfgSecretName})
		changed = true
	}
	if !imageDockercfgPullSecrets.Has(dockercfgSecretName) {
		liveServiceAccount.ImagePullSecrets = append(liveServiceAccount.ImagePullSecrets, api.LocalObjectReference{Name: dockercfgSecretName})
		changed = true
	}

	if changed {
		if _, err = e.client.ServiceAccounts(liveServiceAccount.Namespace).Update(liveServiceAccount); err != nil {
			// TODO: retry on API conflicts in case the conflict was unrelated to our generated dockercfg secrets?
			return err
		}
	}
	return nil
}
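// A hedged sketch of the retry the TODO above alludes to, not the controller's
// actual behavior: re-fetch the service account and try again on conflict. The
// wrapper name and the loop bound are illustrative; only kapierrors.IsConflict
// is assumed from the real errors package.
func (e *DockercfgController) createReferenceWithRetry(sa *api.ServiceAccount, secretName string) error {
	var err error
	for attempts := 0; attempts < 3; attempts++ {
		if err = e.createDockerPullSecretReference(sa, secretName); !kapierrors.IsConflict(err) {
			return err
		}
		// the stale-data conflict means our copy of the service account is out
		// of date; re-read it before trying again
		if sa, err = e.client.ServiceAccounts(sa.Namespace).Get(sa.Name); err != nil {
			return err
		}
	}
	return err
}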
func (p *testPatcher) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) {
	inPod := obj.(*api.Pod)
	if inPod.ResourceVersion != p.updatePod.ResourceVersion {
		return nil, false, apierrors.NewConflict(api.Resource("pods"), inPod.Name,
			fmt.Errorf("existing %v, new %v", p.updatePod.ResourceVersion, inPod.ResourceVersion))
	}
	return inPod, false, nil
}
// InterpretUpdateError converts a generic etcd error on an update
// operation into the appropriate API error.
func InterpretUpdateError(err error, kind, name string) error {
	switch {
	case etcdstorage.IsEtcdTestFailed(err), etcdstorage.IsEtcdNodeExist(err):
		return errors.NewConflict(kind, name, err)
	default:
		return err
	}
}
func (c *ErrorDeployments) Update(deployment *extensions.Deployment) (*extensions.Deployment, error) {
	switch {
	case c.invalid:
		return nil, kerrors.NewInvalid(api.Kind(deployment.Kind), deployment.Name, nil)
	case c.conflict:
		return nil, kerrors.NewConflict(api.Resource(deployment.Kind), deployment.Name, nil)
	}
	return nil, errors.New("deployment update failure")
}
func (c *ErrorReplicationControllers) Update(controller *api.ReplicationController) (*api.ReplicationController, error) {
	switch {
	case c.invalid:
		return nil, kerrors.NewInvalid(api.Kind(controller.Kind), controller.Name, nil)
	case c.conflict:
		return nil, kerrors.NewConflict(api.Resource(controller.Kind), controller.Name, nil)
	}
	return nil, errors.New("replication controller update failure")
}
func (c *ErrorJobs) Update(job *batch.Job) (*batch.Job, error) {
	switch {
	case c.invalid:
		return nil, kerrors.NewInvalid(api.Kind(job.Kind), job.Name, nil)
	case c.conflict:
		return nil, kerrors.NewConflict(api.Resource(job.Kind), job.Name, nil)
	}
	return nil, errors.New("job update failure")
}
func TestRetryOnConflict(t *testing.T) {
	opts := wait.Backoff{Factor: 1.0, Steps: 3}
	conflictErr := errors.NewConflict(unversioned.GroupResource{Resource: "test"}, "other", nil)

	// always conflicts: retries are exhausted and the conflict error is returned
	err := RetryOnConflict(opts, func() error {
		return conflictErr
	})
	if err != conflictErr {
		t.Errorf("unexpected error: %v", err)
	}

	// returns immediately on success
	i := 0
	err = RetryOnConflict(opts, func() error {
		i++
		return nil
	})
	if err != nil || i != 1 {
		t.Errorf("unexpected error: %v", err)
	}

	// returns immediately on a non-conflict error
	testErr := fmt.Errorf("some other error")
	err = RetryOnConflict(opts, func() error {
		return testErr
	})
	if err != testErr {
		t.Errorf("unexpected error: %v", err)
	}

	// keeps retrying while conflicts persist
	i = 0
	err = RetryOnConflict(opts, func() error {
		if i < 2 {
			i++
			return errors.NewConflict(unversioned.GroupResource{Resource: "test"}, "other", nil)
		}
		return nil
	})
	if err != nil || i != 2 {
		t.Errorf("unexpected error: %v", err)
	}
}
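// A hedged sketch of how RetryOnConflict is typically used outside tests:
// re-read the object on each attempt so the update is computed against the
// latest ResourceVersion. The function name, client, namespace, and backoff
// values here are hypothetical.
func scaleWithRetry(client kclient.Interface, ns, name string, replicas int) error {
	return RetryOnConflict(wait.Backoff{Factor: 1.0, Steps: 5}, func() error {
		rc, err := client.ReplicationControllers(ns).Get(name)
		if err != nil {
			return err
		}
		rc.Spec.Replicas = replicas
		_, err = client.ReplicationControllers(ns).Update(rc)
		return err // a conflict here triggers another attempt with fresh data
	})
}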
// BeforeDelete tests whether the object can be gracefully deleted. If graceful is set,
// the object should be gracefully deleted. If gracefulPending is set, the object has
// already been gracefully deleted (and the provided grace period is longer than the time
// to deletion). An error is returned if the condition cannot be checked or the
// gracePeriodSeconds is invalid. The options argument may be updated with default values
// if graceful is true. The second place we set deletionTimestamp is
// pkg/registry/generic/registry/store.go: this function sets it during graceful deletion,
// the other during cascading deletions.
func BeforeDelete(strategy RESTDeleteStrategy, ctx api.Context, obj runtime.Object, options *api.DeleteOptions) (graceful, gracefulPending bool, err error) {
	objectMeta, gvk, kerr := objectMetaAndKind(strategy, obj)
	if kerr != nil {
		return false, false, kerr
	}

	// Checking the Preconditions here to fail early. They'll be enforced later on when we actually do the deletion, too.
	if options.Preconditions != nil && options.Preconditions.UID != nil && *options.Preconditions.UID != objectMeta.UID {
		return false, false, errors.NewConflict(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, objectMeta.Name, fmt.Errorf("the UID in the precondition (%s) does not match the UID in record (%s). The object might have been deleted and then recreated", *options.Preconditions.UID, objectMeta.UID))
	}

	gracefulStrategy, ok := strategy.(RESTGracefulDeleteStrategy)
	if !ok {
		// If we're not deleting gracefully there's no point in updating Generation, as we won't update
		// the object before deleting it.
		return false, false, nil
	}
	// if the object is already being deleted, no need to update generation.
	if objectMeta.DeletionTimestamp != nil {
		// if we are already being deleted, we may only shorten the deletion grace period
		// this means the object was gracefully deleted previously but deletionGracePeriodSeconds was not set,
		// so we force deletion immediately
		if objectMeta.DeletionGracePeriodSeconds == nil {
			return false, false, nil
		}
		// only a shorter grace period may be provided by a user
		if options.GracePeriodSeconds != nil {
			period := int64(*options.GracePeriodSeconds)
			if period >= *objectMeta.DeletionGracePeriodSeconds {
				return false, true, nil
			}
			newDeletionTimestamp := unversioned.NewTime(
				objectMeta.DeletionTimestamp.Add(-time.Second * time.Duration(*objectMeta.DeletionGracePeriodSeconds)).
					Add(time.Second * time.Duration(*options.GracePeriodSeconds)))
			objectMeta.DeletionTimestamp = &newDeletionTimestamp
			objectMeta.DeletionGracePeriodSeconds = &period
			return true, false, nil
		}
		// graceful deletion is pending, do nothing
		options.GracePeriodSeconds = objectMeta.DeletionGracePeriodSeconds
		return false, true, nil
	}

	if !gracefulStrategy.CheckGracefulDelete(ctx, obj, options) {
		return false, false, nil
	}
	now := unversioned.NewTime(unversioned.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds)))
	objectMeta.DeletionTimestamp = &now
	objectMeta.DeletionGracePeriodSeconds = options.GracePeriodSeconds
	// If it's the first graceful deletion we are going to set the DeletionTimestamp to non-nil.
	// Controllers of the object that's being deleted shouldn't take any nontrivial actions, hence its behavior changes.
	// Thus we need to bump object's Generation (if set). This handles the generation bump during graceful deletion.
	// The bump for objects that don't support graceful deletion is handled in pkg/registry/generic/registry/store.go.
	if objectMeta.Generation > 0 {
		objectMeta.Generation++
	}
	return true, false, nil
}
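// A hedged, self-contained restatement (hypothetical helper, not part of the
// original source) of the grace-period shortening arithmetic above: subtract
// the old grace period from the current DeletionTimestamp to recover the
// original request time, then add the new, shorter grace period. Shortening
// is therefore exact rather than relative to "now".
func shortenDeletionTimestamp(current unversioned.Time, oldGrace, newGrace int64) unversioned.Time {
	return unversioned.NewTime(
		current.Add(-time.Second * time.Duration(oldGrace)).
			Add(time.Second * time.Duration(newGrace)))
}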
// InterpretUpdateError converts a generic etcd error on an update
// operation into the appropriate API error.
func InterpretUpdateError(err error, kind, name string) error {
	switch {
	case etcdutil.IsEtcdTestFailed(err), etcdutil.IsEtcdNodeExist(err):
		return errors.NewConflict(kind, name, err)
	case etcdutil.IsEtcdUnreachable(err):
		return errors.NewServerTimeout(kind, "update", 2) // TODO: make configurable or handled at a higher level
	default:
		return err
	}
}
// InterpretUpdateError converts a generic error on an update
// operation into the appropriate API error.
func InterpretUpdateError(err error, qualifiedResource unversioned.GroupResource, name string) error {
	switch {
	case storage.IsTestFailed(err), storage.IsNodeExist(err):
		return errors.NewConflict(qualifiedResource, name, err)
	case storage.IsUnreachable(err):
		return errors.NewServerTimeout(qualifiedResource, "update", 2) // TODO: make configurable or handled at a higher level
	default:
		return err
	}
}
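// A hedged sketch of the call-site pattern for InterpretUpdateError: registry
// code performs the raw storage write, then maps any storage-layer error to an
// API error before returning it to the handler. The exampleStore type and
// rawStorageUpdate call are hypothetical stand-ins for whatever low-level
// write the registry performs.
func (s *exampleStore) update(ctx api.Context, key string, obj runtime.Object) error {
	if err := s.rawStorageUpdate(ctx, key, obj); err != nil {
		// IsTestFailed/IsNodeExist become 409 Conflict, IsUnreachable becomes
		// a ServerTimeout, and everything else passes through unchanged.
		return InterpretUpdateError(err, unversioned.GroupResource{Resource: "examples"}, key)
	}
	return nil
}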
func (r *RollbackREST) rollbackDeployment(ctx api.Context, deploymentID string, config *extensions.RollbackConfig, annotations map[string]string) (err error) {
	if _, err = r.setDeploymentRollback(ctx, deploymentID, config, annotations); err != nil {
		err = etcderr.InterpretGetError(err, extensions.Resource("deployments"), deploymentID)
		err = etcderr.InterpretUpdateError(err, extensions.Resource("deployments"), deploymentID)
		if _, ok := err.(*errors.StatusError); !ok {
			err = errors.NewConflict(extensions.Resource("deployments/rollback"), deploymentID, err)
		}
	}
	return
}
// assignPod assigns the given pod to the given machine.
func (r *BindingREST) assignPod(ctx api.Context, podID string, machine string, annotations map[string]string, cpuSet string, network api.Network) (err error) {
	if _, err = r.setPodHostAndAnnotations(ctx, podID, "", machine, annotations, cpuSet, network); err != nil {
		err = etcderr.InterpretGetError(err, api.Resource("pods"), podID)
		err = etcderr.InterpretUpdateError(err, api.Resource("pods"), podID)
		if _, ok := err.(*errors.StatusError); !ok {
			err = errors.NewConflict(api.Resource("pods/binding"), podID, err)
		}
	}
	return
}
// assignPod assigns the given pod to the given machine.
func (r *BindingREST) assignPod(ctx api.Context, podID string, machine string, annotations map[string]string) (err error) {
	if _, err = r.setPodHostAndAnnotations(ctx, podID, "", machine, annotations); err != nil {
		err = etcderr.InterpretGetError(err, "pod", podID)
		err = etcderr.InterpretUpdateError(err, "pod", podID)
		if _, ok := err.(*errors.StatusError); !ok {
			err = errors.NewConflict("binding", podID, err)
		}
	}
	return
}
func TestApplyRetry(t *testing.T) {
	initTestErrorHandler(t)
	nameRC, currentRC := readAndAnnotateReplicationController(t, filenameRC)
	pathRC := "/namespaces/test/replicationcontrollers/" + nameRC

	firstPatch := true
	retry := false
	getCount := 0
	f, tf, _, ns := cmdtesting.NewAPIFactory()
	tf.Printer = &testPrinter{}
	tf.Client = &fake.RESTClient{
		NegotiatedSerializer: ns,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
			switch p, m := req.URL.Path, req.Method; {
			case p == pathRC && m == "GET":
				getCount++
				bodyRC := ioutil.NopCloser(bytes.NewReader(currentRC))
				return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: bodyRC}, nil
			case p == pathRC && m == "PATCH":
				if firstPatch {
					firstPatch = false
					statusErr := kubeerr.NewConflict(schema.GroupResource{Group: "", Resource: "rc"}, "test-rc", fmt.Errorf("the object has been modified. Please apply at first."))
					bodyBytes, _ := json.Marshal(statusErr)
					bodyErr := ioutil.NopCloser(bytes.NewReader(bodyBytes))
					return &http.Response{StatusCode: http.StatusConflict, Header: defaultHeader(), Body: bodyErr}, nil
				}
				retry = true
				validatePatchApplication(t, req)
				bodyRC := ioutil.NopCloser(bytes.NewReader(currentRC))
				return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: bodyRC}, nil
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	tf.Namespace = "test"
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})

	cmd := NewCmdApply(f, buf, errBuf)
	cmd.Flags().Set("filename", filenameRC)
	cmd.Flags().Set("output", "name")
	cmd.Run(cmd, []string{})

	if !retry || getCount != 2 {
		t.Fatalf("apply did not retry when it got a conflict error")
	}

	// uses the name from the file, not the response
	expectRC := "replicationcontroller/" + nameRC + "\n"
	if buf.String() != expectRC {
		t.Fatalf("unexpected output: %s\nexpected: %s", buf.String(), expectRC)
	}
}
// createBuild validates the build object, saves it, and returns the newly created object.
func (g *BuildGenerator) createBuild(ctx kapi.Context, build *buildapi.Build) (*buildapi.Build, error) {
	if !kapi.ValidNamespace(ctx, &build.ObjectMeta) {
		return nil, errors.NewConflict(buildapi.Resource("build"), build.Namespace, fmt.Errorf("Build.Namespace does not match the provided context"))
	}
	kapi.FillObjectMetaSystemFields(ctx, &build.ObjectMeta)

	err := g.Client.CreateBuild(ctx, build)
	if err != nil {
		return nil, err
	}
	return g.Client.GetBuild(ctx, build.Name)
}
// TestCreateRetryConflictTagDiff ensures that attempts to create a mapping
// that result in resource conflicts that DO contain tag diffs cause the
// conflict error to be returned.
func TestCreateRetryConflictTagDiff(t *testing.T) {
	firstGet := true
	firstUpdate := true
	rest := &REST{
		strategy: NewStrategy(testDefaultRegistry),
		imageRegistry: &fakeImageRegistry{
			createImage: func(ctx kapi.Context, image *api.Image) error {
				return nil
			},
		},
		imageStreamRegistry: &fakeImageStreamRegistry{
			getImageStream: func(ctx kapi.Context, id string) (*api.ImageStream, error) {
				// For the first get, return a stream with a latest tag pointing to "original"
				if firstGet {
					firstGet = false
					stream := validImageStream()
					stream.Status = api.ImageStreamStatus{
						Tags: map[string]api.TagEventList{
							"latest": {Items: []api.TagEvent{{DockerImageReference: "localhost:5000/someproject/somerepo:original"}}},
						},
					}
					return stream, nil
				}
				// For subsequent gets, return a stream with the latest tag changed to "newer"
				stream := validImageStream()
				stream.Status = api.ImageStreamStatus{
					Tags: map[string]api.TagEventList{
						"latest": {Items: []api.TagEvent{{DockerImageReference: "localhost:5000/someproject/somerepo:newer"}}},
					},
				}
				return stream, nil
			},
			updateImageStreamStatus: func(ctx kapi.Context, repo *api.ImageStream) (*api.ImageStream, error) {
				// For the first update, return a conflict so that the stream
				// get/compare is retried.
				if firstUpdate {
					firstUpdate = false
					return nil, errors.NewConflict(api.Resource("imagestreams"), repo.Name, fmt.Errorf("resource modified"))
				}
				return repo, nil
			},
		},
	}
	obj, err := rest.Create(kapi.NewDefaultContext(), validNewMappingWithName())
	if err == nil {
		t.Fatalf("expected an error")
	}
	if !errors.IsConflict(err) {
		t.Errorf("expected a conflict error, got %v", err)
	}
	if obj != nil {
		t.Fatalf("expected a nil result")
	}
}
func TestStatusRecordRejectionConflict(t *testing.T) {
	now := nowFn()
	nowFn = func() unversioned.Time { return now }
	touched := unversioned.Time{Time: now.Add(-time.Minute)}
	p := &fakePlugin{}
	c := testclient.NewSimpleFake(&routeapi.Route{ObjectMeta: kapi.ObjectMeta{Name: "route1", Namespace: "default", UID: types.UID("uid1")}})
	c.PrependReactor("update", "routes", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		if action.GetSubresource() != "status" {
			return false, nil, nil
		}
		return true, nil, errors.NewConflict(kapi.Resource("Route"), "route1", nil)
	})
	admitter := NewStatusAdmitter(p, c, "test")
	admitter.RecordRouteRejection(&routeapi.Route{
		ObjectMeta: kapi.ObjectMeta{Name: "route1", Namespace: "default", UID: types.UID("uid1")},
		Spec:       routeapi.RouteSpec{Host: "route1.test.local"},
		Status: routeapi.RouteStatus{
			Ingress: []routeapi.RouteIngress{
				{
					Host:       "route2.test.local",
					RouterName: "test",
					Conditions: []routeapi.RouteIngressCondition{
						{
							Type:               routeapi.RouteAdmitted,
							Status:             kapi.ConditionFalse,
							LastTransitionTime: &touched,
						},
					},
				},
			},
		},
	}, "Failed", "generic error")

	if len(c.Actions()) != 1 {
		t.Fatalf("unexpected actions: %#v", c.Actions())
	}
	action := c.Actions()[0]
	if action.GetVerb() != "update" || action.GetResource().Resource != "routes" || action.GetSubresource() != "status" {
		t.Fatalf("unexpected action: %#v", action)
	}
	obj := c.Actions()[0].(core.UpdateAction).GetObject().(*routeapi.Route)
	if len(obj.Status.Ingress) != 1 || obj.Status.Ingress[0].Host != "route1.test.local" {
		t.Fatalf("expected route reset: %#v", obj)
	}
	condition := obj.Status.Ingress[0].Conditions[0]
	if condition.LastTransitionTime == nil || *condition.LastTransitionTime != now || condition.Status != kapi.ConditionFalse || condition.Reason != "Failed" || condition.Message != "generic error" {
		t.Fatalf("unexpected condition: %#v", condition)
	}
	if v, ok := admitter.expected.Peek(types.UID("uid1")); ok {
		t.Fatalf("expected empty time: %#v", v)
	}
}
// InterpretDeleteError converts a generic error on a delete
// operation into the appropriate API error.
func InterpretDeleteError(err error, qualifiedResource schema.GroupResource, name string) error {
	switch {
	case storage.IsNotFound(err):
		return errors.NewNotFound(qualifiedResource, name)
	case storage.IsUnreachable(err):
		return errors.NewServerTimeout(qualifiedResource, "delete", 2) // TODO: make configurable or handled at a higher level
	case storage.IsTestFailed(err), storage.IsNodeExist(err), storage.IsInvalidObj(err):
		return errors.NewConflict(qualifiedResource, name, err)
	case storage.IsInternalError(err):
		return errors.NewInternalError(err)
	default:
		return err
	}
}
func TestStatusBackoffOnConflict(t *testing.T) {
	now := unversioned.Now()
	nowFn = func() unversioned.Time { return now }
	touched := unversioned.Time{Time: now.Add(-time.Minute)}
	p := &fakePlugin{}
	c := testclient.NewSimpleFake(&(errors.NewConflict(kapi.Resource("Route"), "route1", nil).(*errors.StatusError).ErrStatus))
	admitter := NewStatusAdmitter(p, c, "test")
	err := admitter.HandleRoute(watch.Added, &routeapi.Route{
		ObjectMeta: kapi.ObjectMeta{Name: "route1", Namespace: "default", UID: types.UID("uid1")},
		Spec:       routeapi.RouteSpec{Host: "route1.test.local"},
		Status: routeapi.RouteStatus{
			Ingress: []routeapi.RouteIngress{
				{
					Host:       "route2.test.local",
					RouterName: "test",
					Conditions: []routeapi.RouteIngressCondition{
						{
							Type:               routeapi.RouteAdmitted,
							Status:             kapi.ConditionFalse,
							LastTransitionTime: &touched,
						},
					},
				},
			},
		},
	})
	if len(c.Actions()) != 1 {
		t.Fatalf("unexpected actions: %#v", c.Actions())
	}
	action := c.Actions()[0]
	if action.GetVerb() != "update" || action.GetResource() != "routes" || action.GetSubresource() != "status" {
		t.Fatalf("unexpected action: %#v", action)
	}
	obj := c.Actions()[0].(ktestclient.UpdateAction).GetObject().(*routeapi.Route)
	if len(obj.Status.Ingress) != 1 || obj.Status.Ingress[0].Host != "route1.test.local" {
		t.Fatalf("expected route reset: %#v", obj)
	}
	condition := obj.Status.Ingress[0].Conditions[0]
	if condition.LastTransitionTime == nil || *condition.LastTransitionTime != now || condition.Status != kapi.ConditionTrue || condition.Reason != "" {
		t.Fatalf("unexpected condition: %#v", condition)
	}
	if err == nil {
		t.Fatalf("unexpected non-error: %#v", admitter.expected)
	}
	if v, ok := admitter.expected.Peek(types.UID("uid1")); !ok || !reflect.DeepEqual(v, time.Time{}) {
		t.Fatalf("expected empty time: %#v", v)
	}
}