// createSchedulerServiceIfNeeded will create the specified service if it // doesn't already exist. func (m *SchedulerServer) createSchedulerServiceIfNeeded(serviceName string, servicePort int) error { ctx := api.NewDefaultContext() if _, err := m.client.Services(api.NamespaceValue(ctx)).Get(serviceName); err == nil { // The service already exists. return nil } svc := &api.Service{ ObjectMeta: api.ObjectMeta{ Name: serviceName, Namespace: api.NamespaceDefault, Labels: map[string]string{"provider": "k8sm", "component": "scheduler"}, }, Spec: api.ServiceSpec{ Ports: []api.ServicePort{{Port: servicePort, Protocol: api.ProtocolTCP}}, // maintained by this code, not by the pod selector Selector: nil, SessionAffinity: api.ServiceAffinityNone, }, } if m.ServiceAddress != nil { svc.Spec.ClusterIP = m.ServiceAddress.String() } _, err := m.client.Services(api.NamespaceValue(ctx)).Create(svc) if err != nil && errors.IsAlreadyExists(err) { err = nil } return err }
// TestUnprivilegedNewProject verifies that a user with no pre-existing
// credentials can obtain a token, request a new project, and that a second
// identical request fails with an already-exists error.
func TestUnprivilegedNewProject(t *testing.T) {
	_, clusterAdminKubeConfig, err := testutil.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Copy the admin config, then strip every credential so the client starts
	// out unauthenticated.
	valerieClientConfig := *clusterAdminClientConfig
	valerieClientConfig.Username = ""
	valerieClientConfig.Password = ""
	valerieClientConfig.BearerToken = ""
	valerieClientConfig.CertFile = ""
	valerieClientConfig.KeyFile = ""
	valerieClientConfig.CertData = nil
	valerieClientConfig.KeyData = nil

	// Obtain a bearer token via the challenge flow with valerie's password.
	accessToken, err := tokencmd.RequestToken(&valerieClientConfig, nil, "valerie", "security!")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	valerieClientConfig.BearerToken = accessToken
	valerieOpenshiftClient, err := client.New(&valerieClientConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// confirm that we have access to request the project
	allowed, err := valerieOpenshiftClient.ProjectRequests().List(labels.Everything(), fields.Everything())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if allowed.Status != kapi.StatusSuccess {
		t.Fatalf("expected %v, got %v", kapi.StatusSuccess, allowed.Status)
	}

	requestProject := oc.NewProjectOptions{
		ProjectName: "new-project",
		DisplayName: "display name here",
		Description: "the special description",

		Client: valerieOpenshiftClient,
		Out:    ioutil.Discard,
	}

	// First request should succeed and the project should become visible.
	if err := requestProject.Run(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	waitForProject(t, valerieOpenshiftClient, "new-project", 5*time.Second, 10)

	// A duplicate request for the same project must report already-exists.
	if err := requestProject.Run(); !kapierrors.IsAlreadyExists(err) {
		t.Fatalf("expected an already exists error, but got %v", err)
	}
}
func (p *provision) Admit(a admission.Attributes) (err error) { defaultVersion, kind, err := api.RESTMapper.VersionAndKindForResource(a.GetResource()) if err != nil { return admission.NewForbidden(a, err) } mapping, err := api.RESTMapper.RESTMapping(kind, defaultVersion) if err != nil { return admission.NewForbidden(a, err) } if mapping.Scope.Name() != meta.RESTScopeNameNamespace { return nil } namespace := &api.Namespace{ ObjectMeta: api.ObjectMeta{ Name: a.GetNamespace(), Namespace: "", }, Status: api.NamespaceStatus{}, } _, exists, err := p.store.Get(namespace) if err != nil { return admission.NewForbidden(a, err) } if exists { return nil } _, err = p.client.Namespaces().Create(namespace) if err != nil && !errors.IsAlreadyExists(err) { return admission.NewForbidden(a, err) } return nil }
// executeExecNewPod executes a ExecNewPod hook by creating a new pod based on // the hook parameters and deployment. The pod is then synchronously watched // until the pod completes, and if the pod failed, an error is returned. // // The hook pod inherits the following from the container the hook refers to: // // * Environment (hook keys take precedence) // * Working directory // * Resources func (e *HookExecutor) executeExecNewPod(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, label string) error { // Build a pod spec from the hook config and deployment podSpec, err := makeHookPod(hook, deployment, label) if err != nil { return err } // Try to create the pod. pod, err := e.PodClient.CreatePod(deployment.Namespace, podSpec) if err != nil { if !kerrors.IsAlreadyExists(err) { return fmt.Errorf("couldn't create lifecycle pod for %s: %v", deployutil.LabelForDeployment(deployment), err) } } else { glog.V(0).Infof("Created lifecycle pod %s for deployment %s", pod.Name, deployutil.LabelForDeployment(deployment)) } stopChannel := make(chan struct{}) defer close(stopChannel) nextPod := e.PodClient.PodWatch(pod.Namespace, pod.Name, pod.ResourceVersion, stopChannel) glog.V(0).Infof("Waiting for hook pod %s/%s to complete", pod.Namespace, pod.Name) for { pod := nextPod() switch pod.Status.Phase { case kapi.PodSucceeded: return nil case kapi.PodFailed: return fmt.Errorf(pod.Status.Message) } } }
// createMasterServiceIfNeeded will create the specified service if it // doesn't already exist. func (m *Master) createMasterServiceIfNeeded(serviceName string, serviceIP net.IP, servicePort int) error { ctx := api.NewDefaultContext() if _, err := m.serviceRegistry.GetService(ctx, serviceName); err == nil { // The service already exists. return nil } svc := &api.Service{ ObjectMeta: api.ObjectMeta{ Name: serviceName, Namespace: api.NamespaceDefault, Labels: map[string]string{"provider": "kubernetes", "component": "apiserver"}, }, Spec: api.ServiceSpec{ Ports: []api.ServicePort{{Port: servicePort, Protocol: api.ProtocolTCP}}, // maintained by this code, not by the pod selector Selector: nil, PortalIP: serviceIP.String(), SessionAffinity: api.AffinityTypeNone, }, } _, err := m.storage["services"].(rest.Creater).Create(ctx, svc) if err != nil && errors.IsAlreadyExists(err) { err = nil } return err }
// RegisterNodes registers the given list of nodes, it keeps retrying for `retryCount` times. func (s *NodeController) RegisterNodes(nodes *api.NodeList, retryCount int, retryInterval time.Duration) error { if len(nodes.Items) == 0 { return nil } registered := util.NewStringSet() nodes = s.canonicalizeName(nodes) for i := 0; i < retryCount; i++ { for _, node := range nodes.Items { if registered.Has(node.Name) { continue } _, err := s.kubeClient.Nodes().Create(&node) if err == nil || apierrors.IsAlreadyExists(err) { registered.Insert(node.Name) glog.Infof("Registered node in registry: %s", node.Name) } else { glog.Errorf("Error registering node %s, retrying: %s", node.Name, err) } if registered.Len() == len(nodes.Items) { glog.Infof("Successfully registered all nodes") return nil } } time.Sleep(retryInterval) } if registered.Len() != len(nodes.Items) { return ErrRegistration } else { return nil } }
// CreateMasterServiceIfNeeded will create the specified service if it // doesn't already exist. func (c *Controller) CreateMasterServiceIfNeeded(serviceName string, serviceIP net.IP, servicePort int) error { ctx := api.NewDefaultContext() if _, err := c.ServiceRegistry.GetService(ctx, serviceName); err == nil { // The service already exists. return nil } svc := &api.Service{ ObjectMeta: api.ObjectMeta{ Name: serviceName, Namespace: api.NamespaceDefault, Labels: map[string]string{"provider": "kubernetes", "component": "apiserver"}, }, Spec: api.ServiceSpec{ Ports: []api.ServicePort{{Port: servicePort, Protocol: api.ProtocolTCP, TargetPort: util.NewIntOrStringFromInt(servicePort)}}, // maintained by this code, not by the pod selector Selector: nil, ClusterIP: serviceIP.String(), SessionAffinity: api.ServiceAffinityNone, Type: api.ServiceTypeClusterIP, }, } if err := rest.BeforeCreate(rest.Services, ctx, svc); err != nil { return err } _, err := c.ServiceRegistry.CreateService(ctx, svc) if err != nil && errors.IsAlreadyExists(err) { err = nil } return err }
func TestEtcdCreateControllerAlreadyExisting(t *testing.T) { ctx := api.NewDefaultContext() storage, fakeClient := newStorage(t) key, _ := makeControllerKey(ctx, validController.Name) fakeClient.Set(key, runtime.EncodeOrDie(latest.Codec, &validController), 0) _, err := storage.Create(ctx, &validController) if !errors.IsAlreadyExists(err) { t.Errorf("expected already exists err, got %#v", err) } }
func TestEtcdCreateServiceAlreadyExisting(t *testing.T) { fakeClient := tools.NewFakeEtcdClient(t) fakeClient.Set("/registry/services/specs/foo", runtime.EncodeOrDie(latest.Codec, &api.Service{JSONBase: api.JSONBase{ID: "foo"}}), 0) registry := NewTestEtcdRegistry(fakeClient) err := registry.CreateService(&api.Service{ JSONBase: api.JSONBase{ID: "foo"}, }) if !errors.IsAlreadyExists(err) { t.Errorf("expected already exists err, got %#v", err) } }
func TestEtcdCreateServiceAlreadyExisting(t *testing.T) { ctx := api.NewDefaultContext() fakeClient := tools.NewFakeEtcdClient(t) key, _ := makeServiceKey(ctx, "foo") fakeClient.Set(key, runtime.EncodeOrDie(latest.Codec, &api.Service{ObjectMeta: api.ObjectMeta{Name: "foo"}}), 0) registry := NewTestEtcdRegistry(fakeClient) err := registry.CreateService(ctx, &api.Service{ ObjectMeta: api.ObjectMeta{Name: "foo"}, }) if !errors.IsAlreadyExists(err) { t.Errorf("expected already exists err, got %#v", err) } }
// ensureOpenShiftInfraNamespace is called as part of global policy initialization to ensure infra namespace exists func (c *MasterConfig) ensureOpenShiftInfraNamespace() { ns := c.Options.PolicyConfig.OpenShiftInfrastructureNamespace // Ensure namespace exists _, err := c.KubeClient().Namespaces().Create(&kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: ns}}) if err != nil && !kapierror.IsAlreadyExists(err) { glog.Errorf("Error creating namespace %s: %v", ns, err) } // Ensure service accounts exist serviceAccounts := []string{c.BuildControllerServiceAccount, c.DeploymentControllerServiceAccount, c.ReplicationControllerServiceAccount} for _, serviceAccountName := range serviceAccounts { _, err := c.KubeClient().ServiceAccounts(ns).Create(&kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: serviceAccountName}}) if err != nil && !kapierror.IsAlreadyExists(err) { glog.Errorf("Error creating service account %s/%s: %v", ns, serviceAccountName, err) } } // Ensure service account cluster role bindings exist clusterRolesToUsernames := map[string][]string{ bootstrappolicy.BuildControllerRoleName: {serviceaccount.MakeUsername(ns, c.BuildControllerServiceAccount)}, bootstrappolicy.DeploymentControllerRoleName: {serviceaccount.MakeUsername(ns, c.DeploymentControllerServiceAccount)}, bootstrappolicy.ReplicationControllerRoleName: {serviceaccount.MakeUsername(ns, c.ReplicationControllerServiceAccount)}, } roleAccessor := policy.NewClusterRoleBindingAccessor(c.ServiceAccountRoleBindingClient()) for clusterRole, usernames := range clusterRolesToUsernames { addRole := &policy.RoleModificationOptions{ RoleName: clusterRole, RoleBindingAccessor: roleAccessor, Users: usernames, } if err := addRole.AddRole(); err != nil { glog.Errorf("Could not add %v users to the %v cluster role: %v\n", ns, usernames, clusterRole, err) } else { glog.V(2).Infof("Added %v users to the %v cluster role: %v\n", usernames, clusterRole, err) } } }
func TestEtcdCreateControllerAlreadyExisting(t *testing.T) { fakeClient := tools.NewFakeEtcdClient(t) fakeClient.Set("/registry/controllers/foo", runtime.EncodeOrDie(latest.Codec, &api.ReplicationController{JSONBase: api.JSONBase{ID: "foo"}}), 0) registry := NewTestEtcdRegistry(fakeClient) err := registry.CreateController(&api.ReplicationController{ JSONBase: api.JSONBase{ ID: "foo", }, }) if !errors.IsAlreadyExists(err) { t.Errorf("expected already exists err, got %#v", err) } }
// CheckGeneratedNameError checks whether an error that occured creating a resource is due // to generation being unable to pick a valid name. func CheckGeneratedNameError(strategy RESTCreateStrategy, err error, obj runtime.Object) error { if !errors.IsAlreadyExists(err) { return err } objectMeta, kind, kerr := objectMetaAndKind(strategy, obj) if kerr != nil { return kerr } if len(objectMeta.GenerateName) == 0 { return err } return errors.NewServerTimeout(kind, "POST") }
func TestEtcdCreateControllerAlreadyExisting(t *testing.T) { ctx := api.NewDefaultContext() fakeClient := tools.NewFakeEtcdClient(t) registry := NewTestEtcdRegistry(fakeClient) key, _ := makeControllerKey(ctx, "foo") key = etcdtest.AddPrefix(key) fakeClient.Set(key, runtime.EncodeOrDie(latest.Codec, &api.ReplicationController{ObjectMeta: api.ObjectMeta{Name: "foo"}}), 0) _, err := registry.CreateController(ctx, &api.ReplicationController{ ObjectMeta: api.ObjectMeta{ Name: "foo", }, }) if !errors.IsAlreadyExists(err) { t.Errorf("expected already exists err, got %#v", err) } }
func TestEtcdCreateAlreadyExisting(t *testing.T) { registry, _, _, fakeClient, _ := newStorage(t) ctx := api.NewDefaultContext() key, _ := registry.KeyFunc(ctx, "foo") fakeClient.Data[key] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}), }, }, E: nil, } _, err := registry.Create(ctx, validNewPod()) if !errors.IsAlreadyExists(err) { t.Errorf("Unexpected error returned: %#v", err) } }
// Create registers a new image (if it doesn't exist) and updates the specified ImageRepository's tags.
func (s *REST) Create(obj interface{}) (<-chan interface{}, error) {
	mapping, ok := obj.(*api.ImageRepositoryMapping)
	if !ok {
		return nil, fmt.Errorf("not an image repository mapping: %#v", obj)
	}
	// Resolve the target repository from the mapping's Docker pull spec.
	repo, err := s.findImageRepository(mapping.DockerImageRepository)
	if err != nil {
		return nil, err
	}
	if repo == nil {
		return nil, errors.NewInvalid("imageRepositoryMapping", mapping.ID, errors.ErrorList{
			errors.NewFieldNotFound("DockerImageRepository", mapping.DockerImageRepository),
		})
	}
	if errs := validation.ValidateImageRepositoryMapping(mapping); len(errs) > 0 {
		return nil, errors.NewInvalid("imageRepositoryMapping", mapping.ID, errs)
	}

	// Work on a copy of the mapping's image and stamp its creation time.
	image := mapping.Image
	image.CreationTimestamp = util.Now()

	//TODO apply metadata overrides

	// Point the requested tag at the image before the async write below.
	if repo.Tags == nil {
		repo.Tags = make(map[string]string)
	}
	repo.Tags[mapping.Tag] = image.ID

	return apiserver.MakeAsync(func() (interface{}, error) {
		// An image with the same ID may already be registered; that is fine.
		err = s.imageRegistry.CreateImage(&image)
		if err != nil && !errors.IsAlreadyExists(err) {
			return nil, err
		}
		// Persist the updated tag map on the repository.
		err = s.imageRepositoryRegistry.UpdateImageRepository(repo)
		if err != nil {
			return nil, err
		}
		return &kubeapi.Status{Status: kubeapi.StatusSuccess}, nil
	}), nil
}
// createMasterNamespaceIfNeeded will create the namespace that contains the master services if it doesn't already exist func (m *Master) createMasterNamespaceIfNeeded(ns string) error { ctx := api.NewContext() if _, err := m.namespaceRegistry.GetNamespace(ctx, api.NamespaceDefault); err == nil { // the namespace already exists return nil } namespace := &api.Namespace{ ObjectMeta: api.ObjectMeta{ Name: ns, Namespace: "", }, } _, err := m.storage["namespaces"].(rest.Creater).Create(ctx, namespace) if err != nil && errors.IsAlreadyExists(err) { err = nil } return err }
// CreateNamespaceIfNeeded will create the namespace that contains the master services if it doesn't already exist func (c *Controller) CreateNamespaceIfNeeded(ns string) error { ctx := api.NewContext() if _, err := c.NamespaceRegistry.GetNamespace(ctx, api.NamespaceDefault); err == nil { // the namespace already exists return nil } newNs := &api.Namespace{ ObjectMeta: api.ObjectMeta{ Name: ns, Namespace: "", }, } err := c.NamespaceRegistry.CreateNamespace(ctx, newNs) if err != nil && errors.IsAlreadyExists(err) { err = nil } return err }
// Create registers a new image (if it doesn't exist) and updates the specified ImageRepository's tags.
func (s *ImageRepositoryMappingStorage) Create(obj interface{}) (<-chan interface{}, error) {
	mapping, ok := obj.(*api.ImageRepositoryMapping)
	if !ok {
		return nil, fmt.Errorf("not an image repository mapping: %#v", obj)
	}
	// Resolve the target repository from the mapping's Docker pull spec.
	repo, err := s.findImageRepository(mapping.DockerImageRepository)
	if err != nil {
		return nil, err
	}
	if repo == nil {
		return nil, fmt.Errorf("Unable to locate an image repository for '%s'", mapping.DockerImageRepository)
	}
	if errs := ValidateImageRepositoryMapping(mapping); len(errs) > 0 {
		return nil, kubeerrors.NewInvalid("imageRepositoryMapping", mapping.ID, errs)
	}

	// Work on a copy of the mapping's image and stamp its creation time.
	image := mapping.Image
	image.CreationTimestamp = util.Now()

	//TODO apply metadata overrides

	// Point the requested tag at the image before the async write below.
	if repo.Tags == nil {
		repo.Tags = make(map[string]string)
	}
	repo.Tags[mapping.Tag] = image.ID

	return apiserver.MakeAsync(func() (interface{}, error) {
		// An image that is already registered is not an error here.
		err = s.imageRegistry.CreateImage(image)
		if err != nil && !kubeerrors.IsAlreadyExists(err) {
			return nil, err
		}
		// Persist the updated tag map on the repository.
		err = s.imageRepositoryRegistry.UpdateImageRepository(*repo)
		if err != nil {
			return nil, err
		}
		return &baseapi.Status{Status: baseapi.StatusSuccess}, nil
	}), nil
}
// TestEtcdCreatePodWithContainersError verifies that ApplyBinding translates
// a create failure on the boundpods key into an already-exists error and
// leaves the stored pod unbound.
func TestEtcdCreatePodWithContainersError(t *testing.T) {
	ctx := api.NewDefaultContext()
	fakeClient := tools.NewFakeEtcdClient(t)
	fakeClient.TestIndex = true
	// The pod key starts out empty so CreatePod can succeed.
	key, _ := makePodKey(ctx, "foo")
	fakeClient.Data[key] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: nil,
		},
		E: tools.EtcdErrorNotFound,
	}
	// The boundpods key rejects writes with a node-exists error.
	fakeClient.Data["/registry/nodes/machine/boundpods"] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: nil,
		},
		E: tools.EtcdErrorNodeExist, // validate that ApplyBinding is translating Create errors
	}
	registry := NewTestEtcdRegistry(fakeClient)
	err := registry.CreatePod(ctx, &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			Namespace: api.NamespaceDefault,
		},
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Suddenly, a wild scheduler appears:
	err = registry.ApplyBinding(ctx, &api.Binding{PodID: "foo", Host: "machine"})
	if !errors.IsAlreadyExists(err) {
		t.Fatalf("Unexpected error returned: %#v", err)
	}
	// The failed binding must not have mutated the stored pod.
	existingPod, err := registry.GetPod(ctx, "foo")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if existingPod.Status.Host == "machine" {
		t.Fatal("Pod's host changed in response to an non-apply-able binding.")
	}
}
func TestEtcdCreatePodAlreadyExisting(t *testing.T) { fakeClient := tools.NewFakeEtcdClient(t) fakeClient.Data["/registry/pods/foo"] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{JSONBase: api.JSONBase{ID: "foo"}}), }, }, E: nil, } registry := NewTestEtcdRegistry(fakeClient) err := registry.CreatePod(&api.Pod{ JSONBase: api.JSONBase{ ID: "foo", }, }) if !errors.IsAlreadyExists(err) { t.Errorf("Unexpected error returned: %#v", err) } }
// Create registers a new image (if it doesn't exist) and updates the specified ImageStream's tags. func (s *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) { if err := rest.BeforeCreate(Strategy, ctx, obj); err != nil { return nil, err } mapping := obj.(*api.ImageStreamMapping) stream, err := s.findStreamForMapping(ctx, mapping) if err != nil { return nil, err } image := mapping.Image tag := mapping.Tag if len(tag) == 0 { tag = api.DefaultImageTag } if err := s.imageRegistry.CreateImage(ctx, &image); err != nil && !errors.IsAlreadyExists(err) { return nil, err } next := api.TagEvent{ Created: util.Now(), DockerImageReference: image.DockerImageReference, Image: image.Name, } if !api.AddTagEventToImageStream(stream, tag, next) { // nothing actually changed return &kapi.Status{Status: kapi.StatusSuccess}, nil } api.UpdateTrackingTags(stream, tag, next) if _, err := s.imageStreamRegistry.UpdateImageStreamStatus(ctx, stream); err != nil { return nil, err } return &kapi.Status{Status: kapi.StatusSuccess}, nil }
// recordEvent attempts to write event to a sink. It returns true if the event
// was successfully recorded or discarded, false if it should be retried.
// If updateExistingEvent is false, it creates a new event, otherwise it updates
// existing event.
func recordEvent(sink EventSink, event *api.Event, updateExistingEvent bool) bool {
	var newEvent *api.Event
	var err error
	if updateExistingEvent {
		newEvent, err = sink.Update(event)
	}
	// Update can fail because the event may have been removed and it no longer exists.
	if !updateExistingEvent || (updateExistingEvent && isKeyNotFoundError(err)) {
		// Making sure that ResourceVersion is empty on creation
		event.ResourceVersion = ""
		newEvent, err = sink.Create(event)
	}
	if err == nil {
		// Cache the server's copy so later updates carry its ResourceVersion.
		addOrUpdateEvent(newEvent)
		return true
	}

	// If we can't contact the server, then hold everything while we keep trying.
	// Otherwise, something about the event is malformed and we should abandon it.
	switch err.(type) {
	case *client.RequestConstructionError:
		// We will construct the request the same next time, so don't keep trying.
		glog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err)
		return true
	case *errors.StatusError:
		// The server actively rejected the event; retrying would not help.
		// Already-exists is expected churn, so log it at high verbosity only.
		if errors.IsAlreadyExists(err) {
			glog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err)
		} else {
			glog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err)
		}
		return true
	case *errors.UnexpectedObjectError:
		// We don't expect this; it implies the server's response didn't match a
		// known pattern. Go ahead and retry.
	default:
		// This case includes actual http transport errors. Go ahead and retry.
	}
	// Fall-through from the retryable cases above: report and ask for a retry.
	glog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err)
	return false
}
// TestEtcdCreatePodWithContainersError verifies that ApplyBinding translates
// a create failure on the kubelet key into an already-exists error and leaves
// the stored pod unbound.
func TestEtcdCreatePodWithContainersError(t *testing.T) {
	fakeClient := tools.NewFakeEtcdClient(t)
	fakeClient.TestIndex = true
	// The pod key starts out empty so CreatePod can succeed.
	fakeClient.Data["/registry/pods/foo"] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: nil,
		},
		E: tools.EtcdErrorNotFound,
	}
	// The kubelet key rejects writes with a node-exists error.
	fakeClient.Data["/registry/hosts/machine/kubelet"] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: nil,
		},
		E: tools.EtcdErrorNodeExist, // validate that ApplyBinding is translating Create errors
	}
	registry := NewTestEtcdRegistry(fakeClient)
	err := registry.CreatePod(&api.Pod{
		JSONBase: api.JSONBase{
			ID: "foo",
		},
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Suddenly, a wild scheduler appears:
	err = registry.ApplyBinding(&api.Binding{PodID: "foo", Host: "machine"})
	if !errors.IsAlreadyExists(err) {
		t.Fatalf("Unexpected error returned: %#v", err)
	}
	// The failed binding must not have mutated the stored pod.
	existingPod, err := registry.GetPod("foo")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if existingPod.DesiredState.Host == "machine" {
		t.Fatal("Pod's host changed in response to an non-apply-able binding.")
	}
}
func TestEtcdCreatePodAlreadyExisting(t *testing.T) { ctx := api.NewDefaultContext() fakeClient := tools.NewFakeEtcdClient(t) key, _ := makePodKey(ctx, "foo") fakeClient.Data[key] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}), }, }, E: nil, } registry := NewTestEtcdRegistry(fakeClient) err := registry.CreatePod(ctx, &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "foo", }, }) if !errors.IsAlreadyExists(err) { t.Errorf("Unexpected error returned: %#v", err) } }
func (p *provisioningIdentityMapper) userForWithRetries(info authapi.UserIdentityInfo, allowedRetries int) (kuser.Info, error) { ctx := kapi.NewContext() identity, err := p.identity.GetIdentity(ctx, info.GetIdentityName()) if kerrs.IsNotFound(err) { user, err := p.createIdentityAndMapping(ctx, info) // Only retry for AlreadyExists errors, which can occur in the following cases: // * The same user was created by another identity provider with the same preferred username // * The same user was created by another instance of this identity provider // * The same identity was created by another instance of this identity provider if kerrs.IsAlreadyExists(err) && allowedRetries > 0 { return p.userForWithRetries(info, allowedRetries-1) } return user, err } if err != nil { return nil, err } return p.getMapping(ctx, identity) }
func TestEtcdCreateAlreadyExistsRoutes(t *testing.T) { fakeClient := tools.NewFakeEtcdClient(t) fakeClient.Data[makeTestDefaultRouteKey("foo")] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(latest.Codec, &api.Route{ObjectMeta: kapi.ObjectMeta{Name: "foo"}}), }, }, E: nil, } registry := NewTestEtcd(fakeClient) err := registry.CreateRoute(kapi.NewDefaultContext(), &api.Route{ ObjectMeta: kapi.ObjectMeta{ Name: "foo", }, }) if err == nil { t.Error("Unexpected non-error") } if !errors.IsAlreadyExists(err) { t.Errorf("Expected 'already exists' error, got %#v", err) } }
func TestEtcdCreateImageRepositoryAlreadyExists(t *testing.T) { fakeClient := tools.NewFakeEtcdClient(t) fakeClient.Data["/imageRepositories/foo"] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(api.ImageRepository{JSONBase: kubeapi.JSONBase{ID: "foo"}}), }, }, E: nil, } registry := NewTestEtcdRegistry(fakeClient) err := registry.CreateImageRepository(&api.ImageRepository{ JSONBase: kubeapi.JSONBase{ ID: "foo", }, }) if err == nil { t.Error("Unexpected non-error") } if !kubeerrors.IsAlreadyExists(err) { t.Errorf("Expected 'already exists' error, got %#v", err) } }
func TestCreateExists(t *testing.T) { user, identity := makeAssociated() expectedActions := []test.Action{ {"GetIdentity", identity.Name}, {"GetUser", user.Name}, } mapping := &api.UserIdentityMapping{ Identity: kapi.ObjectReference{Name: identity.Name}, User: kapi.ObjectReference{Name: user.Name}, } actions, _, _, rest := setupRegistries(identity, user) _, err := rest.Create(kapi.NewContext(), mapping) if err == nil { t.Errorf("Expected error, got none") } if !kerrs.IsAlreadyExists(err) { t.Errorf("Unexpected error: %v", err) } verifyActions(expectedActions, *actions, t) }
func TestCreateAlreadyExists(t *testing.T) { fakeEtcdClient, helper := newHelper(t) fakeEtcdClient.TestIndex = true storage := NewREST(helper) existingImage := &api.Image{ ObjectMeta: kapi.ObjectMeta{ Name: "foo", ResourceVersion: "1", }, DockerImageReference: "foo/bar:abcd1234", } fakeEtcdClient.Data["/images/foo"] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(latest.Codec, existingImage), CreatedIndex: 1, ModifiedIndex: 1, }, }, } _, err := storage.Create(kapi.NewDefaultContext(), &api.Image{ ObjectMeta: kapi.ObjectMeta{ Name: "foo", }, DockerImageReference: "foo/bar:abcd1234", }) if err == nil { t.Fatalf("Unexpected non error") } if !errors.IsAlreadyExists(err) { t.Errorf("Expected already exists error, got %s", err) } }