Code example #1
File: etcd_master.go Project: K-A-Z/kubernetes
// handleMaster performs one loop of master locking.
// On success it returns <master>, nil.
// On error it returns "", err.
// In situations where the caller should try again due to concurrent state changes
// (e.g. another actor simultaneously acquiring the lock), it returns "", nil.
func (e *etcdMasterElector) handleMaster(path, id string, ttl uint64) (string, error) {
	res, err := e.etcd.Get(path, false, false)

	// Unexpected error, bail out
	if err != nil && !tools.IsEtcdNotFound(err) {
		return "", err
	}

	// There is no master, try to become the master.
	if err != nil && tools.IsEtcdNotFound(err) {
		return e.becomeMaster(path, id, ttl)
	}

	// This should never happen.
	if res.Node == nil {
		return "", fmt.Errorf("unexpected response: %#v", res)
	}

	// We're not the master, just return the current value
	if res.Node.Value != id {
		return res.Node.Value, nil
	}

	// We are the master, try to extend our lease.
	return e.extendMaster(path, id, ttl, res)
}
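A minimal caller sketch, not taken from the K-A-Z/kubernetes project: the helper name, the retry interval, and the stub in main are assumptions for illustration. It shows how the return contract documented above might be consumed: a non-nil error aborts, a non-empty value names the current master (possibly ourselves), and ("", nil) means a concurrent state change occurred and the call should be retried.

package main

import (
	"fmt"
	"time"
)

// waitForMaster repeatedly invokes handle until it fails or reports a master.
// handle stands in for a bound call such as e.handleMaster(path, id, ttl).
func waitForMaster(handle func() (string, error)) (string, error) {
	for {
		master, err := handle()
		if err != nil {
			return "", err // unexpected etcd error: bail out
		}
		if master != "" {
			return master, nil // a master is known (possibly ourselves)
		}
		// "", nil: concurrent state change, try again shortly.
		time.Sleep(100 * time.Millisecond) // arbitrary retry interval
	}
}

func main() {
	attempts := 0
	// Stub simulating two concurrent-change retries before a master appears.
	master, err := waitForMaster(func() (string, error) {
		attempts++
		if attempts < 3 {
			return "", nil
		}
		return "node-1", nil
	})
	fmt.Println(master, err)
}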
Code example #2
// DeletePod deletes an existing pod specified by its ID.
func (registry *EtcdRegistry) DeletePod(podID string) error {
	var pod api.Pod
	podKey := makePodKey(podID)
	err := registry.helper.ExtractObj(podKey, &pod, false)
	if tools.IsEtcdNotFound(err) {
		return apiserver.NewNotFoundErr("pod", podID)
	}
	if err != nil {
		return err
	}

	// First delete the pod, so a scheduler doesn't notice it getting removed from the
	// machine and attempt to put it somewhere.
	err = registry.helper.Delete(podKey, true)
	if tools.IsEtcdNotFound(err) {
		return apiserver.NewNotFoundErr("pod", podID)
	}
	if err != nil {
		return err
	}

	machine := pod.DesiredState.Host
	if machine == "" {
		// Pod was never scheduled anywhere, just return.
		return nil
	}

	// Next, remove the pod from the machine atomically.
	contKey := makeContainerKey(machine)
	return registry.helper.AtomicUpdate(contKey, &api.ContainerManifestList{}, func(in interface{}) (interface{}, error) {
		manifests := in.(*api.ContainerManifestList)
		newManifests := make([]api.ContainerManifest, 0, len(manifests.Items))
		found := false
		for _, manifest := range manifests.Items {
			if manifest.ID != podID {
				newManifests = append(newManifests, manifest)
			} else {
				found = true
			}
		}
		if !found {
			// This really shouldn't happen; it indicates something is broken, and likely
			// there is a lost pod somewhere.
			// However, it is "deleted", so log it and move on.
			glog.Infof("Couldn't find: %s in %#v", podID, manifests)
		}
		manifests.Items = newManifests
		return manifests, nil
	})
}
Code example #3
File: etcd.go Project: hungld/kubernetes
// DeleteService deletes a Service specified by its name.
func (r *Registry) DeleteService(name string) error {
	key := makeServiceKey(name)
	err := r.Delete(key, true)
	if tools.IsEtcdNotFound(err) {
		return errors.NewNotFound("service", name)
	}
	if err != nil {
		return err
	}
	key = makeServiceEndpointsKey(name)
	err = r.Delete(key, true)
	if !tools.IsEtcdNotFound(err) {
		return err
	}
	return nil
}
Code example #4
File: etcd_registry.go Project: nqn/kubernetes
func (registry *EtcdRegistry) deletePodFromMachine(machine, podID string) error {
	// First delete the pod, so a scheduler doesn't notice it getting removed from the
	// machine and attempt to put it somewhere.
	podKey := makePodKey(machine, podID)
	_, err := registry.etcdClient.Delete(podKey, true)
	if tools.IsEtcdNotFound(err) {
		return apiserver.NewNotFoundErr("pod", podID)
	}
	if err != nil {
		return err
	}

	// Next, remove the pod from the machine atomically.
	contKey := makeContainerKey(machine)
	return registry.helper().AtomicUpdate(contKey, &[]api.ContainerManifest{}, func(in interface{}) (interface{}, error) {
		manifests := *in.(*[]api.ContainerManifest)
		newManifests := make([]api.ContainerManifest, 0, len(manifests))
		found := false
		for _, manifest := range manifests {
			if manifest.ID != podID {
				newManifests = append(newManifests, manifest)
			} else {
				found = true
			}
		}
		if !found {
			// This really shouldn't happen; it indicates something is broken, and likely
			// there is a lost pod somewhere.
			// However, it is "deleted", so log it and move on.
			glog.Infof("Couldn't find: %s in %#v", podID, manifests)
		}
		return newManifests, nil
	})
}
Code example #5
File: etcd.go Project: pombredanne/atomic-enterprise
// GetAndTestEtcdClient creates an etcd client based on the provided config. It will attempt to
// connect to the etcd server and block until the server responds at least once, or return an
// error if the server never responded.
func GetAndTestEtcdClient(etcdClientInfo configapi.EtcdConnectionInfo) (*etcdclient.Client, error) {
	// etcd does a poor job of setting up the transport - use the Kube client stack
	transport, err := client.TransportFor(&client.Config{
		TLSClientConfig: client.TLSClientConfig{
			CertFile: etcdClientInfo.ClientCert.CertFile,
			KeyFile:  etcdClientInfo.ClientCert.KeyFile,
			CAFile:   etcdClientInfo.CA,
		},
		WrapTransport: DefaultEtcdClientTransport,
	})
	if err != nil {
		return nil, err
	}

	etcdClient := etcdclient.NewClient(etcdClientInfo.URLs)
	etcdClient.SetTransport(transport.(*http.Transport))

	for i := 0; ; i++ {
		_, err := etcdClient.Get("/", false, false)
		if err == nil || tools.IsEtcdNotFound(err) {
			break
		}
		if i > 100 {
			return nil, fmt.Errorf("could not reach etcd: %v", err)
		}
		time.Sleep(50 * time.Millisecond)
	}

	return etcdClient, nil
}
Code example #6
// DeleteService deletes a Service specified by its name.
func (registry *EtcdRegistry) DeleteService(name string) error {
	key := makeServiceKey(name)
	_, err := registry.etcdClient.Delete(key, true)
	if tools.IsEtcdNotFound(err) {
		return apiserver.NewNotFoundErr("service", name)
	}
	if err != nil {
		return err
	}
	key = makeServiceEndpointsKey(name)
	_, err = registry.etcdClient.Delete(key, true)
	if !tools.IsEtcdNotFound(err) {
		return err
	}
	return nil
}
Code example #7
func TestEtcdCreatePodWithContainersError(t *testing.T) {
	fakeClient := tools.MakeFakeEtcdClient(t)
	fakeClient.Data["/registry/hosts/machine/pods/foo"] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: nil,
		},
		E: tools.EtcdErrorNotFound,
	}
	fakeClient.Data["/registry/hosts/machine/kubelet"] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: nil,
		},
		E: tools.EtcdErrorValueRequired,
	}
	registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
	err := registry.CreatePod("machine", api.Pod{
		JSONBase: api.JSONBase{
			ID: "foo",
		},
	})
	if err == nil {
		t.Error("Unexpected non-error")
	}
	_, err = fakeClient.Get("/registry/hosts/machine/pods/foo", false, false)
	if err == nil {
		t.Error("Unexpected non-error")
	}
	if !tools.IsEtcdNotFound(err) {
		t.Errorf("Unexpected error: %#v", err)
	}
}
Code example #8
File: podmaster.go Project: chenzhen411/kubernetes
// acquireOrRenewLease either races to acquire a new master lease or updates the existing master's lease.
// It returns true if we hold the lease, and an error if one occurs.
// TODO: use the master election utility once it is merged in.
func (c *Config) acquireOrRenewLease(etcdClient *etcd.Client) (bool, error) {
	result, err := etcdClient.Get(c.key, false, false)
	if err != nil {
		if tools.IsEtcdNotFound(err) {
			// There is no current master; try to become master. Create will fail if the key already exists.
			_, err := etcdClient.Create(c.key, c.whoami, c.ttl)
			if err != nil {
				return false, err
			}
			c.lastLease = time.Now()
			return true, nil
		}
		return false, err
	}
	if result.Node.Value == c.whoami {
		glog.Infof("key already exists, we are the master (%s)", result.Node.Value)
		// We extend our lease when less than half of the TTL remains; this ensures the master doesn't flap around.
		if result.Node.Expiration.Sub(time.Now()) < time.Duration(c.ttl/2)*time.Second {
			_, err := etcdClient.CompareAndSwap(c.key, c.whoami, c.ttl, c.whoami, result.Node.ModifiedIndex)
			if err != nil {
				return false, err
			}
		}
		c.lastLease = time.Now()
		return true, nil
	}
	glog.Infof("key already exists, the master is %s, sleeping.", result.Node.Value)
	return false, nil
}
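A hedged sketch of a driving loop, not the project's actual podmaster loop: the function name, the interval, the stop channel, and the logging are assumptions. It illustrates the pattern implied by the comments above: call the acquire-or-renew function on a fixed period shorter than the TTL and act on whether the lease is currently held.

package main

import (
	"fmt"
	"time"
)

// runLeaseLoop periodically calls acquireOrRenew and reports whether we hold the lease.
// acquireOrRenew stands in for a bound call such as c.acquireOrRenewLease(etcdClient).
func runLeaseLoop(acquireOrRenew func() (bool, error), interval time.Duration, stop <-chan struct{}) {
	for {
		held, err := acquireOrRenew()
		switch {
		case err != nil:
			fmt.Printf("lease attempt failed: %v\n", err)
		case held:
			fmt.Println("we hold the master lease")
		default:
			fmt.Println("another instance is the master")
		}
		select {
		case <-stop:
			return
		case <-time.After(interval):
		}
	}
}

func main() {
	stop := make(chan struct{})
	// Fake acquire function that always reports the lease as held.
	go runLeaseLoop(func() (bool, error) { return true, nil }, 500*time.Millisecond, stop)
	time.Sleep(2 * time.Second)
	close(stop)
}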
Code example #9
File: etcd.go Project: rajdavies/origin
// DeleteImage deletes an existing image
func (r *Etcd) DeleteImage(id string) error {
	key := makeImageKey(id)
	err := r.Delete(key, false)
	if tools.IsEtcdNotFound(err) {
		return apierrors.NewNotFound("image", id)
	}
	return err
}
Code example #10
// DeleteController deletes a ReplicationController specified by its ID.
func (registry *EtcdRegistry) DeleteController(controllerID string) error {
	key := makeControllerKey(controllerID)
	_, err := registry.etcdClient.Delete(key, false)
	if tools.IsEtcdNotFound(err) {
		return apiserver.NewNotFoundErr("replicationController", controllerID)
	}
	return err
}
Code example #11
File: etcd.go Project: hungld/kubernetes
// DeleteController deletes a ReplicationController specified by its ID.
func (r *Registry) DeleteController(controllerID string) error {
	key := makeControllerKey(controllerID)
	err := r.Delete(key, false)
	if tools.IsEtcdNotFound(err) {
		return errors.NewNotFound("replicationController", controllerID)
	}
	return err
}
Code example #12
File: etcd.go Project: eghobo/kubedash
// InterpretDeleteError converts a generic etcd error on a delete
// operation into the appropriate API error.
func InterpretDeleteError(err error, kind, name string) error {
	switch {
	case tools.IsEtcdNotFound(err):
		return errors.NewNotFound(kind, name)
	default:
		return err
	}
}
Code example #13
File: etcdregistry.go Project: lmiccini/origin
// DeleteBuild deletes a Build specified by its ID.
func (r *EtcdRegistry) DeleteBuild(id string) error {
	key := makeBuildKey(id)
	err := r.Delete(key, true)
	if tools.IsEtcdNotFound(err) {
		return errors.NewNotFound("build", id)
	}
	return err
}
Code example #14
File: etcd.go Project: rajdavies/origin
// DeleteImageRepository deletes an ImageRepository by id.
func (r *Etcd) DeleteImageRepository(id string) error {
	imageRepositoryKey := makeImageRepositoryKey(id)
	err := r.Delete(imageRepositoryKey, false)
	if err != nil && tools.IsEtcdNotFound(err) {
		return apierrors.NewNotFound("imageRepository", id)
	}
	return err
}
Code example #15
File: etcd.go Project: alexmavr/dashboard
// InterpretDeleteError converts a generic etcd error on a delete
// operation into the appropriate API error.
func InterpretDeleteError(err error, kind, name string) error {
	switch {
	case tools.IsEtcdNotFound(err):
		return errors.NewNotFound(kind, name)
	case errors.IsAPIStatusError(err):
		return err
	default:
		return errors.NewInternalError(err)
	}
}
Code example #16
File: etcdregistry.go Project: lmiccini/origin
// GetBuild gets a specific Build specified by its ID.
func (r *EtcdRegistry) GetBuild(id string) (*api.Build, error) {
	var build api.Build
	err := r.ExtractObj(makeBuildKey(id), &build, false)
	if tools.IsEtcdNotFound(err) {
		return nil, errors.NewNotFound("build", id)
	}
	if err != nil {
		return nil, err
	}
	return &build, nil
}
Code example #17
File: etcdregistry.go Project: lmiccini/origin
// GetBuildConfig gets a specific BuildConfig specified by its ID.
func (r *EtcdRegistry) GetBuildConfig(id string) (*api.BuildConfig, error) {
	var config api.BuildConfig
	err := r.ExtractObj(makeBuildConfigKey(id), &config, false)
	if tools.IsEtcdNotFound(err) {
		return nil, errors.NewNotFound("buildConfig", id)
	}
	if err != nil {
		return nil, err
	}
	return &config, nil
}
Code example #18
// GetController gets a specific ReplicationController specified by its ID.
func (registry *EtcdRegistry) GetController(controllerID string) (*api.ReplicationController, error) {
	var controller api.ReplicationController
	key := makeControllerKey(controllerID)
	err := registry.helper().ExtractObj(key, &controller, false)
	if tools.IsEtcdNotFound(err) {
		return nil, apiserver.NewNotFoundErr("replicationController", controllerID)
	}
	if err != nil {
		return nil, err
	}
	return &controller, nil
}
Code example #19
// GetService obtains a Service specified by its name.
func (registry *EtcdRegistry) GetService(name string) (*api.Service, error) {
	key := makeServiceKey(name)
	var svc api.Service
	err := registry.helper().ExtractObj(key, &svc, false)
	if tools.IsEtcdNotFound(err) {
		return nil, apiserver.NewNotFoundErr("service", name)
	}
	if err != nil {
		return nil, err
	}
	return &svc, nil
}
Code example #20
File: etcd.go Project: nvdnkpr/kubernetes
// GetEndpoints obtains the endpoints for the service identified by 'name'.
func (r *Registry) GetEndpoints(name string) (*api.Endpoints, error) {
	key := makeServiceEndpointsKey(name)
	var endpoints api.Endpoints
	err := r.ExtractObj(key, &endpoints, false)
	if tools.IsEtcdNotFound(err) {
		return nil, apiserver.NewNotFoundErr("endpoints", name)
	}
	if err != nil {
		return nil, err
	}
	return &endpoints, nil
}
Code example #21
File: etcd.go Project: asim/kubernetes
// GetServices finds the list of services and their endpoints from etcd.
// This operation is akin to periodically setting a known-good state.
func (s ConfigSourceEtcd) GetServices() ([]api.Service, []api.Endpoints, error) {
	response, err := s.client.Get(registryRoot+"/specs", true, false)
	if err != nil {
		if tools.IsEtcdNotFound(err) {
			glog.V(1).Infof("Failed to get the key %s: %v", registryRoot, err)
		} else {
			glog.Errorf("Failed to contact etcd for key %s: %v", registryRoot, err)
		}
		return []api.Service{}, []api.Endpoints{}, err
	}
	if response.Node.Dir {
		retServices := make([]api.Service, len(response.Node.Nodes))
		retEndpoints := make([]api.Endpoints, len(response.Node.Nodes))
		// Ok, so we have directories, this list should be the list
		// of services. Find the local port to listen on and remote endpoints
		// and create a Service entry for it.
		for i, node := range response.Node.Nodes {
			var svc api.Service
			err = runtime.DefaultCodec.DecodeInto([]byte(node.Value), &svc)
			if err != nil {
				glog.Errorf("Failed to load Service: %s (%#v)", node.Value, err)
				continue
			}
			retServices[i] = svc
			endpoints, err := s.GetEndpoints(svc.ID)
			if err != nil {
				if tools.IsEtcdNotFound(err) {
					glog.V(1).Infof("Unable to get endpoints for %s : %v", svc.ID, err)
				}
				glog.Errorf("Couldn't get endpoints for %s : %v skipping", svc.ID, err)
				endpoints = api.Endpoints{}
			} else {
				glog.Infof("Got service: %s on localport %d mapping to: %s", svc.ID, svc.Port, endpoints)
			}
			retEndpoints[i] = endpoints
		}
		return retServices, retEndpoints, err
	}
	return nil, nil, fmt.Errorf("did not get the root of the registry %s", registryRoot)
}
Code example #22
File: etcd.go Project: htomika/kubernetes
// run loops forever looking for changes to a key in etcd
func (s *SourceEtcd) run() {
	index := uint64(0)
	for {
		nextIndex, err := s.fetchNextState(index)
		if err != nil {
			if !tools.IsEtcdNotFound(err) {
				glog.Errorf("Unable to extract from the response (%s): %v", s.key, err)
			}
			return
		}
		index = nextIndex
	}
}
Code example #23
File: service.go Project: chenzhen411/kubernetes
func (s *SchedulerServer) fetchFrameworkID(client tools.EtcdGetSet) (*mesos.FrameworkID, error) {
	if s.FailoverTimeout > 0 {
		if response, err := client.Get(meta.FrameworkIDKey, false, false); err != nil {
			if !tools.IsEtcdNotFound(err) {
				return nil, fmt.Errorf("unexpected failure attempting to load framework ID from etcd: %v", err)
			}
			log.V(1).Infof("did not find framework ID in etcd")
		} else if response.Node.Value != "" {
			log.Infof("configuring FrameworkInfo with Id found in etcd: '%s'", response.Node.Value)
			return mutil.NewFrameworkID(response.Node.Value), nil
		}
	} else {
		//TODO(jdef) this seems like a totally hackish way to clean up the framework ID
		if _, err := client.Delete(meta.FrameworkIDKey, true); err != nil {
			if !tools.IsEtcdNotFound(err) {
				return nil, fmt.Errorf("failed to delete framework ID from etcd: %v", err)
			}
			log.V(1).Infof("nothing to delete: did not find framework ID in etcd")
		}
	}
	return nil, nil
}
Code example #24
File: etcd.go Project: chenzhen411/kubernetes
// Refresh reloads the RangeAllocation from etcd.
func (e *Etcd) Refresh() (*api.RangeAllocation, error) {
	e.lock.Lock()
	defer e.lock.Unlock()

	existing := &api.RangeAllocation{}
	if err := e.helper.ExtractObj(e.baseKey, existing, false); err != nil {
		if tools.IsEtcdNotFound(err) {
			return nil, nil
		}
		return nil, etcderr.InterpretGetError(err, e.kind, "")
	}

	return existing, nil
}
Code example #25
File: etcd.go Project: linuxwhy/kubernetes
// DeleteService deletes a Service specified by its name.
func (r *Registry) DeleteService(name string) error {
	key := makeServiceKey(name)
	err := r.Delete(key, true)
	if err != nil {
		return etcderr.InterpretDeleteError(err, "service", name)
	}

	// TODO: can leave dangling endpoints, and potentially return incorrect
	// endpoints if a new service is created with the same name
	key = makeServiceEndpointsKey(name)
	if err := r.Delete(key, true); err != nil && !tools.IsEtcdNotFound(err) {
		return etcderr.InterpretDeleteError(err, "endpoints", name)
	}
	return nil
}
Code example #26
File: master.go Project: rajdavies/origin
func (c *config) getEtcdClient() (*etcdclient.Client, []string) {
	etcdServers := []string{"http://" + c.masterHost + ":4001"}
	etcdClient := etcdclient.NewClient(etcdServers)

	for i := 0; ; i++ {
		_, err := etcdClient.Get("/", false, false)
		if err == nil || tools.IsEtcdNotFound(err) {
			break
		}
		if i > 100 {
			glog.Fatalf("Could not reach etcd: %v", err)
		}
		time.Sleep(50 * time.Millisecond)
	}

	return etcdClient, etcdServers
}
Code example #27
File: kubelet.go Project: rainsome-org1/kubernetes
func (kl *Kubelet) getKubeletStateFromEtcd(key string, updateChannel chan<- manifestUpdate) error {
	response, err := kl.EtcdClient.Get(key, true, false)
	if err != nil {
		if tools.IsEtcdNotFound(err) {
			return nil
		}
		glog.Errorf("Error on etcd get of %s: %v", key, err)
		return err
	}
	manifests, err := kl.ResponseToManifests(response)
	if err != nil {
		glog.Errorf("Error parsing response (%v): %s", response, err)
		return err
	}
	glog.Infof("Got state from etcd: %+v", manifests)
	updateChannel <- manifestUpdate{etcdSource, manifests}
	return nil
}
Code example #28
File: etcd.go Project: ericcapricorn/kubernetes
// GetServices finds the list of services and their endpoints from etcd.
// This operation is akin to periodically setting a known-good state.
func (s ConfigSourceEtcd) GetServices() ([]api.Service, []api.Endpoints, error) {
	// this is a recursive query now that services are namespaced under "/registry/services/specs/<ns>/<name>"
	response, err := s.client.Get(registryRoot+"/specs", false, true)
	if err != nil {
		if tools.IsEtcdNotFound(err) {
			glog.V(4).Infof("Failed to get the key %s: %v", registryRoot, err)
		} else {
			glog.Errorf("Failed to contact etcd for key %s: %v", registryRoot, err)
		}
		return []api.Service{}, []api.Endpoints{}, err
	}
	// this code needs to go through the API server in the future, this is one big hack
	if response.Node.Dir {
		retServices := []api.Service{}
		retEndpoints := []api.Endpoints{}
		return s.decodeServices(response.Node, retServices, retEndpoints)
	}
	return nil, nil, fmt.Errorf("did not get the root of the registry %s", registryRoot)
}
Code example #29
File: etcd.go Project: ericcapricorn/kubernetes
// decodeServices recurses from the root of the service storage directory into each namespace to get each service and endpoint object
func (s ConfigSourceEtcd) decodeServices(node *etcd.Node, retServices []api.Service, retEndpoints []api.Endpoints) ([]api.Service, []api.Endpoints, error) {
	// TODO this needs to go against API server desperately, so much redundant error prone code here
	// we hit a namespace boundary, recurse until we find actual nodes
	if node.Dir {
		for _, n := range node.Nodes {
			var err error // Don't shadow the ret* variables.
			retServices, retEndpoints, err = s.decodeServices(n, retServices, retEndpoints)
			if err != nil {
				return retServices, retEndpoints, err
			}
		}
		return retServices, retEndpoints, nil
	}

	// we have an actual service node
	var svc api.Service
	err := latest.Codec.DecodeInto([]byte(node.Value), &svc)
	if err != nil {
		glog.Errorf("Failed to load Service: %s (%#v)", node.Value, err)
	} else {
		// so we got a service we can handle, and now get endpoints
		retServices = append(retServices, svc)
		// get the endpoints
		endpoints, err := s.GetEndpoints(svc.Namespace, svc.Name)
		if err != nil {
			if tools.IsEtcdNotFound(err) {
				glog.V(4).Infof("Unable to get endpoints for %s %s : %v", svc.Namespace, svc.Name, err)
			}
			glog.Errorf("Couldn't get endpoints for %s %s : %v skipping", svc.Namespace, svc.Name, err)
			endpoints = api.Endpoints{}
		} else {
			glog.V(3).Infof("Got service: %s %s on localport %d mapping to: %s", svc.Namespace, svc.Name, svc.Spec.Port, endpoints)
		}
		retEndpoints = append(retEndpoints, endpoints)
	}
	return retServices, retEndpoints, nil
}
Code example #30
File: userclient_test.go Project: cjnygard/origin
func TestUserInitialization(t *testing.T) {

	masterConfig, clusterAdminKubeConfig, err := testutil.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	etcdClient, err := etcd.GetAndTestEtcdClient(masterConfig.EtcdClientInfo)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	etcdHelper, err := origin.NewEtcdHelper(etcdClient, masterConfig.EtcdStorageConfig.OpenShiftStorageVersion, masterConfig.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	userRegistry := userregistry.NewRegistry(useretcd.NewREST(etcdHelper))
	identityRegistry := identityregistry.NewRegistry(identityetcd.NewREST(etcdHelper))
	useridentityMappingRegistry := useridentitymapping.NewRegistry(useridentitymapping.NewREST(userRegistry, identityRegistry))

	lookup := identitymapper.NewLookupIdentityMapper(useridentityMappingRegistry, userRegistry)
	provisioner := identitymapper.NewAlwaysCreateUserIdentityToUserMapper(identityRegistry, userRegistry)

	testcases := map[string]struct {
		Identity authapi.UserIdentityInfo
		Mapper   authapi.UserIdentityMapper

		CreateIdentity *api.Identity
		CreateUser     *api.User
		CreateMapping  *api.UserIdentityMapping
		UpdateUser     *api.User

		ExpectedErr      error
		ExpectedUserName string
		ExpectedFullName string
	}{
		"lookup missing identity": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   lookup,

			ExpectedErr: kerrs.NewNotFound("UserIdentityMapping", "idp:bob"),
		},
		"lookup existing identity": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   lookup,

			CreateUser:     makeUser("mappeduser"),
			CreateIdentity: makeIdentity("idp", "bob"),
			CreateMapping:  makeMapping("mappeduser", "idp:bob"),

			ExpectedUserName: "******",
		},
		"provision missing identity and user": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   provisioner,

			ExpectedUserName: "******",
		},
		"provision missing identity and user with preferred username and display name": {
			Identity: makeIdentityInfo("idp", "bob", map[string]string{authapi.IdentityDisplayNameKey: "Bob, Sr.", authapi.IdentityPreferredUsernameKey: "admin"}),
			Mapper:   provisioner,

			ExpectedUserName: "******",
			ExpectedFullName: "Bob, Sr.",
		},
		"provision missing identity for existing user": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   provisioner,

			CreateUser: makeUser("bob", "idp:bob"),

			ExpectedUserName: "******",
		},
		"provision missing identity with conflicting user": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   provisioner,

			CreateUser: makeUser("bob"),

			ExpectedUserName: "******",
		},
		"provision missing identity with conflicting user and preferred username": {
			Identity: makeIdentityInfo("idp", "bob", map[string]string{authapi.IdentityPreferredUsernameKey: "admin"}),
			Mapper:   provisioner,

			CreateUser: makeUser("admin"),

			ExpectedUserName: "******",
		},
		"provision with existing unmapped identity": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   provisioner,

			CreateIdentity: makeIdentity("idp", "bob"),

			ExpectedErr: kerrs.NewNotFound("UserIdentityMapping", "idp:bob"),
		},
		"provision with existing mapped identity with invalid user UID": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   provisioner,

			CreateUser:     makeUser("mappeduser"),
			CreateIdentity: makeIdentityWithUserReference("idp", "bob", "mappeduser", "invalidUID"),

			ExpectedErr: kerrs.NewNotFound("UserIdentityMapping", "idp:bob"),
		},
		"provision with existing mapped identity without user backreference": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   provisioner,

			CreateUser:     makeUser("mappeduser"),
			CreateIdentity: makeIdentity("idp", "bob"),
			CreateMapping:  makeMapping("mappeduser", "idp:bob"),
			// Update user to a version which does not reference the identity
			UpdateUser: makeUser("mappeduser"),

			ExpectedErr: kerrs.NewNotFound("UserIdentityMapping", "idp:bob"),
		},
		"provision returns existing mapping": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   provisioner,

			CreateUser:     makeUser("mappeduser"),
			CreateIdentity: makeIdentity("idp", "bob"),
			CreateMapping:  makeMapping("mappeduser", "idp:bob"),

			ExpectedUserName: "******",
		},
	}

	for k, testcase := range testcases {
		// Cleanup
		if err := etcdHelper.Delete(useretcd.EtcdPrefix, true); err != nil && !tools.IsEtcdNotFound(err) {
			t.Fatalf("Could not clean up users: %v", err)
		}
		if err := etcdHelper.Delete(identityetcd.EtcdPrefix, true); err != nil && !tools.IsEtcdNotFound(err) {
			t.Fatalf("Could not clean up identities: %v", err)
		}

		// Pre-create items
		if testcase.CreateUser != nil {
			_, err := clusterAdminClient.Users().Create(testcase.CreateUser)
			if err != nil {
				t.Errorf("%s: Could not create user: %v", k, err)
				continue
			}
		}
		if testcase.CreateIdentity != nil {
			_, err := clusterAdminClient.Identities().Create(testcase.CreateIdentity)
			if err != nil {
				t.Errorf("%s: Could not create identity: %v", k, err)
				continue
			}
		}
		if testcase.CreateMapping != nil {
			_, err := clusterAdminClient.UserIdentityMappings().Update(testcase.CreateMapping)
			if err != nil {
				t.Errorf("%s: Could not create mapping: %v", k, err)
				continue
			}
		}
		if testcase.UpdateUser != nil {
			if testcase.UpdateUser.ResourceVersion == "" {
				existingUser, err := clusterAdminClient.Users().Get(testcase.UpdateUser.Name)
				if err != nil {
					t.Errorf("%s: Could not get user to update: %v", k, err)
					continue
				}
				testcase.UpdateUser.ResourceVersion = existingUser.ResourceVersion
			}
			_, err := clusterAdminClient.Users().Update(testcase.UpdateUser)
			if err != nil {
				t.Errorf("%s: Could not update user: %v", k, err)
				continue
			}
		}

		// Spawn 5 simultaneous mappers to test race conditions
		var wg sync.WaitGroup
		for i := 0; i < 5; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()

				userInfo, err := testcase.Mapper.UserFor(testcase.Identity)
				if err != nil {
					if testcase.ExpectedErr == nil {
						t.Errorf("%s: Expected success, got error '%v'", k, err)
					} else if err.Error() != testcase.ExpectedErr.Error() {
						t.Errorf("%s: Expected error %v, got '%v'", k, testcase.ExpectedErr.Error(), err)
					}
					return
				}
				if err == nil && testcase.ExpectedErr != nil {
					t.Errorf("%s: Expected error '%v', got none", k, testcase.ExpectedErr)
					return
				}

				if userInfo.GetName() != testcase.ExpectedUserName {
					t.Errorf("%s: Expected username %s, got %s", k, testcase.ExpectedUserName, userInfo.GetName())
					return
				}

				user, err := clusterAdminClient.Users().Get(userInfo.GetName())
				if err != nil {
					t.Errorf("%s: Error getting user: %v", k, err)
				}
				if user.FullName != testcase.ExpectedFullName {
					t.Errorf("%s: Expected full name %s, got %s", k, testcase.ExpectedFullName, user.FullName)
				}

			}()
		}
		wg.Wait()
	}
}