func addPods(store cache.Store, namespace string, nPods int, nPorts int, nNotReady int) {
	for i := 0; i < nPods+nNotReady; i++ {
		p := &v1.Pod{
			TypeMeta: metav1.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
			ObjectMeta: v1.ObjectMeta{
				Namespace: namespace,
				Name:      fmt.Sprintf("pod%d", i),
				Labels:    map[string]string{"foo": "bar"},
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{{Ports: []v1.ContainerPort{}}},
			},
			Status: v1.PodStatus{
				PodIP: fmt.Sprintf("1.2.3.%d", 4+i),
				Conditions: []v1.PodCondition{
					{
						Type:   v1.PodReady,
						Status: v1.ConditionTrue,
					},
				},
			},
		}
		if i >= nPods {
			p.Status.Conditions[0].Status = v1.ConditionFalse
		}
		for j := 0; j < nPorts; j++ {
			p.Spec.Containers[0].Ports = append(p.Spec.Containers[0].Ports,
				v1.ContainerPort{Name: fmt.Sprintf("port%d", j), ContainerPort: int32(8080 + j)})
		}
		store.Add(p)
	}
}
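A minimal usage sketch, assuming a store created with cache.NewStore and the standard cache.MetaNamespaceKeyFunc from the same package; the namespace and counts are illustrative:

// Populate a fresh store with 3 ready pods (2 ports each) plus 1 not-ready pod.
podStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
addPods(podStore, "default", 3, 2, 1)
// The store now holds pod0..pod2 with PodReady=ConditionTrue and pod3 with ConditionFalse.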
Example #2
func storeVersion(t *testing.T, prefix string, c cache.Store, version string, expectedReturn bool) {
	pv := newVolume("pvName", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimDelete)
	pv.ResourceVersion = version
	ret, err := storeObjectUpdate(c, pv, "volume")
	if err != nil {
		t.Errorf("%s: expected storeObjectUpdate to succeed, got: %v", prefix, err)
	}
	if expectedReturn != ret {
		t.Errorf("%s: expected storeObjectUpdate to return %v, got: %v", prefix, expectedReturn, ret)
	}

	// find the stored version

	pvObj, found, err := c.GetByKey("pvName")
	if err != nil {
		t.Errorf("expected volume 'pvName' in the cache, got error instead: %v", err)
	}
	if !found {
		t.Errorf("expected volume 'pvName' in the cache but it was not found")
	}
	pv, ok := pvObj.(*api.PersistentVolume)
	if !ok {
		t.Errorf("expected volume in the cache, got different object instead: %+v", pvObj)
	}

	if ret {
		if pv.ResourceVersion != version {
			t.Errorf("expected volume with version %s in the cache, got %s instead", version, pv.ResourceVersion)
		}
	} else {
		if pv.ResourceVersion == version {
			t.Errorf("expected volume with version other than %s in the cache, got %s instead", version, pv.ResourceVersion)
		}
	}
}
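A sketch of a table-driven caller, assuming a store c keyed so the volume lands under "pvName"; the version sequence is illustrative:

// Hypothetical sequence: versions 1 and 3 must be stored, the older 2 rejected.
storeVersion(t, "store version 1", c, "1", true)
storeVersion(t, "store version 3", c, "3", true)
storeVersion(t, "store older version 2", c, "2", false)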
// replacePods replaces content of the store with the given pods.
func replacePods(pods []*api.Pod, store cache.Store) {
	found := make([]interface{}, 0, len(pods))
	for i := range pods {
		found = append(found, pods[i])
	}
	expectNoError(store.Replace(found, "0"))
}
func addPods(store cache.Store, namespace string, nPods int, nPorts int, nNotReady int) {
	for i := 0; i < nPods+nNotReady; i++ {
		p := &api.Pod{
			TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Default.Version()},
			ObjectMeta: api.ObjectMeta{
				Namespace: namespace,
				Name:      fmt.Sprintf("pod%d", i),
				Labels:    map[string]string{"foo": "bar"},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{{Ports: []api.ContainerPort{}}},
			},
			Status: api.PodStatus{
				PodIP: fmt.Sprintf("1.2.3.%d", 4+i),
				Conditions: []api.PodCondition{
					{
						Type:   api.PodReady,
						Status: api.ConditionTrue,
					},
				},
			},
		}
		if i >= nPods {
			p.Status.Conditions[0].Status = api.ConditionFalse
		}
		for j := 0; j < nPorts; j++ {
			p.Spec.Containers[0].Ports = append(p.Spec.Containers[0].Ports,
				api.ContainerPort{Name: fmt.Sprintf("port%d", j), ContainerPort: 8080 + j})
		}
		store.Add(p)
	}
}
Example #5
// Wait till the passFunc confirms that the object it expects to see is in the store.
// Used to observe reflected events.
func waitForReflection(t *testing.T, s cache.Store, key string, passFunc func(n interface{}) bool) error {
	nodes := []*api.Node{}
	err := wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) {
		if n, _, err := s.GetByKey(key); err == nil && passFunc(n) {
			return true, nil
		} else {
			if err != nil {
				t.Errorf("Unexpected error: %v", err)
			} else {
				if n == nil {
					nodes = append(nodes, nil)
				} else {
					nodes = append(nodes, n.(*api.Node))
				}
			}
			return false, nil
		}
	})
	if err != nil {
		t.Logf("Logging consecutive node versions received from store:")
		for i, n := range nodes {
			t.Logf("%d: %#v", i, n)
		}
	}
	return err
}
Example #6
// cacheReviewRecord updates the cache based on the request processed
func cacheReviewRecord(request *reviewRequest, lastKnownValue *reviewRecord, review Review, reviewRecordStore cache.Store) {
	reviewRecord := &reviewRecord{
		reviewRequest: &reviewRequest{namespace: request.namespace, policyUIDToResourceVersion: map[types.UID]string{}, policyBindingUIDToResourceVersion: map[types.UID]string{}},
		groups:        review.Groups(),
		users:         review.Users(),
	}
	// keep what we last believe we knew by default
	if lastKnownValue != nil {
		reviewRecord.namespaceResourceVersion = lastKnownValue.namespaceResourceVersion
		for k, v := range lastKnownValue.policyUIDToResourceVersion {
			reviewRecord.policyUIDToResourceVersion[k] = v
		}
		for k, v := range lastKnownValue.policyBindingUIDToResourceVersion {
			reviewRecord.policyBindingUIDToResourceVersion[k] = v
		}
	}

	// update the review record relative to what drove this request
	if len(request.namespaceResourceVersion) > 0 {
		reviewRecord.namespaceResourceVersion = request.namespaceResourceVersion
	}
	for k, v := range request.policyUIDToResourceVersion {
		reviewRecord.policyUIDToResourceVersion[k] = v
	}
	for k, v := range request.policyBindingUIDToResourceVersion {
		reviewRecord.policyBindingUIDToResourceVersion[k] = v
	}
	// update the cache record
	reviewRecordStore.Add(reviewRecord)
}
Example #7
// Wait till the passFunc confirms that the object it expects to see is in the store.
// Used to observe reflected events.
func waitForReflection(s cache.Store, key string, passFunc func(n interface{}) bool) error {
	return wait.Poll(time.Millisecond*10, time.Second*20, func() (bool, error) {
		if n, _, err := s.GetByKey(key); err == nil && passFunc(n) {
			return true, nil
		}
		return false, nil
	})
}
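A hedged caller sketch, assuming a node store keyed by node name; the key and the label checked in passFunc are illustrative:

// Block until the reflected node carries the label the test has just applied.
err := waitForReflection(nodeStore, "node0", func(n interface{}) bool {
	node, ok := n.(*api.Node)
	return ok && node.Labels["disktype"] == "ssd"
})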
Example #8
func lastKnown(reviewRecordStore cache.Store, namespace string) (*reviewRecord, error) {
	obj, exists, err := reviewRecordStore.GetByKey(namespace)
	if err != nil {
		return nil, err
	}
	if exists {
		return obj.(*reviewRecord), nil
	}
	return nil, nil
}
Example #9
// purgeDeletedNamespaces will remove all namespaces enumerated in a reviewRecordStore that are not in the namespace set
func purgeDeletedNamespaces(namespaceSet *sets.String, userSubjectRecordStore cache.Store, groupSubjectRecordStore cache.Store, reviewRecordStore cache.Store) {
	reviewRecordItems := reviewRecordStore.List()
	for i := range reviewRecordItems {
		reviewRecord := reviewRecordItems[i].(*reviewRecord)
		if !namespaceSet.Has(reviewRecord.namespace) {
			deleteNamespaceFromSubjects(userSubjectRecordStore, reviewRecord.users, reviewRecord.namespace)
			deleteNamespaceFromSubjects(groupSubjectRecordStore, reviewRecord.groups, reviewRecord.namespace)
			reviewRecordStore.Delete(reviewRecord)
		}
	}
}
Example #10
func getFromCache(store cache.Store, kind, namespace, name string) (interface{}, error) {
	key := cacheLookupKey(namespace, name)
	obj, ok, err := store.Get(key)
	if err != nil {
		return nil, err
	}
	if !ok {
		return nil, fmt.Errorf("could not find %s %q", kind, key)
	}
	}
	return obj, nil
}
// Wait for ingress status to be updated to match the desiredStatus.
func WaitForFedStatusUpdate(t *testing.T, store cache.Store, key string, desiredStatus apiv1.LoadBalancerStatus, timeout time.Duration) error {
	retryInterval := 100 * time.Millisecond
	err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) {
		obj, found, err := store.GetByKey(key)
		if !found || err != nil {
			return false, err
		}
		ingress := obj.(*extensionsv1beta1.Ingress)
		return reflect.DeepEqual(ingress.Status.LoadBalancer, desiredStatus), nil
	})
	return err
}
Example #12
// deleteNamespaceFromSubjects removes the namespace from each subject
// if no other namespaces are active to that subject, it will also delete the subject from the cache entirely
func deleteNamespaceFromSubjects(subjectRecordStore cache.Store, subjects []string, namespace string) {
	for _, subject := range subjects {
		obj, exists, _ := subjectRecordStore.GetByKey(subject)
		if exists {
			subjectRecord := obj.(*subjectRecord)
			delete(subjectRecord.namespaces, namespace)
			if len(subjectRecord.namespaces) == 0 {
				subjectRecordStore.Delete(subjectRecord)
			}
		}
	}
}
// cleanupOrphanedPods deletes pods that are bound to nodes that don't
// exist.
func cleanupOrphanedPods(pods []*api.Pod, nodeStore cache.Store, forcefulDeletePodFunc func(*api.Pod) error) {
	for _, pod := range pods {
		if pod.Spec.NodeName == "" {
			continue
		}
		if _, exists, _ := nodeStore.GetByKey(pod.Spec.NodeName); exists {
			continue
		}
		if err := forcefulDeletePodFunc(pod); err != nil {
			utilruntime.HandleError(err)
		}
	}
}
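A usage sketch; the clientset accessor and zero-grace-period delete are assumptions here, not part of this helper:

// Force-delete every listed pod whose node is absent from nodeStore.
cleanupOrphanedPods(pods, nodeStore, func(p *api.Pod) error {
	return kubeClient.Legacy().Pods(p.Namespace).Delete(p.Name, api.NewDeleteOptions(0))
})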
Example #14
// getMatchingPolicies returns policies from the store. For now this returns
// everything; in the future it can filter based on UserInfo and permissions.
func getMatchingPolicies(store cache.Store, user user.Info, sa user.Info) ([]*extensions.PodSecurityPolicy, error) {
	matchedPolicies := make([]*extensions.PodSecurityPolicy, 0)

	for _, c := range store.List() {
		constraint, ok := c.(*extensions.PodSecurityPolicy)
		if !ok {
			return nil, errors.NewInternalError(fmt.Errorf("error converting object from store to a pod security policy: %v", c))
		}
		matchedPolicies = append(matchedPolicies, constraint)
	}

	return matchedPolicies, nil
}
Example #15
// addSubjectsToNamespace adds the specified namespace to each subject
func addSubjectsToNamespace(subjectRecordStore cache.Store, subjects []string, namespace string) {
	for _, subject := range subjects {
		var item *subjectRecord
		obj, exists, _ := subjectRecordStore.GetByKey(subject)
		if exists {
			item = obj.(*subjectRecord)
		} else {
			item = &subjectRecord{subject: subject, namespaces: sets.NewString()}
			subjectRecordStore.Add(item)
		}
		item.namespaces.Insert(namespace)
	}
}
// maybeDeleteTerminatingPod non-gracefully deletes pods that are terminating
// but should not be gracefully terminated.
func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}, nodeStore cache.Store, forcefulDeletePodFunc func(*api.Pod) error) {
	pod, ok := obj.(*api.Pod)
	if !ok {
		return
	}

	// consider only terminating pods
	if pod.DeletionTimestamp == nil {
		return
	}

	// delete terminating pods that have not yet been scheduled
	if len(pod.Spec.NodeName) == 0 {
		utilruntime.HandleError(forcefulDeletePodFunc(pod))
		return
	}

	nodeObj, found, err := nodeStore.GetByKey(pod.Spec.NodeName)
	if err != nil {
		// this can only happen if the Store.KeyFunc has a problem creating
		// a key for the pod. If it happens once, it will happen again so
		// don't bother requeuing the pod.
		utilruntime.HandleError(err)
		return
	}

	// delete terminating pods that have been scheduled on
	// nonexistent nodes
	if !found {
		glog.Warningf("Unable to find Node: %v, deleting all assigned Pods.", pod.Spec.NodeName)
		utilruntime.HandleError(forcefulDeletePodFunc(pod))
		return
	}

	// delete terminating pods that have been scheduled on
	// nodes that do not support graceful termination
	// TODO(mikedanese): this can be removed when we no longer
	// guarantee backwards compatibility of master API to kubelets with
	// versions less than 1.1.0
	node := nodeObj.(*api.Node)
	v, err := version.Parse(node.Status.NodeInfo.KubeletVersion)
	if err != nil {
		glog.V(0).Infof("couldn't parse verions %q of minion: %v", node.Status.NodeInfo.KubeletVersion, err)
		utilruntime.HandleError(forcefulDeletePodFunc(pod))
		return
	}
	if gracefulDeletionVersion.GT(v) {
		utilruntime.HandleError(forcefulDeletePodFunc(pod))
		return
	}
}
Example #17
// purgeDeletedNamespaces will remove all namespaces enumerated in a reviewRecordStore that are not in the namespace set
func (ac *AuthorizationCache) purgeDeletedNamespaces(oldNamespaces, newNamespaces sets.String, userSubjectRecordStore cache.Store, groupSubjectRecordStore cache.Store, reviewRecordStore cache.Store) {
	reviewRecordItems := reviewRecordStore.List()
	for i := range reviewRecordItems {
		reviewRecord := reviewRecordItems[i].(*reviewRecord)
		if !newNamespaces.Has(reviewRecord.namespace) {
			deleteNamespaceFromSubjects(userSubjectRecordStore, reviewRecord.users, reviewRecord.namespace)
			deleteNamespaceFromSubjects(groupSubjectRecordStore, reviewRecord.groups, reviewRecord.namespace)
			reviewRecordStore.Delete(reviewRecord)
		}
	}

	for namespace := range oldNamespaces.Difference(newNamespaces) {
		ac.notifyWatchers(namespace, nil, sets.String{}, sets.String{})
	}
}
Example #18
// getMatchingSecurityContextConstraints returns constraints from the store that match the group,
// uid, or user of the service account.
func getMatchingSecurityContextConstraints(store cache.Store, userInfo user.Info) ([]*kapi.SecurityContextConstraints, error) {
	matchedConstraints := make([]*kapi.SecurityContextConstraints, 0)

	for _, c := range store.List() {
		constraint, ok := c.(*kapi.SecurityContextConstraints)
		if !ok {
			return nil, errors.NewInternalError(fmt.Errorf("error converting object from store to a security context constraint: %v", c))
		}
		if ConstraintAppliesTo(constraint, userInfo) {
			matchedConstraints = append(matchedConstraints, constraint)
		}
	}

	return matchedConstraints, nil
}
// create count pods with the given phase for the given rc (same selectors and namespace), and add them to the store.
func newPodList(store cache.Store, count int, status api.PodPhase, rc *api.ReplicationController, name string) *api.PodList {
	pods := []api.Pod{}
	var trueVar = true
	controllerReference := api.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name, Controller: &trueVar}
	for i := 0; i < count; i++ {
		pod := newPod(fmt.Sprintf("%s%d", name, i), rc, status)
		pod.OwnerReferences = []api.OwnerReference{controllerReference}
		if store != nil {
			store.Add(pod)
		}
		pods = append(pods, *pod)
	}
	return &api.PodList{
		Items: pods,
	}
}
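A brief usage sketch, with the store and replication controller assumed to come from the surrounding test fixture:

// Seed the store with 2 running pods owned by rc, and get them back as a list.
podList := newPodList(podStore, 2, api.PodRunning, rc, "pod")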
// Wait for finalizers to appear in federation store.
func WaitForFinalizersInFederationStore(ingressController *IngressController, store cache.Store, key string) error {
	retryInterval := 100 * time.Millisecond
	timeout := wait.ForeverTestTimeout
	err := wait.PollImmediate(retryInterval, timeout, func() (bool, error) {
		obj, found, err := store.GetByKey(key)
		if !found || err != nil {
			return false, err
		}
		ingress := obj.(*extensionsv1beta1.Ingress)
		if ingressController.hasFinalizerFunc(ingress, apiv1.FinalizerOrphan) &&
			ingressController.hasFinalizerFunc(ingress, deletionhelper.FinalizerDeleteFromUnderlyingClusters) {
			return true, nil
		}
		return false, nil
	})
	return err
}
// create count pods with the given phase for the given ReplicaSet (same selectors and namespace), and add them to the store.
func newPodList(store cache.Store, count int, status api.PodPhase, labelMap map[string]string, rs *extensions.ReplicaSet, name string) *api.PodList {
	pods := []api.Pod{}
	var trueVar = true
	controllerReference := api.OwnerReference{UID: rs.UID, APIVersion: "v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &trueVar}
	for i := 0; i < count; i++ {
		pod := newPod(fmt.Sprintf("%s%d", name, i), rs, status)
		pod.ObjectMeta.Labels = labelMap
		pod.OwnerReferences = []api.OwnerReference{controllerReference}
		if store != nil {
			store.Add(pod)
		}
		pods = append(pods, *pod)
	}
	return &api.PodList{
		Items: pods,
	}
}
Example #22
// SyncAllPodsWithStore lists all pods and inserts them into the given store.
// Though this function is written in a generic manner, it is only used by the
// controllers for a specific purpose, to synchronously populate the store
// with the first batch of pods that would otherwise be sent by the Informer.
// Doing this avoids more complicated forms of synchronization with the
// Informer, though it also means that the controller calling this function
// will receive "OnUpdate" events for all the pods in the store, instead of
// "OnAdd". This should be ok, since most controllers are level triggered
// and make decisions based on the contents of the store.
//
// TODO: Extend this logic to load arbitrary local state for the controllers
// instead of just pods.
func SyncAllPodsWithStore(kubeClient clientset.Interface, store cache.Store) {
	var allPods *api.PodList
	var err error
	listOptions := api.ListOptions{LabelSelector: labels.Everything(), FieldSelector: fields.Everything()}
	for {
		if allPods, err = kubeClient.Legacy().Pods(api.NamespaceAll).List(listOptions); err != nil {
			glog.Warningf("Retrying pod list: %v", err)
			continue
		}
		break
	}
	pods := []interface{}{}
	for i := range allPods.Items {
		p := allPods.Items[i]
		glog.V(4).Infof("Initializing store with pod %v/%v", p.Namespace, p.Name)
		pods = append(pods, &p)
	}
	store.Replace(pods, allPods.ResourceVersion)
}
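A sketch of the intended call site, with the controller names assumed: populate the store synchronously, then let the Informer take over:

// Prime the pod store before the Informer starts delivering events.
SyncAllPodsWithStore(kubeClient, podStore)
go podController.Run(stopCh)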
// create count pods with the given phase for the given rc (same selectors and namespace), and add them to the store.
func newPodList(store cache.Store, count int, status v1.PodPhase, rc *v1.ReplicationController) *v1.PodList {
	pods := []v1.Pod{}
	for i := 0; i < count; i++ {
		newPod := v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:      fmt.Sprintf("pod%d", i),
				Labels:    rc.Spec.Selector,
				Namespace: rc.Namespace,
			},
			Status: v1.PodStatus{Phase: status},
		}
		if store != nil {
			store.Add(&newPod)
		}
		pods = append(pods, newPod)
	}
	return &v1.PodList{
		Items: pods,
	}
}
// create count pods with the given phase for the given ReplicaSet (same selectors and namespace), and add them to the store.
func newPodList(store cache.Store, count int, status api.PodPhase, labelMap map[string]string, rs *extensions.ReplicaSet) *api.PodList {
	pods := []api.Pod{}
	for i := 0; i < count; i++ {
		newPod := api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:      fmt.Sprintf("pod%d", i),
				Labels:    labelMap,
				Namespace: rs.Namespace,
			},
			Status: api.PodStatus{Phase: status},
		}
		if store != nil {
			store.Add(&newPod)
		}
		pods = append(pods, newPod)
	}
	return &api.PodList{
		Items: pods,
	}
}
// create count pods with the given phase for the given rc (same selectors and namespace), and add them to the store.
func newPodList(store cache.Store, count int, status api.PodPhase, rc *api.ReplicationController, name string) *api.PodList {
	pods := []api.Pod{}
	for i := 0; i < count; i++ {
		newPod := api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:      fmt.Sprintf("%s%d", name, i),
				Labels:    rc.Spec.Selector,
				Namespace: rc.Namespace,
			},
			Status: api.PodStatus{Phase: status},
		}
		if store != nil {
			store.Add(&newPod)
		}
		pods = append(pods, newPod)
	}
	return &api.PodList{
		Items: pods,
	}
}
Example #26
// initializeCaches fills all controller caches with initial data from etcd so
// that the caches are already populated when the first addClaim/addVolume
// events arrive and the initial synchronization of the controller can proceed.
func (ctrl *PersistentVolumeController) initializeCaches(volumeStore cache.Store, claimSource cache.ListerWatcher) {
	volumeList := volumeStore.List()
	for _, obj := range volumeList {
		volume, ok := obj.(*api.PersistentVolume)
		if !ok {
			glog.Errorf("PersistentVolumeController can't initialize caches, expected list of volumes, got: %#v", obj)
			continue
		}
		// Ignore template volumes from kubernetes 1.2
		deleted := ctrl.upgradeVolumeFrom1_2(volume)
		if !deleted {
			clone, err := conversion.NewCloner().DeepCopy(volume)
			if err != nil {
				glog.Errorf("error cloning volume %q: %v", volume.Name, err)
				continue
			}
			volumeClone := clone.(*api.PersistentVolume)
			ctrl.storeVolumeUpdate(volumeClone)
		}
	}

	claimListObj, err := claimSource.List(api.ListOptions{})
	if err != nil {
		glog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
		return
	}
	claimList, ok := claimListObj.(*api.PersistentVolumeClaimList)
	if !ok {
		glog.Errorf("PersistentVolumeController can't initialize caches, expected list of claims, got: %#v", claimListObj)
		return
	}
	for _, claim := range claimList.Items {
		clone, err := conversion.NewCloner().DeepCopy(&claim)
		if err != nil {
			glog.Errorf("error cloning claim %q: %v", claimToClaimKey(&claim), err)
			continue
		}
		claimClone := clone.(*api.PersistentVolumeClaim)
		ctrl.storeClaimUpdate(claimClone)
	}
	glog.V(4).Infof("controller initialized")
}
Example #27
// getDefaultClass returns the default StorageClass from the store, or nil.
func getDefaultClass(store cache.Store) (*storage.StorageClass, error) {
	defaultClasses := []*storage.StorageClass{}
	for _, c := range store.List() {
		class, ok := c.(*storage.StorageClass)
		if !ok {
			return nil, errors.NewInternalError(fmt.Errorf("error converting stored object to StorageClass: %v", c))
		}
		if class.Annotations[isDefaultAnnotation] == "true" {
			defaultClasses = append(defaultClasses, class)
			glog.V(4).Infof("getDefaultClass added: %s", class.Name)
		}
	}

	if len(defaultClasses) == 0 {
		return nil, nil
	}
	if len(defaultClasses) > 1 {
		glog.V(4).Infof("getDefaultClass %s defaults found", len(defaultClasses))
		return nil, errors.NewInternalError(fmt.Errorf("%d default StorageClasses were found", len(defaultClasses)))
	}
	return defaultClasses[0], nil
}
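A hedged sketch of a caller handling the three possible outcomes (exactly one default, no default, conflicting defaults); the surrounding provisioning path is assumed:

class, err := getDefaultClass(classStore)
if err != nil {
	return err // more than one StorageClass is annotated as default
}
if class == nil {
	return nil // no default StorageClass configured; nothing to apply
}
// ...use class.Provisioner and class.Parameters...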
func getEndpointsForService(endpointsStore kubeCache.Store, s *kubeAPI.Service) (*kubeAPI.Endpoints, error) {
	var (
		err    error
		key    string
		obj    interface{}
		exists bool
		ok     bool
		e      *kubeAPI.Endpoints
	)
	if key, err = kubeCache.MetaNamespaceKeyFunc(s); err != nil {
		return nil, err
	}
	if obj, exists, err = endpointsStore.GetByKey(key); err != nil {
		return nil, fmt.Errorf("Error getting endpoints object from endpoints store - %v", err)
	}
	if !exists {
		log.WithFields(log.Fields{"name": s.Name, "namespace": s.Namespace}).Warn("Unable to find endpoint for service")
		return nil, nil
	}
	if e, ok = obj.(*kubeAPI.Endpoints); !ok {
		return nil, fmt.Errorf("got a non endpoints object in endpoints store %v", obj)
	}
	return e, nil
}
func getServiceFromEndpoints(serviceStore kubeCache.Store, e *kubeAPI.Endpoints) (*kubeAPI.Service, error) {
	var (
		err    error
		key    string
		obj    interface{}
		exists bool
		ok     bool
		svc    *kubeAPI.Service
	)
	if key, err = kubeCache.MetaNamespaceKeyFunc(e); err != nil {
		return nil, err
	}
	if obj, exists, err = serviceStore.GetByKey(key); err != nil {
		return nil, fmt.Errorf("Error getting service object from services store - %v", err)
	}
	if !exists {
		log.WithFields(log.Fields{"name": e.Name, "namespace": e.Namespace}).Warn("Unable to find service for endpoint")
		return nil, nil
	}
	if svc, ok = obj.(*kubeAPI.Service); !ok {
		return nil, fmt.Errorf("got a non service object in services store %v", obj)
	}
	return svc, nil
}
// storeObjectUpdate updates the given cache with a new object version from an
// Informer callback (i.e. with events from etcd) or with an object modified by
// the controller itself. Returns "true" if the cache was updated, "false" if
// the object is an old version and should be ignored.
func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bool, error) {
	objName, err := controller.KeyFunc(obj)
	if err != nil {
		return false, fmt.Errorf("Couldn't get key for object %+v: %v", obj, err)
	}
	oldObj, found, err := store.Get(obj)
	if err != nil {
		return false, fmt.Errorf("Error finding %s %q in controller cache: %v", className, objName, err)
	}

	objAccessor, err := meta.Accessor(obj)
	if err != nil {
		return false, err
	}

	if !found {
		// This is a new object
		glog.V(4).Infof("storeObjectUpdate: adding %s %q, version %s", className, objName, objAccessor.GetResourceVersion())
		if err = store.Add(obj); err != nil {
			return false, fmt.Errorf("Error adding %s %q to controller cache: %v", className, objName, err)
		}
		return true, nil
	}

	oldObjAccessor, err := meta.Accessor(oldObj)
	if err != nil {
		return false, err
	}

	objResourceVersion, err := strconv.ParseInt(objAccessor.GetResourceVersion(), 10, 64)
	if err != nil {
		return false, fmt.Errorf("Error parsing ResourceVersion %q of %s %q: %s", objAccessor.GetResourceVersion(), className, objName, err)
	}
	oldObjResourceVersion, err := strconv.ParseInt(oldObjAccessor.GetResourceVersion(), 10, 64)
	if err != nil {
		return false, fmt.Errorf("Error parsing old ResourceVersion %q of %s %q: %s", oldObjAccessor.GetResourceVersion(), className, objName, err)
	}

	// Throw away only older version, let the same version pass - we do want to
	// get periodic sync events.
	if oldObjResourceVersion > objResourceVersion {
		glog.V(4).Infof("storeObjectUpdate: ignoring %s %q version %s", className, objName, objAccessor.GetResourceVersion())
		return false, nil
	}

	glog.V(4).Infof("storeObjectUpdate updating %s %q with version %s", className, objName, objAccessor.GetResourceVersion())
	if err = store.Update(obj); err != nil {
		return false, fmt.Errorf("Error updating %s %q in controller cache: %v", className, objName, err)
	}
	return true, nil
}
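A caller sketch from an informer update callback, with the store and variable names assumed:

// Ignore events that carry an older ResourceVersion than the cached object.
updated, err := storeObjectUpdate(volumeStore, vol, "volume")
if err != nil {
	glog.Errorf("%v", err)
	return
}
if !updated {
	return // stale event; the cache already holds a newer version
}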