func (r RealPodControl) createReplica(namespace string, controller api.ReplicationController) {
	desiredLabels := make(labels.Set)
	for k, v := range controller.Spec.Template.Labels {
		desiredLabels[k] = v
	}
	desiredAnnotations := make(labels.Set)
	for k, v := range controller.Spec.Template.Annotations {
		desiredAnnotations[k] = v
	}

	// use the dash (if the name isn't too long) to make the pod name a bit prettier
	prefix := fmt.Sprintf("%s-", controller.Name)
	if ok, _ := validation.ValidatePodName(prefix, true); !ok {
		prefix = controller.Name
	}

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Labels:       desiredLabels,
			Annotations:  desiredAnnotations,
			GenerateName: prefix,
		},
	}
	if err := api.Scheme.Convert(&controller.Spec.Template.Spec, &pod.Spec); err != nil {
		util.HandleError(fmt.Errorf("unable to convert pod template: %v", err))
		return
	}
	if labels.Set(pod.Labels).AsSelector().Empty() {
		util.HandleError(fmt.Errorf("unable to create pod replica, no labels"))
		return
	}
	if _, err := r.kubeClient.Pods(namespace).Create(pod); err != nil {
		util.HandleError(fmt.Errorf("unable to create pod replica: %v", err))
	}
}
Example #2
// invalidateCache returns true if there was a change in the cluster namespace that holds cluster policy and policy bindings
func (ac *AuthorizationCache) invalidateCache() bool {
	invalidateCache := false

	clusterPolicyList, err := ac.policyClient.ReadOnlyClusterPolicies().List(labels.Everything(), fields.Everything())
	if err != nil {
		util.HandleError(err)
		return invalidateCache
	}

	temporaryVersions := util.NewStringSet()
	for _, clusterPolicy := range clusterPolicyList.Items {
		temporaryVersions.Insert(clusterPolicy.ResourceVersion)
	}
	if (len(ac.clusterPolicyResourceVersions) != len(temporaryVersions)) || !ac.clusterPolicyResourceVersions.HasAll(temporaryVersions.List()...) {
		invalidateCache = true
		ac.clusterPolicyResourceVersions = temporaryVersions
	}

	clusterPolicyBindingList, err := ac.policyClient.ReadOnlyClusterPolicyBindings().List(labels.Everything(), fields.Everything())
	if err != nil {
		util.HandleError(err)
		return invalidateCache
	}

	temporaryVersions.Delete(temporaryVersions.List()...)
	for _, clusterPolicyBinding := range clusterPolicyBindingList.Items {
		temporaryVersions.Insert(clusterPolicyBinding.ResourceVersion)
	}
	if (len(ac.clusterBindingResourceVersions) != len(temporaryVersions)) || !ac.clusterBindingResourceVersions.HasAll(temporaryVersions.List()...) {
		invalidateCache = true
		ac.clusterBindingResourceVersions = temporaryVersions
	}
	return invalidateCache
}
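// A minimal sketch of how invalidateCache might be consumed; the method name
// synchronizeIfNeeded is assumed for illustration and is not part of the source.
// The idea: a periodic pass re-reviews bindings only when the cluster-scoped
// policies or policy bindings actually changed (see synchronizePolicyBindings in Example #9).
func (ac *AuthorizationCache) synchronizeIfNeeded(userSubjectRecordStore, groupSubjectRecordStore, reviewRecordStore cache.Store) {
	if !ac.invalidateCache() {
		// cluster policies and bindings are unchanged; skip the expensive re-sync
		return
	}
	ac.synchronizePolicyBindings(userSubjectRecordStore, groupSubjectRecordStore, reviewRecordStore)
}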
Example #3
func (r *Reflector) listAndWatch(stopCh <-chan struct{}) {
	var resourceVersion string
	resyncCh, cleanup := r.resyncChan()
	defer cleanup()

	list, err := r.listerWatcher.List()
	if err != nil {
		util.HandleError(fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err))
		return
	}
	meta, err := meta.Accessor(list)
	if err != nil {
		util.HandleError(fmt.Errorf("%s: Unable to understand list result %#v", r.name, list))
		return
	}
	resourceVersion = meta.ResourceVersion()
	items, err := runtime.ExtractList(list)
	if err != nil {
		util.HandleError(fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err))
		return
	}
	if err := r.syncWith(items); err != nil {
		util.HandleError(fmt.Errorf("%s: Unable to sync list result: %v", r.name, err))
		return
	}
	r.setLastSyncResourceVersion(resourceVersion)

	for {
		w, err := r.listerWatcher.Watch(resourceVersion)
		if err != nil {
			switch err {
			case io.EOF:
				// watch closed normally
			case io.ErrUnexpectedEOF:
				glog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err)
			default:
				util.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err))
			}
			// If this is "connection refused" error, it means that most likely apiserver is not responsive.
			// It doesn't make sense to re-list all objects because most likely we will be able to restart
			// watch where we ended.
			// If that's the case wait and resend watch request.
			if urlError, ok := err.(*url.Error); ok {
				if opError, ok := urlError.Err.(*net.OpError); ok {
					if errno, ok := opError.Err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED {
						time.Sleep(time.Second)
						continue
					}
				}
			}
			return
		}
		if err := r.watchHandler(w, &resourceVersion, resyncCh, stopCh); err != nil {
			if err != errorResyncRequested && err != errorStopRequested {
				glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
			}
			return
		}
	}
}
Example #4
// watchHandler watches w and keeps *resourceVersion up to date.
func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, resyncCh <-chan time.Time, stopCh <-chan struct{}) error {
	start := time.Now()
	eventCount := 0

	// Stopping the watcher should be idempotent and if we return from this function there's no way
	// we're coming back in with the same watch interface.
	defer w.Stop()

loop:
	for {
		select {
		case <-stopCh:
			return errorStopRequested
		case <-resyncCh:
			return errorResyncRequested
		case event, ok := <-w.ResultChan():
			if !ok {
				break loop
			}
			if event.Type == watch.Error {
				return apierrs.FromObject(event.Object)
			}
			if e, a := r.expectedType, reflect.TypeOf(event.Object); e != a {
				util.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a))
				continue
			}
			meta, err := meta.Accessor(event.Object)
			if err != nil {
				util.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
				continue
			}
			switch event.Type {
			case watch.Added:
				r.store.Add(event.Object)
			case watch.Modified:
				r.store.Update(event.Object)
			case watch.Deleted:
				// TODO: Will any consumers need access to the "last known
				// state", which is passed in event.Object? If so, may need
				// to change this.
				r.store.Delete(event.Object)
			default:
				util.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
			}
			*resourceVersion = meta.ResourceVersion()
			r.setLastSyncResourceVersion(*resourceVersion)
			eventCount++
		}
	}

	watchDuration := time.Now().Sub(start)
	if watchDuration < 1*time.Second && eventCount == 0 {
		glog.V(4).Infof("%s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
		return errors.New("very short watch")
	}
	glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
	return nil
}
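// A minimal sketch of how listAndWatch and watchHandler are driven; the period
// field and this Run signature are assumed for illustration rather than taken
// from the source. A closed watch returns from listAndWatch, and util.Until
// simply starts a fresh list+watch cycle until the stop channel is closed.
func (r *Reflector) Run(stopCh <-chan struct{}) {
	go util.Until(func() { r.listAndWatch(stopCh) }, r.period, stopCh)
}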
Example #5
// RunOnce verifies the state of the portal IP allocations and returns an error if an unrecoverable problem occurs.
func (c *Repair) RunOnce() error {
	// TODO: (per smarterclayton) if Get() or ListServices() is a weak consistency read,
	// or if they are executed against different leaders,
	// the ordering guarantee required to ensure no IP is allocated twice is violated.
	// ListServices must return a ResourceVersion higher than the etcd index Get triggers,
	// and the release code must not release services that have had IPs allocated but not yet been created
	// See #8295

	latest, err := c.alloc.Get()
	if err != nil {
		return fmt.Errorf("unable to refresh the service IP block: %v", err)
	}

	ctx := api.WithNamespace(api.NewDefaultContext(), api.NamespaceAll)
	list, err := c.registry.ListServices(ctx)
	if err != nil {
		return fmt.Errorf("unable to refresh the service IP block: %v", err)
	}

	r := ipallocator.NewCIDRRange(c.network)
	for _, svc := range list.Items {
		if !api.IsServiceIPSet(&svc) {
			continue
		}
		ip := net.ParseIP(svc.Spec.PortalIP)
		if ip == nil {
			// portal IP is broken, reallocate
			util.HandleError(fmt.Errorf("the portal IP %s for service %s/%s is not a valid IP; please recreate", svc.Spec.PortalIP, svc.Name, svc.Namespace))
			continue
		}
		switch err := r.Allocate(ip); err {
		case nil:
		case ipallocator.ErrAllocated:
			// TODO: send event
			// portal IP is broken, reallocate
			util.HandleError(fmt.Errorf("the portal IP %s for service %s/%s was assigned to multiple services; please recreate", ip, svc.Name, svc.Namespace))
		case ipallocator.ErrNotInRange:
			// TODO: send event
			// portal IP is broken, reallocate
			util.HandleError(fmt.Errorf("the portal IP %s for service %s/%s is not within the service CIDR %s; please recreate", ip, svc.Name, svc.Namespace, c.network))
		case ipallocator.ErrFull:
			// TODO: send event
			return fmt.Errorf("the service CIDR %s is full; you must widen the CIDR in order to create new services")
		default:
			return fmt.Errorf("unable to allocate portal IP %s for service %s/%s due to an unknown error, exiting: %v", ip, svc.Name, svc.Namespace, err)
		}
	}

	err = r.Snapshot(latest)
	if err != nil {
		return fmt.Errorf("unable to persist the updated service IP allocations: %v", err)
	}

	if err := c.alloc.CreateOrUpdate(latest); err != nil {
		return fmt.Errorf("unable to persist the updated service IP allocations: %v", err)
	}
	return nil
}
Example #6
// resourceVersion is a pointer to the resource version to use/update.
func (rm *ReplicationManager) watchControllers(resourceVersion *string) {
	watching, err := rm.kubeClient.ReplicationControllers(api.NamespaceAll).Watch(
		labels.Everything(),
		fields.Everything(),
		*resourceVersion,
	)
	if err != nil {
		util.HandleError(fmt.Errorf("unable to watch: %v", err))
		time.Sleep(5 * time.Second)
		return
	}

	for {
		select {
		case <-rm.syncTime:
			rm.synchronize()
		case event, open := <-watching.ResultChan():
			if !open {
				// watchChannel has been closed, or something else went
				// wrong with our watch call. Let the util.Forever()
				// that called us call us again.
				return
			}
			if event.Type == watch.Error {
				util.HandleError(fmt.Errorf("error from watch during sync: %v", errors.FromObject(event.Object)))
				// Clear the resource version, this may cause us to skip some elements on the watch,
				// but we'll catch them on the synchronize() call, so it works out.
				*resourceVersion = ""
				continue
			}
			glog.V(4).Infof("Got watch: %#v", event)
			rc, ok := event.Object.(*api.ReplicationController)
			if !ok {
				if status, ok := event.Object.(*api.Status); ok {
					if status.Status == api.StatusFailure {
						glog.Errorf("failed to watch: %v", status)
						// Clear resource version here, as above, this won't hurt consistency, but we
						// should consider introspecting more carefully here. (or make the apiserver smarter)
						// "why not both?"
						*resourceVersion = ""
						continue
					}
				}
				util.HandleError(fmt.Errorf("unexpected object: %#v", event.Object))
				continue
			}
			// If we get disconnected, start where we left off.
			*resourceVersion = rc.ResourceVersion
			// Sync even if this is a deletion event, to ensure that we leave
			// it in the desired state.
			glog.V(4).Infof("About to sync from watch: %v", rc.Name)
			if err := rm.syncHandler(*rc); err != nil {
				util.HandleError(fmt.Errorf("unexpected sync error: %v", err))
			}
		}
	}
}
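// A minimal sketch of the caller referred to by the "Let the util.Forever() that
// called us call us again" comment above; this Run signature is assumed for
// illustration. util.Forever re-invokes watchControllers whenever the watch
// channel closes, and the shared resourceVersion lets the next call resume where
// the previous watch left off.
func (rm *ReplicationManager) Run(period time.Duration) {
	resourceVersion := ""
	go util.Forever(func() { rm.watchControllers(&resourceVersion) }, period)
}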
Example #7
// RunOnce verifies the state of the port allocations and returns an error if an unrecoverable problem occurs.
func (c *Repair) RunOnce() error {
	// TODO: (per smarterclayton) if Get() or ListServices() is a weak consistency read,
	// or if they are executed against different leaders,
	// the ordering guarantee required to ensure no port is allocated twice is violated.
	// ListServices must return a ResourceVersion higher than the etcd index Get triggers,
	// and the release code must not release services that have had ports allocated but not yet been created
	// See #8295

	latest, err := c.alloc.Get()
	if err != nil {
		return fmt.Errorf("unable to refresh the port block: %v", err)
	}

	ctx := api.WithNamespace(api.NewDefaultContext(), api.NamespaceAll)
	list, err := c.registry.ListServices(ctx)
	if err != nil {
		return fmt.Errorf("unable to refresh the port block: %v", err)
	}

	r := portallocator.NewPortAllocator(c.portRange)
	for i := range list.Items {
		svc := &list.Items[i]
		ports := service.CollectServiceNodePorts(svc)
		if len(ports) == 0 {
			continue
		}

		for _, port := range ports {
			switch err := r.Allocate(port); err {
			case nil:
			case portallocator.ErrAllocated:
				// TODO: send event
				// port is broken, reallocate
				util.HandleError(fmt.Errorf("the port %d for service %s/%s was assigned to multiple services; please recreate", port, svc.Name, svc.Namespace))
			case portallocator.ErrNotInRange:
				// TODO: send event
				// port is broken, reallocate
				util.HandleError(fmt.Errorf("the port %d for service %s/%s is not within the port range %v; please recreate", port, svc.Name, svc.Namespace, c.portRange))
			case portallocator.ErrFull:
				// TODO: send event
				return fmt.Errorf("the port range %v is full; you must widen the port range in order to create new services", c.portRange)
			default:
				return fmt.Errorf("unable to allocate port %d for service %s/%s due to an unknown error, exiting: %v", port, svc.Name, svc.Namespace, err)
			}
		}
	}

	err = r.Snapshot(latest)
	if err != nil {
		return fmt.Errorf("unable to persist the updated port allocations: %v", err)
	}

	if err := c.alloc.CreateOrUpdate(latest); err != nil {
		return fmt.Errorf("unable to persist the updated port allocations: %v", err)
	}
	return nil
}
Example #8
// handleLocationChange goes through all service account dockercfg secrets and updates them to point at a new docker-registry location
func (e *DockerRegistryServiceController) handleLocationChange(serviceLocation string) error {
	e.dockercfgController.SetDockerURL(serviceLocation)

	dockercfgSecrets, err := e.listDockercfgSecrets()
	if err != nil {
		return err
	}

	for _, dockercfgSecret := range dockercfgSecrets {
		dockercfg := &credentialprovider.DockerConfig{}
		if err := json.Unmarshal(dockercfgSecret.Data[api.DockerConfigKey], dockercfg); err != nil {
			util.HandleError(err)
			continue
		}

		dockercfgMap := map[string]credentialprovider.DockerConfigEntry(*dockercfg)
		keys := util.KeySet(reflect.ValueOf(dockercfgMap))
		if len(keys) != 1 {
			util.HandleError(fmt.Errorf("expected exactly one key in the dockercfg secret %s/%s, got %d", dockercfgSecret.Namespace, dockercfgSecret.Name, len(keys)))
			continue
		}
		oldKey := keys.List()[0]

		// if there's no change, skip
		if oldKey == serviceLocation {
			continue
		}

		dockercfgMap[serviceLocation] = dockercfgMap[oldKey]
		delete(dockercfgMap, oldKey)
		t := credentialprovider.DockerConfig(dockercfgMap)
		dockercfg = &t

		dockercfgContent, err := json.Marshal(dockercfg)
		if err != nil {
			util.HandleError(err)
			continue
		}
		dockercfgSecret.Data[api.DockerConfigKey] = dockercfgContent

		if _, err := e.client.Secrets(dockercfgSecret.Namespace).Update(dockercfgSecret); err != nil {
			util.HandleError(err)
			continue
		}
	}

	return nil
}
Example #9
// synchronizePolicyBindings synchronizes access over each policy binding
func (ac *AuthorizationCache) synchronizePolicyBindings(userSubjectRecordStore cache.Store, groupSubjectRecordStore cache.Store, reviewRecordStore cache.Store) {
	policyBindingList, err := ac.policyClient.ReadOnlyPolicyBindings(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
	if err != nil {
		util.HandleError(err)
		return
	}
	for _, policyBinding := range policyBindingList.Items {
		reviewRequest := &reviewRequest{
			namespace:                         policyBinding.Namespace,
			policyBindingUIDToResourceVersion: map[types.UID]string{policyBinding.UID: policyBinding.ResourceVersion},
		}
		if err := ac.syncHandler(reviewRequest, userSubjectRecordStore, groupSubjectRecordStore, reviewRecordStore); err != nil {
			util.HandleError(fmt.Errorf("error synchronizing: %v", err))
		}
	}
}
Example #10
// RoundTrip sends the request to the backend and strips off the CORS headers
// before returning the response.
func (p *UpgradeAwareSingleHostReverseProxy) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, err := p.transport.RoundTrip(req)
	if err != nil {
		return resp, err
	}

	removeCORSHeaders(resp)
	removeChallengeHeaders(resp)
	if resp.StatusCode == http.StatusUnauthorized {
		util.HandleError(fmt.Errorf("got unauthorized error from backend for: %s %s", req.Method, req.URL))
		// Internal error, backend didn't recognize proxy identity
		// Surface as a server error to the client
		// TODO do we need to do more than this?
		resp = &http.Response{
			StatusCode:    http.StatusInternalServerError,
			Status:        http.StatusText(http.StatusInternalServerError),
			Body:          ioutil.NopCloser(strings.NewReader("Internal Server Error")),
			ContentLength: -1,
		}
	}

	// TODO do we need to strip off anything else?

	return resp, err
}
Example #11
// Create creates an ImportController.
func (f *ImportControllerFactory) Create() controller.RunnableController {
	lw := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	q := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(lw, &api.ImageStream{}, q, 2*time.Minute).Run()

	c := &ImportController{
		client:   dockerregistry.NewClient(),
		streams:  f.Client,
		mappings: f.Client,
	}

	return &controller.RetryController{
		Queue: q,
		RetryManager: controller.NewQueueRetryManager(
			q,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				util.HandleError(err)
				return retries.Count < 5
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			r := obj.(*api.ImageStream)
			return c.Next(r)
		},
	}
}
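// A minimal usage sketch; runImportController is a hypothetical helper, and it
// assumes controller.RunnableController exposes a Run method that starts the
// retry loop (not shown in the source).
func runImportController(factory *ImportControllerFactory) {
	factory.Create().Run()
}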
Example #12
// Create creates a new ImageChangeController which is used to trigger builds when a new
// image is available
func (factory *ImageChangeControllerFactory) Create() controller.RunnableController {
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&imageStreamLW{factory.Client}, &imageapi.ImageStream{}, queue, 2*time.Minute).Run()

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildConfigLW{client: factory.Client}, &buildapi.BuildConfig{}, store, 2*time.Minute).Run()

	imageChangeController := &buildcontroller.ImageChangeController{
		BuildConfigStore:        store,
		BuildConfigInstantiator: factory.BuildConfigInstantiator,
		Stop: factory.Stop,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				if _, isFatal := err.(buildcontroller.ImageChangeControllerFatalError); isFatal {
					return false
				}
				return retries.Count < maxRetries
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			imageRepo := obj.(*imageapi.ImageStream)
			return imageChangeController.HandleImageRepo(imageRepo)
		},
	}
}
Example #13
// Create constructs a BuildPodController
func (factory *BuildPodControllerFactory) Create() controller.RunnableController {
	factory.buildStore = cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildLW{client: factory.OSClient}, &buildapi.Build{}, factory.buildStore, 2*time.Minute).Run()

	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&podLW{client: factory.KubeClient}, &kapi.Pod{}, queue, 2*time.Minute).Run()

	client := ControllerClient{factory.KubeClient, factory.OSClient}
	buildPodController := &buildcontroller.BuildPodController{
		BuildStore:   factory.buildStore,
		BuildUpdater: factory.BuildUpdater,
		PodManager:   client,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				return retries.Count < maxRetries
			},
			kutil.NewTokenBucketRateLimiter(1, 10)),
		Handle: func(obj interface{}) error {
			pod := obj.(*kapi.Pod)
			return buildPodController.HandlePod(pod)
		},
	}
}
Example #14
func (r confirmTemplateRenderer) Render(form ConfirmForm, w http.ResponseWriter, req *http.Request) {
	w.Header().Add("Content-Type", "text/html")
	w.WriteHeader(http.StatusOK)
	if err := confirmTemplate.Execute(w, form); err != nil {
		util.HandleError(fmt.Errorf("unable render confirm template: %v", err))
	}
}
Example #15
// RunKubernetesService periodically updates the kubernetes service
func (c *Controller) RunKubernetesService(ch chan struct{}) {
	util.Until(func() {
		if err := c.UpdateKubernetesService(); err != nil {
			util.HandleError(fmt.Errorf("unable to sync kubernetes service: %v", err))
		}
	}, c.EndpointInterval, ch)
}
Example #16
// secretDeleted reacts to a Secret being deleted by looking to see if it's a dockercfg secret for a service account, in which case
// it removes the references from the service account and removes the token created to back the dockercfgSecret
func (e *DockercfgDeletedController) secretDeleted(obj interface{}) {
	dockercfgSecret, ok := obj.(*api.Secret)
	if !ok {
		return
	}
	if _, exists := dockercfgSecret.Annotations[ServiceAccountTokenSecretNameKey]; !exists {
		return
	}

	for i := 1; i <= NumServiceAccountUpdateRetries; i++ {
		if err := e.removeDockercfgSecretReference(dockercfgSecret); err != nil {
			if kapierrors.IsConflict(err) && i < NumServiceAccountUpdateRetries {
				time.Sleep(wait.Jitter(100*time.Millisecond, 0.0))
				continue
			}

			glog.Error(err)
			break
		}

		break
	}

	// remove the reference token secret
	if err := e.client.Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Annotations[ServiceAccountTokenSecretNameKey]); (err != nil) && !kapierrors.IsNotFound(err) {
		util.HandleError(err)
	}
}
Example #17
func (tx *tx) Rollback() {
	for _, fn := range tx.rollback {
		if err := fn(); err != nil {
			util.HandleError(fmt.Errorf("unable to undo tx: %v", err))
		}
	}
}
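// A minimal sketch of the transaction helper Rollback operates on; the struct
// layout and the AddRollback helper are assumed for illustration. Each mutating
// step registers an undo function, and Rollback above replays them if a later
// step fails.
type tx struct {
	rollback []func() error
}

// AddRollback registers an undo function to run if the transaction is rolled back.
func (tx *tx) AddRollback(fn func() error) {
	tx.rollback = append(tx.rollback, fn)
}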
Example #18
// Edges are added to the graph from each predecessor (pod or replication
// controller) to the images specified by the pod spec's list of containers, as
// long as the image is managed by OpenShift.
func addPodSpecToGraph(g graph.Graph, spec *kapi.PodSpec, predecessor gonum.Node) {
	for j := range spec.Containers {
		container := spec.Containers[j]

		glog.V(4).Infof("Examining container image %q", container.Image)

		ref, err := imageapi.ParseDockerImageReference(container.Image)
		if err != nil {
			util.HandleError(fmt.Errorf("unable to parse DockerImageReference %q: %v", container.Image, err))
			continue
		}

		if len(ref.ID) == 0 {
			glog.V(4).Infof("%q has no image ID", container.Image)
			continue
		}

		imageNode := graph.FindImage(g, ref.ID)
		if imageNode == nil {
			glog.Infof("Unable to find image %q in the graph", ref.ID)
			continue
		}

		glog.V(4).Infof("Adding edge from pod to image")
		g.AddEdge(predecessor, imageNode, graph.ReferencedImageGraphEdgeKind)
	}
}
Example #19
// NewServiceGroup returns the ServiceGroup and a set of all the NodeIDs covered by the service
func NewServiceGroup(g osgraph.Graph, serviceNode *kubegraph.ServiceNode) (ServiceGroup, IntSet) {
	covered := IntSet{}
	covered.Insert(serviceNode.ID())

	service := ServiceGroup{}
	service.Service = serviceNode

	for _, uncastServiceFulfiller := range g.PredecessorNodesByEdgeKind(serviceNode, kubeedges.ExposedThroughServiceEdgeKind) {
		container := osgraph.GetTopLevelContainerNode(g, uncastServiceFulfiller)

		switch castContainer := container.(type) {
		case *deploygraph.DeploymentConfigNode:
			service.FulfillingDCs = append(service.FulfillingDCs, castContainer)
		case *kubegraph.ReplicationControllerNode:
			service.FulfillingRCs = append(service.FulfillingRCs, castContainer)
		case *kubegraph.PodNode:
			service.FulfillingPods = append(service.FulfillingPods, castContainer)

		default:
			util.HandleError(fmt.Errorf("unrecognized container: %v", castContainer))
		}
	}

	// add the DCPipelines for all the DCs that fulfill the service
	for _, fulfillingDC := range service.FulfillingDCs {
		dcPipeline, dcCovers := NewDeploymentConfigPipeline(g, fulfillingDC)

		covered.Insert(dcCovers.List()...)
		service.DeploymentConfigPipelines = append(service.DeploymentConfigPipelines, dcPipeline)
	}

	return service, covered
}
Example #20
// serviceAccountAdded reacts to a ServiceAccount creation by creating a corresponding dockercfg Secret if needed
func (e *DockercfgController) serviceAccountAdded(obj interface{}) {
	serviceAccount := obj.(*api.ServiceAccount)

	if err := e.createDockercfgSecretIfNeeded(serviceAccount); err != nil {
		util.HandleError(err)
	}
}
Example #21
// RunUntil starts the controller until the provided ch is closed.
func (c *Repair) RunUntil(ch chan struct{}) {
	util.Until(func() {
		if err := c.RunOnce(); err != nil {
			util.HandleError(err)
		}
	}, c.interval, ch)
}
Example #22
// serviceAccountUpdated reacts to a ServiceAccount update (or re-list) by ensuring a corresponding dockercfg Secret exists
func (e *DockercfgController) serviceAccountUpdated(oldObj interface{}, newObj interface{}) {
	newServiceAccount := newObj.(*api.ServiceAccount)

	if err := e.createDockercfgSecretIfNeeded(newServiceAccount); err != nil {
		util.HandleError(err)
	}
}
Example #23
// Delete deletes the user association for the named identity
func (s *REST) Delete(ctx kapi.Context, name string) (runtime.Object, error) {
	identity, _, user, _, _, mappingErr := s.getRelatedObjects(ctx, name)

	if mappingErr != nil {
		return nil, mappingErr
	}

	// Disassociate the identity with the user first
	// If this fails, Delete is re-entrant
	if removeIdentityFromUser(identity, user) {
		if _, err := s.userRegistry.UpdateUser(ctx, user); err != nil {
			return nil, err
		}
	}

	// Remove the user association from the identity last.
	// If this fails, log the error, but continue, because Delete is no longer re-entrant
	// At this point, the mapping for the identity no longer exists
	if unsetIdentityUser(identity) {
		if _, err := s.identityRegistry.UpdateIdentity(ctx, identity); err != nil {
			util.HandleError(fmt.Errorf("error removing user reference %s from identity %s: %v", user.Name, identity.Name, err))
		}
	}

	return &kapi.Status{Status: kapi.StatusSuccess}, nil
}
Example #24
// pruneImages invokes pruneImage for each image that is prunable, then invokes
// pruneStream and pruneManifest for every image stream that references the
// image. After an image is pruned, its node is removed from the graph so that
// layers eligible for pruning may later be identified.
func pruneImages(g graph.Graph, imageNodes []*graph.ImageNode, pruneImage ImagePruneFunc, pruneStream ImageStreamPruneFunc, pruneManifest ManifestPruneFunc) {
	for _, imageNode := range imageNodes {
		glog.V(4).Infof("Examining image %q", imageNode.Image.Name)

		if !imageIsPrunable(g, imageNode) {
			glog.V(4).Infof("Image has strong references - not pruning")
			continue
		}

		glog.V(4).Infof("Image has only weak references - pruning")

		if err := pruneImage(imageNode.Image); err != nil {
			util.HandleError(fmt.Errorf("error pruning image %q: %v", imageNode.Image.Name, err))
		}

		for _, n := range g.Predecessors(imageNode) {
			if streamNode, ok := n.(*graph.ImageStreamNode); ok {
				stream := streamNode.ImageStream
				repoName := fmt.Sprintf("%s/%s", stream.Namespace, stream.Name)

				glog.V(4).Infof("Pruning image from stream %s", repoName)
				updatedStream, err := pruneStream(stream, imageNode.Image)
				if err != nil {
					util.HandleError(fmt.Errorf("error pruning image from stream: %v", err))
					continue
				}

				streamNode.ImageStream = updatedStream

				ref, err := imageapi.DockerImageReferenceForStream(stream)
				if err != nil {
					util.HandleError(fmt.Errorf("error constructing DockerImageReference for %q: %v", repoName, err))
					continue
				}

				glog.V(4).Infof("Invoking pruneManifest for registry %q, repo %q, image %q", ref.Registry, repoName, imageNode.Image.Name)
				if err := pruneManifest(ref.Registry, repoName, imageNode.Image.Name); err != nil {
					util.HandleError(fmt.Errorf("error pruning manifest for registry %q, repo %q, image %q: %v", ref.Registry, repoName, imageNode.Image.Name, err))
				}
			}
		}

		// remove pruned image node from graph, for layer pruning later
		g.RemoveNode(imageNode)
	}
}
Example #25
// manageReplicas checks and updates replicas for the given replication controller.
func (rm *ReplicationManager) manageReplicas(filteredPods []*api.Pod, rc *api.ReplicationController) {
	diff := len(filteredPods) - rc.Spec.Replicas
	rcKey, err := controller.KeyFunc(rc)
	if err != nil {
		glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err)
		return
	}
	if diff < 0 {
		diff *= -1
		if diff > rm.burstReplicas {
			diff = rm.burstReplicas
		}
		rm.expectations.ExpectCreations(rcKey, diff)
		wait := sync.WaitGroup{}
		wait.Add(diff)
		glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rc.Namespace, rc.Name, rc.Spec.Replicas, diff)
		for i := 0; i < diff; i++ {
			go func() {
				defer wait.Done()
				if err := rm.podControl.CreateReplica(rc.Namespace, rc); err != nil {
					// Decrement the expected number of creates because the informer won't observe this pod
					glog.V(2).Infof("Failed creation, decrementing expectations for controller %q/%q", rc.Namespace, rc.Name)
					rm.expectations.CreationObserved(rcKey)
					util.HandleError(err)
				}
			}()
		}
		wait.Wait()
	} else if diff > 0 {
		if diff > rm.burstReplicas {
			diff = rm.burstReplicas
		}
		rm.expectations.ExpectDeletions(rcKey, diff)
		glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rc.Namespace, rc.Name, rc.Spec.Replicas, diff)
		// No need to sort pods if we are about to delete all of them
		if rc.Spec.Replicas != 0 {
			// Sort the pods in the order such that not-ready < ready, unscheduled
			// < scheduled, and pending < running. This ensures that we delete pods
			// in the earlier stages whenever possible.
			sort.Sort(controller.ActivePods(filteredPods))
		}

		wait := sync.WaitGroup{}
		wait.Add(diff)
		for i := 0; i < diff; i++ {
			go func(ix int) {
				defer wait.Done()
				if err := rm.podControl.DeletePod(rc.Namespace, filteredPods[ix].Name); err != nil {
					// Decrement the expected number of deletes because the informer won't observe this deletion
					glog.V(2).Infof("Failed deletion, decrementing expectations for controller %q/%q", rc.Namespace, rc.Name)
					rm.expectations.DeletionObserved(rcKey)
				}
			}(i)
		}
		wait.Wait()
	}
}
Example #26
// Create creates a DeploymentConfigChangeController.
func (factory *DeploymentConfigChangeControllerFactory) Create() controller.RunnableController {
	deploymentConfigLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	changeController := &DeploymentConfigChangeController{
		changeStrategy: &changeStrategyImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Get(name)
			},
			generateDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				return factory.Client.DeploymentConfigs(namespace).Generate(name)
			},
			updateDeploymentConfigFunc: func(namespace string, config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
				return factory.Client.DeploymentConfigs(namespace).Update(config)
			},
		},
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, factory.Codec)
		},
		recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployer"}),
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				if _, isFatal := err.(fatalError); isFatal {
					return false
				}
				if retries.Count > 0 {
					return false
				}
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			config := obj.(*deployapi.DeploymentConfig)
			return changeController.Handle(config)
		},
	}
}
Example #27
// serviceAdded reacts to the creation of a docker-registry service by updating all service account dockercfg secrets and
// changing all interestedURLs
func (e *DockerRegistryServiceController) serviceAdded(obj interface{}) {
	service := obj.(*api.Service)
	if service.Name != e.registryServiceName {
		return
	}

	if err := e.handleLocationChange(e.getServiceLocation(service)); err != nil {
		util.HandleError(err)
	}
}
Example #28
// Create creates a new ImageChangeController which is used to trigger builds when a new
// image is available
func (factory *ImageChangeControllerFactory) Create() controller.RunnableController {
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&imageStreamLW{factory.Client}, &imageapi.ImageStream{}, queue, 2*time.Minute).Run()

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildConfigLW{client: factory.Client}, &buildapi.BuildConfig{}, store, 2*time.Minute).Run()

	imageChangeController := &buildcontroller.ImageChangeController{
		BuildConfigStore:        store,
		BuildConfigInstantiator: factory.BuildConfigInstantiator,
		Stop: factory.Stop,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				imageStream := obj.(*imageapi.ImageStream)
				if _, isFatal := err.(buildcontroller.ImageChangeControllerFatalError); isFatal {
					glog.V(3).Infof("Will not retry fatal error for ImageStream update event %s/%s: %v", imageStream.Namespace, imageStream.Name, err)
					kutil.HandleError(err)
					return false
				}
				if retries.Count >= maxRetries {
					glog.V(3).Infof("Giving up retrying ImageStream update event %s/%s: %v", imageStream.Namespace, imageStream.Name, err)
					kutil.HandleError(err)
					return false
				}
				glog.V(4).Infof("Retrying ImageStream update event %s/%s: %v", imageStream.Namespace, imageStream.Name, err)
				return true

			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			imageRepo := obj.(*imageapi.ImageStream)
			return imageChangeController.HandleImageRepo(imageRepo)
		},
	}
}
Example #29
// pruneLayers invokes pruneBlob once per registry and pruneLayer once per
// referencing repository for each layer that is eligible for pruning.
func pruneLayers(g graph.Graph, layerNodes []*graph.ImageLayerNode, pruneLayer LayerPruneFunc, pruneBlob BlobPruneFunc) {
	for _, layerNode := range layerNodes {
		glog.V(4).Infof("Examining layer %q", layerNode.Layer)

		if !layerIsPrunable(g, layerNode) {
			glog.V(4).Infof("Layer %q has image references - not pruning", layerNode.Layer)
			continue
		}

		registries := util.NewStringSet()

		// get streams that reference layer
		streamNodes := streamLayerReferences(g, layerNode)

		for _, streamNode := range streamNodes {
			stream := streamNode.ImageStream
			streamName := fmt.Sprintf("%s/%s", stream.Namespace, stream.Name)
			glog.V(4).Infof("Layer has an ImageStream predecessor: %s", streamName)

			ref, err := imageapi.DockerImageReferenceForStream(stream)
			if err != nil {
				util.HandleError(fmt.Errorf("error constructing DockerImageReference for %q: %v", streamName, err))
				continue
			}

			if !registries.Has(ref.Registry) {
				registries.Insert(ref.Registry)
				glog.V(4).Infof("Invoking pruneBlob with registry=%q, blob=%q", ref.Registry, layerNode.Layer)
				if err := pruneBlob(ref.Registry, layerNode.Layer); err != nil {
					util.HandleError(fmt.Errorf("error invoking pruneBlob: %v", err))
				}
			}

			repoName := fmt.Sprintf("%s/%s", ref.Namespace, ref.Name)
			glog.V(4).Infof("Invoking pruneLayer with registry=%q, repo=%q, layer=%q", ref.Registry, repoName, layerNode.Layer)
			if err := pruneLayer(ref.Registry, repoName, layerNode.Layer); err != nil {
				util.HandleError(fmt.Errorf("error invoking pruneLayer: %v", err))
			}
		}
	}
}
Example #30
func GeneratedConfigHandler(config WebConsoleConfig, h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if strings.TrimPrefix(r.URL.Path, "/") == "config.js" {
			w.Header().Add("Cache-Control", "no-cache, no-store")
			w.Header().Add("Content-Type", "application/json")
			if err := configTemplate.Execute(w, config); err != nil {
				util.HandleError(fmt.Errorf("unable to render config template: %v", err))
			}
			return
		}
		h.ServeHTTP(w, r)
	})
}
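// A minimal usage sketch; newAssetServerMux and the handler variables are assumed
// for illustration. GeneratedConfigHandler wraps the static asset handler so that
// /config.js is rendered from the WebConsoleConfig while every other path falls
// through to the assets.
func newAssetServerMux(config WebConsoleConfig, assetHandler http.Handler) *http.ServeMux {
	mux := http.NewServeMux()
	mux.Handle("/", GeneratedConfigHandler(config, assetHandler))
	return mux
}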