Code example #1
// Runs e; will not return until stopCh is closed. workers determines how many
// endpoints will be handled in parallel.
func (e *EndpointController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer e.queue.ShutDown()

	go e.serviceController.Run(stopCh)
	go e.podController.Run(stopCh)

	if !cache.WaitForCacheSync(stopCh, e.podStoreSynced) {
		return
	}

	for i := 0; i < workers; i++ {
		go wait.Until(e.worker, time.Second, stopCh)
	}
	go func() {
		defer utilruntime.HandleCrash()
		time.Sleep(5 * time.Minute) // give time for our cache to fill
		e.checkLeftoverEndpoints()
	}()

	if e.internalPodInformer != nil {
		go e.internalPodInformer.Run(stopCh)
	}

	<-stopCh
}
Code example #2
// Runs e; will not return until stopCh is closed. workers determines how many
// endpoints will be handled in parallel.
func (e *endpointController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	go e.serviceController.Run(stopCh)
	go e.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go util.Until(e.worker, time.Second, stopCh)
	}
	go func() {
		defer utilruntime.HandleCrash()
		time.Sleep(5 * time.Minute) // give time for our cache to fill
		e.checkLeftoverEndpoints()
	}()
	<-stopCh
	e.queue.ShutDown()
}
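Code examples #1 and #2 are two revisions of the same endpoints-controller Run method. The shared skeleton is: defer HandleCrash, shut the work queue down on exit, start the informers, optionally wait for the caches to sync, fan out workers with wait.Until, and block on stopCh. The sketch below reduces that skeleton to a minimal, hypothetical controller; the type, its queue, and the import paths are illustrative assumptions, not part of the originals.

package example

import (
	"time"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/workqueue"
)

// exampleController is a stand-in for the real EndpointController.
type exampleController struct {
	queue workqueue.Interface
}

// worker drains the queue until it is shut down.
func (c *exampleController) worker() {
	for {
		key, quit := c.queue.Get()
		if quit {
			return
		}
		// ... sync the object identified by key ...
		c.queue.Done(key)
	}
}

// Run mirrors the skeleton shared by examples #1 and #2.
func (c *exampleController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash() // logs (and, by default, re-raises) panics on this goroutine
	defer c.queue.ShutDown()        // unblocks workers stuck in queue.Get()

	for i := 0; i < workers; i++ {
		// wait.Until restarts worker every second until stopCh is closed.
		go wait.Until(c.worker, time.Second, stopCh)
	}
	<-stopCh
}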
Code example #3
File: cluster.go Project: Xmagicer/origin
func (c *clusterResourceVersionObserver) ObserveResourceVersion(resourceVersion string, timeout time.Duration) error {
	if len(c.watchers) == 0 {
		return nil
	}

	wg := &sync.WaitGroup{}
	backendErrors := make([]error, len(c.watchers), len(c.watchers))
	for i, watcher := range c.watchers {
		wg.Add(1)
		go func(i int, watcher rest.Watcher) {
			defer utilruntime.HandleCrash()
			defer wg.Done()
			backendErrors[i] = watchForResourceVersion(c.versioner, watcher, resourceVersion, timeout)
		}(i, watcher)
	}

	glog.V(5).Infof("waiting for resourceVersion %s to be distributed", resourceVersion)
	wg.Wait()

	successes := 0
	for _, err := range backendErrors {
		if err == nil {
			successes++
		} else {
			glog.V(4).Infof("error verifying resourceVersion %s: %v", resourceVersion, err)
		}
	}
	glog.V(5).Infof("resourceVersion %s was distributed to %d etcd cluster members (out of %d)", resourceVersion, successes, len(c.watchers))

	if successes >= c.successThreshold {
		return nil
	}

	return fmt.Errorf("resourceVersion %s was observed on %d cluster members (threshold %d): %v", resourceVersion, successes, c.successThreshold, backendErrors)
}
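The fan-out in code example #3 (and again in #25) is plain sync.WaitGroup bookkeeping: one goroutine per backend, each guarded against panics, writing its result into a slice slot that only it touches. Below is a standard-library-only sketch of the same shape with hypothetical names; a plain deferred recover stands in for utilruntime.HandleCrash.

package example

import (
	"fmt"
	"sync"
)

// gatherErrors runs check once per target in parallel and collects the
// per-target errors by index. Each goroutine writes only to its own slice
// element, so no additional locking is required.
func gatherErrors(targets []string, check func(target string) error) []error {
	var wg sync.WaitGroup
	errs := make([]error, len(targets))
	for i, target := range targets {
		wg.Add(1)
		go func(i int, target string) {
			defer wg.Done()
			defer func() {
				// Convert a panic in check into an ordinary error.
				if r := recover(); r != nil {
					errs[i] = fmt.Errorf("panic while checking %s: %v", target, r)
				}
			}()
			errs[i] = check(target)
		}(i, target)
	}
	wg.Wait()
	return errs
}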
Code example #4
File: cadvisor_linux.go Project: Q-Lee/kubernetes
func (cc *cadvisorClient) exportHTTP(port uint) error {
	// Register the handlers regardless as this registers the prometheus
	// collector properly.
	mux := http.NewServeMux()
	err := cadvisorhttp.RegisterHandlers(mux, cc, "", "", "", "")
	if err != nil {
		return err
	}

	cadvisorhttp.RegisterPrometheusHandler(mux, cc, "/metrics", containerLabels)

	// Only start the http server if port > 0
	if port > 0 {
		serv := &http.Server{
			Addr:    fmt.Sprintf(":%d", port),
			Handler: mux,
		}

		// TODO(vmarmol): Remove this when the cAdvisor port is once again free.
		// If export failed, retry in the background until we are able to bind.
		// This allows an existing cAdvisor to be killed before this one registers.
		go func() {
			defer runtime.HandleCrash()

			err := serv.ListenAndServe()
			for err != nil {
				glog.Infof("Failed to register cAdvisor on port %d, retrying. Error: %v", port, err)
				time.Sleep(time.Minute)
				err = serv.ListenAndServe()
			}
		}()
	}

	return nil
}
Code example #5
File: horizontal.go Project: RomainVabre/origin
func (a *HorizontalController) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	glog.Infof("Starting HPA Controller")
	go a.controller.Run(stopCh)
	<-stopCh
	glog.Infof("Shutting down HPA Controller")
}
Code example #6
File: shared_informer.go Project: ncdc/kubernetes
func (p *processorListener) pop(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	for {
		blockingGet := func() (interface{}, bool) {
			p.lock.Lock()
			defer p.lock.Unlock()

			for len(p.pendingNotifications) == 0 {
				// check if we're shutdown
				select {
				case <-stopCh:
					return nil, true
				default:
				}
				p.cond.Wait()
			}

			nt := p.pendingNotifications[0]
			p.pendingNotifications = p.pendingNotifications[1:]
			return nt, false
		}

		notification, stopped := blockingGet()
		if stopped {
			return
		}

		select {
		case <-stopCh:
			return
		case p.nextCh <- notification:
		}
	}
}
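Code examples #6 and #20 show the condition-variable pop used by the shared informer's processorListener: hold the lock, loop while pendingNotifications is empty, Wait, then dequeue (the matching Broadcast on shutdown is issued by run, shown in example #23). The standard-library sketch below captures the same queue with a stopped flag in place of a stop channel; all names are hypothetical.

package example

import "sync"

// notifyQueue is a minimal cond-guarded FIFO in the spirit of pendingNotifications.
type notifyQueue struct {
	lock    sync.Mutex
	cond    *sync.Cond
	pending []interface{}
	stopped bool
}

func newNotifyQueue() *notifyQueue {
	q := &notifyQueue{}
	q.cond = sync.NewCond(&q.lock)
	return q
}

// push appends an item and wakes one waiter.
func (q *notifyQueue) push(item interface{}) {
	q.lock.Lock()
	defer q.lock.Unlock()
	q.pending = append(q.pending, item)
	q.cond.Signal()
}

// stop marks the queue stopped and wakes every waiter so pop can return.
func (q *notifyQueue) stop() {
	q.lock.Lock()
	defer q.lock.Unlock()
	q.stopped = true
	q.cond.Broadcast()
}

// pop blocks until an item is available (item, true) or stop was called (nil, false).
func (q *notifyQueue) pop() (interface{}, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()
	for len(q.pending) == 0 {
		if q.stopped {
			return nil, false
		}
		q.cond.Wait()
	}
	item := q.pending[0]
	q.pending = q.pending[1:]
	return item, true
}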
Code example #7
File: iowatcher.go Project: Clarifai/kubernetes
// receive reads result from the decoder in a loop and sends down the result channel.
func (sw *StreamWatcher) receive() {
	defer close(sw.result)
	defer sw.Stop()
	defer utilruntime.HandleCrash()
	for {
		action, obj, err := sw.source.Decode()
		if err != nil {
			// Ignore expected error.
			if sw.stopping() {
				return
			}
			switch err {
			case io.EOF:
				// watch closed normally
			case io.ErrUnexpectedEOF:
				glog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
			default:
				msg := "Unable to decode an event from the watch stream: %v"
				if net.IsProbableEOF(err) {
					glog.V(5).Infof(msg, err)
				} else {
					glog.Errorf(msg, err)
				}
			}
			return
		}
		sw.result <- Event{
			Type:   action,
			Object: obj,
		}
	}
}
Code example #8
File: dockerutil.go Project: Xmagicer/origin
// buildImage invokes a docker build on a particular directory
func buildImage(client DockerClient, dir string, dockerfilePath string, noCache bool, tag string, tar tar.Tar, pullAuth *docker.AuthConfigurations, forcePull bool, cgLimits *s2iapi.CGroupLimits) error {
	// TODO: be able to pass a stream directly to the Docker build to avoid the double temp hit
	r, w := io.Pipe()
	go func() {
		defer utilruntime.HandleCrash()
		defer w.Close()
		if err := tar.CreateTarStream(dir, false, w); err != nil {
			w.CloseWithError(err)
		}
	}()
	defer w.Close()
	glog.V(5).Infof("Invoking Docker build to create %q", tag)
	opts := docker.BuildImageOptions{
		Name:           tag,
		RmTmpContainer: true,
		OutputStream:   os.Stdout,
		InputStream:    r,
		Dockerfile:     dockerfilePath,
		NoCache:        noCache,
		Pull:           forcePull,
	}
	if cgLimits != nil {
		opts.Memory = cgLimits.MemoryLimitBytes
		opts.Memswap = cgLimits.MemorySwap
		opts.CPUShares = cgLimits.CPUShares
		opts.CPUPeriod = cgLimits.CPUPeriod
		opts.CPUQuota = cgLimits.CPUQuota
	}
	if pullAuth != nil {
		opts.AuthConfigs = *pullAuth
	}
	return client.BuildImage(opts)
}
Code example #9
File: stream.go Project: astropuffin/kubernetes
// handle implements a WebSocket handler.
func (r *Reader) handle(ws *websocket.Conn) {
	// Close the connection when the client requests it, or when we finish streaming, whichever happens first
	closeConnOnce := &sync.Once{}
	closeConn := func() {
		closeConnOnce.Do(func() {
			ws.Close()
		})
	}

	negotiated := ws.Config().Protocol
	r.selectedProtocol = negotiated[0]
	defer close(r.err)
	defer closeConn()

	go func() {
		defer runtime.HandleCrash()
		// This blocks until the connection is closed.
		// Client should not send anything.
		IgnoreReceives(ws, r.timeout)
		// Once the client closes, we should also close
		closeConn()
	}()

	r.err <- messageCopy(ws, r.r, !r.protocols[r.selectedProtocol].Binary, r.ping, r.timeout)
}
Code example #10
File: controller.go Project: xgwang-zte/origin
// Run begins watching and syncing.
func (ic *IngressIPController) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	go ic.controller.Run(stopCh)

	glog.V(5).Infof("Waiting for the initial sync to be completed")
	for !ic.controller.HasSynced() {
		select {
		case <-time.After(SyncProcessedPollPeriod):
		case <-stopCh:
			return
		}
	}

	if !ic.processInitialSync() {
		return
	}

	glog.V(5).Infof("Starting normal worker")
	for {
		if !ic.work() {
			break
		}
	}

	glog.V(5).Infof("Shutting down ingress ip controller")
	ic.queue.ShutDown()
}
Code example #11
func (w *resourceGatherWorker) gather(initialSleep time.Duration) {
	defer utilruntime.HandleCrash()
	defer w.wg.Done()
	defer Logf("Closing worker for %v", w.nodeName)
	select {
	case <-time.After(initialSleep):
		// TODO: remove after #21313 is fixed
		Logf("Probing %v", w.nodeName)
		w.singleProbe()
		// TODO: remove after #21313 is fixed
		Logf("Finished probe for %v", w.nodeName)
		for {
			select {
			case <-time.After(resourceDataGatheringPeriod):
				// TODO: remove after #21313 is fixed
				Logf("Probing %v", w.nodeName)
				w.singleProbe()
				// TODO: remove after #21313 is fixed
				Logf("Finished probe for %v", w.nodeName)
			case <-w.stopCh:
				return
			}
		}
	case <-w.stopCh:
		return
	}
}
Code example #12
// TestAdmitExceedQuotaLimit verifies that a pod that exceeds the allowed usage is rejected during admission.
func TestAdmitExceedQuotaLimit(t *testing.T) {
	resourceQuota := &api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("3"),
				api.ResourceMemory: resource.MustParse("100Gi"),
				api.ResourcePods:   resource.MustParse("5"),
			},
			Used: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("1"),
				api.ResourceMemory: resource.MustParse("50Gi"),
				api.ResourcePods:   resource.MustParse("3"),
			},
		},
	}
	kubeClient := fake.NewSimpleClientset(resourceQuota)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	evaluator, _ := newQuotaEvaluator(kubeClient, install.NewRegistry(kubeClient))
	evaluator.indexer = indexer
	stopCh := make(chan struct{})
	defer close(stopCh)
	defer utilruntime.HandleCrash()
	go evaluator.Run(5, stopCh)
	handler := &quotaAdmission{
		Handler:   admission.NewHandler(admission.Create, admission.Update),
		evaluator: evaluator,
	}
	indexer.Add(resourceQuota)
	newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")))
	err := handler.Admit(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
	if err == nil {
		t.Errorf("Expected an error exceeding quota")
	}
}
Code example #13
// TestAdmissionIgnoresSubresources verifies that the admission controller ignores subresources
// It verifies that creation of a pod that would have exceeded quota is properly failed
// It verifies that create operations to a subresource that would have exceeded quota would succeed
func TestAdmissionIgnoresSubresources(t *testing.T) {
	resourceQuota := &api.ResourceQuota{}
	resourceQuota.Name = "quota"
	resourceQuota.Namespace = "test"
	resourceQuota.Status = api.ResourceQuotaStatus{
		Hard: api.ResourceList{},
		Used: api.ResourceList{},
	}
	resourceQuota.Status.Hard[api.ResourceMemory] = resource.MustParse("2Gi")
	resourceQuota.Status.Used[api.ResourceMemory] = resource.MustParse("1Gi")
	kubeClient := fake.NewSimpleClientset(resourceQuota)
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	evaluator, _ := newQuotaEvaluator(kubeClient, install.NewRegistry(kubeClient))
	evaluator.indexer = indexer
	stopCh := make(chan struct{})
	defer close(stopCh)
	defer utilruntime.HandleCrash()
	go evaluator.Run(5, stopCh)
	handler := &quotaAdmission{
		Handler:   admission.NewHandler(admission.Create, admission.Update),
		evaluator: evaluator,
	}
	indexer.Add(resourceQuota)
	newPod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
	err := handler.Admit(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
	if err == nil {
		t.Errorf("Expected an error because the pod exceeded allowed quota")
	}
	err = handler.Admit(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "subresource", admission.Create, nil))
	if err != nil {
		t.Errorf("Did not expect an error because the action went to a subresource: %v", err)
	}
}
Code example #14
func (e *DockercfgController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	// Wait for the store to sync before starting any work in this controller.
	ready := make(chan struct{})
	go e.waitForDockerURLs(ready, stopCh)
	select {
	case <-ready:
	case <-stopCh:
		return
	}
	glog.Infof("Dockercfg secret controller initialized, starting.")

	go e.serviceAccountController.Run(stopCh)
	go e.secretController.Run(stopCh)
	for !e.serviceAccountController.HasSynced() || !e.secretController.HasSynced() {
		time.Sleep(100 * time.Millisecond)
	}

	for i := 0; i < workers; i++ {
		go wait.Until(e.worker, time.Second, stopCh)
	}

	<-stopCh
	glog.Infof("Shutting down dockercfg secret controller")
	e.queue.ShutDown()
}
Code example #15
File: cacher.go Project: olegshaldybin/kubernetes
func (c *cacheWatcher) process(initEvents []watchCacheEvent, resourceVersion uint64) {
	defer utilruntime.HandleCrash()

	// Check how long we are processing initEvents.
	// As long as these are not processed, we are not processing
	// any incoming events, so if it takes long, we may actually
	// block all watchers for some time.
	// TODO: If it appears to be long in some cases, we may consider
	// - longer result buffers if there are a lot of initEvents
	// - try some parallelization
	const initProcessThreshold = 5 * time.Millisecond
	startTime := time.Now()
	for _, event := range initEvents {
		c.sendWatchCacheEvent(event)
	}
	processingTime := time.Since(startTime)
	if processingTime > initProcessThreshold {
		glog.V(2).Infof("processing %d initEvents took %v", len(initEvents), processingTime)
	}

	defer close(c.result)
	defer c.Stop()
	for {
		event, ok := <-c.input
		if !ok {
			return
		}
		// only send events newer than resourceVersion
		if event.ResourceVersion > resourceVersion {
			c.sendWatchCacheEvent(event)
		}
	}
}
Code example #16
// worker processes the queue of namespace objects.
// Each namespace can be in the queue at most once.
// The system ensures that no two workers can process
// the same namespace at the same time.
func (nm *NamespaceController) worker() {
	for {
		func() {
			key, quit := nm.queue.Get()
			if quit {
				return
			}
			defer nm.queue.Done(key)
			if err := nm.syncNamespaceFromKey(key.(string)); err != nil {
				if estimate, ok := err.(*contentRemainingError); ok {
					go func() {
						defer utilruntime.HandleCrash()
						t := estimate.Estimate/2 + 1
						glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", key, t)
						time.Sleep(time.Duration(t) * time.Second)
						nm.queue.Add(key)
					}()
				} else {
					// rather than wait for a full resync, re-add the namespace to the queue to be processed
					nm.queue.Add(key)
					utilruntime.HandleError(err)
				}
			}
		}()
	}
}
Code example #17
File: goroutinemap.go Project: CodeJuan/kubernetes
func (grm *goRoutineMap) Run(
	operationName string,
	operationFunc func() error) error {
	grm.lock.Lock()
	defer grm.lock.Unlock()

	existingOp, exists := grm.operations[operationName]
	if exists {
		// Operation with name exists
		if existingOp.operationPending {
			return NewAlreadyExistsError(operationName)
		}

		if err := existingOp.expBackoff.SafeToRetry(operationName); err != nil {
			return err
		}
	}

	grm.operations[operationName] = operation{
		operationPending: true,
		expBackoff:       existingOp.expBackoff,
	}
	go func() (err error) {
		// Handle unhandled panics (very unlikely)
		defer k8sRuntime.HandleCrash()
		// Handle completion of and error, if any, from operationFunc()
		defer grm.operationComplete(operationName, &err)
		// Handle panic, if any, from operationFunc()
		defer k8sRuntime.RecoverFromPanic(&err)
		return operationFunc()
	}()

	return nil
}
Code example #18
File: wait.go Project: odacremolbap/kubernetes
// JitterUntil loops until stop channel is closed, running f every period.
// If jitterFactor is positive, the period is jittered before every run of f.
// If jitterFactor is not positive, the period is unchanged.
// Catches any panics, and keeps going. f may not be invoked if
// stop channel is already closed. Pass NeverStop to Until if you
// don't want it to stop.
func JitterUntil(f func(), period time.Duration, jitterFactor float64, stopCh <-chan struct{}) {
	select {
	case <-stopCh:
		return
	default:
	}

	for {
		func() {
			defer runtime.HandleCrash()
			f()
		}()

		jitteredPeriod := period
		if jitterFactor > 0.0 {
			jitteredPeriod = Jitter(period, jitterFactor)
		}

		select {
		case <-stopCh:
			return
		case <-time.After(jitteredPeriod):
		}
	}
}
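A usage sketch for the JitterUntil shown above: run a check roughly every ten seconds, jittered by up to 50%, until the returned stop function is called. The health-check closure and function names are hypothetical, and the sketch assumes the JitterUntil from example #18 (or a wait package of the same vintage) is in scope.

package example

import (
	"log"
	"time"
)

// runHealthLoop starts a jittered polling loop and returns a function that
// stops it. Call stop at most once.
func runHealthLoop(check func() error) (stop func()) {
	stopCh := make(chan struct{})
	go JitterUntil(func() {
		if err := check(); err != nil {
			log.Printf("health check failed: %v", err)
		}
	}, 10*time.Second, 0.5, stopCh)
	return func() { close(stopCh) }
}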
Code example #19
File: tokens_controller.go Project: ncdc/kubernetes
// Run starts the controller and blocks until stopCh is closed.
func (e *TokensController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	// Start controllers (to fill stores, call informers, fill work queues)
	go e.serviceAccountController.Run(stopCh)
	go e.secretController.Run(stopCh)

	// Wait for stores to fill
	for !e.serviceAccountController.HasSynced() || !e.secretController.HasSynced() {
		time.Sleep(100 * time.Millisecond)
	}

	// Spawn workers to process work queues
	for i := 0; i < workers; i++ {
		go wait.Until(e.syncServiceAccount, 0, stopCh)
		go wait.Until(e.syncSecret, 0, stopCh)
	}

	// Block until stop channel is closed
	<-stopCh

	// Shut down queues
	e.syncServiceAccountQueue.ShutDown()
	e.syncSecretQueue.ShutDown()
}
Code example #20
File: shared_informer.go Project: 40a/bootkube
func (p *processorListener) pop(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	p.lock.Lock()
	defer p.lock.Unlock()
	for {
		for len(p.pendingNotifications) == 0 {
			// check if we're shutdown
			select {
			case <-stopCh:
				return
			default:
			}

			p.cond.Wait()
		}
		notification := p.pendingNotifications[0]
		p.pendingNotifications = p.pendingNotifications[1:]

		select {
		case <-stopCh:
			return
		case p.nextCh <- notification:
		}
	}
}
Code example #21
File: shared_informer.go Project: ncdc/kubernetes
func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, s.indexer)

	cfg := &Config{
		Queue:            fifo,
		ListerWatcher:    s.listerWatcher,
		ObjectType:       s.objectType,
		FullResyncPeriod: s.fullResyncPeriod,
		RetryOnError:     false,

		Process: s.HandleDeltas,
	}

	func() {
		s.startedLock.Lock()
		defer s.startedLock.Unlock()

		s.controller = New(cfg)
		s.started = true
	}()

	s.stopCh = stopCh
	s.processor.run(stopCh)
	s.controller.Run(stopCh)
}
Code example #22
// Run begins quota controller using the specified number of workers
func (c *ClusterQuotaReconcilationController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	// Wait for the stores to sync before starting any work in this controller.
	ready := make(chan struct{})
	go c.waitForSyncedStores(ready, stopCh)
	select {
	case <-ready:
	case <-stopCh:
		return
	}

	// the controllers that replenish other resources to respond rapidly to state changes
	for _, replenishmentController := range c.replenishmentControllers {
		go replenishmentController.Run(stopCh)
	}

	// the workers that chug through the quota calculation backlog
	for i := 0; i < workers; i++ {
		go wait.Until(c.worker, time.Second, stopCh)
	}

	// the timer for how often we do a full recalculation across all quotas
	go wait.Until(func() { c.calculateAll() }, c.resyncPeriod, stopCh)

	<-stopCh
	glog.Infof("Shutting down ClusterQuotaReconcilationController")
	c.queue.ShutDown()
}
Code example #23
File: shared_informer.go Project: ncdc/kubernetes
func (p *processorListener) run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	for {
		var next interface{}
		select {
		case <-stopCh:
			func() {
				p.lock.Lock()
				defer p.lock.Unlock()
				p.cond.Broadcast()
			}()
			return
		case next = <-p.nextCh:
		}

		switch notification := next.(type) {
		case updateNotification:
			p.handler.OnUpdate(notification.oldObj, notification.newObj)
		case addNotification:
			p.handler.OnAdd(notification.newObj)
		case deleteNotification:
			p.handler.OnDelete(notification.oldObj)
		default:
			utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next))
		}
	}
}
Code example #24
File: proxysocket.go Project: juanluisvaladas/origin
func (udp *udpProxySocket) getBackendConn(activeClients *ClientCache, cliAddr net.Addr, loadBalancer LoadBalancer, service proxy.ServicePortName, timeout time.Duration) (net.Conn, error) {
	activeClients.Lock()
	defer activeClients.Unlock()

	svrConn, found := activeClients.Clients[cliAddr.String()]
	if !found {
		// TODO: This could spin up a new goroutine to make the outbound connection,
		// and keep accepting inbound traffic.
		glog.V(3).Infof("New UDP connection from %s", cliAddr)
		var err error
		svrConn, err = TryConnectEndpoints(service, cliAddr, "udp", loadBalancer)
		if err != nil {
			return nil, err
		}
		if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil {
			glog.Errorf("SetDeadline failed: %v", err)
			return nil, err
		}
		activeClients.Clients[cliAddr.String()] = svrConn
		go func(cliAddr net.Addr, svrConn net.Conn, activeClients *ClientCache, timeout time.Duration) {
			defer runtime.HandleCrash()
			udp.proxyClient(cliAddr, svrConn, activeClients, timeout)
		}(cliAddr, svrConn, activeClients, timeout)
	}
	return svrConn, nil
}
Code example #25
// killContainersWithSyncResult kills all pod's containers with sync results.
func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (syncResults []*kubecontainer.SyncResult) {
	containerResults := make(chan *kubecontainer.SyncResult, len(runningPod.Containers))
	wg := sync.WaitGroup{}

	wg.Add(len(runningPod.Containers))
	for _, container := range runningPod.Containers {
		go func(container *kubecontainer.Container) {
			defer utilruntime.HandleCrash()
			defer wg.Done()

			killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, container.Name)
			if err := m.killContainer(pod, container.ID, container.Name, "Need to kill Pod", gracePeriodOverride); err != nil {
				killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
			}
			containerResults <- killContainerResult
		}(container)
	}
	wg.Wait()
	close(containerResults)

	for containerResult := range containerResults {
		syncResults = append(syncResults, containerResult)
	}
	return
}
Code example #26
File: handlers.go Project: RyanBinfeng/kubernetes
// RecoverPanics wraps an http Handler to recover and log panics.
func RecoverPanics(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		defer runtime.HandleCrash(func(err interface{}) {
			http.Error(w, "This request caused apisever to panic. Look in log for details.", http.StatusInternalServerError)
			glog.Errorf("APIServer panic'd on %v %v: %v\n%s\n", req.Method, req.RequestURI, err, debug.Stack())
		})
		defer httplog.NewLogged(req, &w).StacktraceWhen(
			httplog.StatusIsNot(
				http.StatusOK,
				http.StatusCreated,
				http.StatusAccepted,
				http.StatusBadRequest,
				http.StatusMovedPermanently,
				http.StatusTemporaryRedirect,
				http.StatusConflict,
				http.StatusNotFound,
				http.StatusUnauthorized,
				http.StatusForbidden,
				errors.StatusUnprocessableEntity,
				http.StatusSwitchingProtocols,
			),
		).Log()

		// Dispatch to the internal handler
		handler.ServeHTTP(w, req)
	})
}
Code example #27
// finishRequest makes a given resultFunc asynchronous and handles errors returned by the response.
// Any api.Status object returned is considered an "error", which interrupts the normal response flow.
func finishRequest(timeout time.Duration, fn resultFunc) (result runtime.Object, err error) {
	// these channels need to be buffered to prevent the goroutine below from hanging indefinitely
	// when the select statement reads something other than the one the goroutine sends on.
	ch := make(chan runtime.Object, 1)
	errCh := make(chan error, 1)
	panicCh := make(chan interface{}, 1)
	go func() {
		// panics don't cross goroutine boundaries, so we have to handle ourselves
		defer utilruntime.HandleCrash(func(panicReason interface{}) {
			// Propagate to parent goroutine
			panicCh <- panicReason
		})

		if result, err := fn(); err != nil {
			errCh <- err
		} else {
			ch <- result
		}
	}()

	select {
	case result = <-ch:
		if status, ok := result.(*unversioned.Status); ok {
			return nil, errors.FromObject(status)
		}
		return result, nil
	case err = <-errCh:
		return nil, err
	case p := <-panicCh:
		panic(p)
	case <-time.After(timeout):
		return nil, errors.NewTimeoutError("request did not complete within allowed duration", 0)
	}
}
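Code example #27 works around the fact that a panic cannot be recovered across goroutines: the worker catches its own panic via HandleCrash's additional-handler argument, ships the panic value over a buffered channel, and the caller re-panics on its own goroutine. Below is a standard-library sketch of the same relay with hypothetical names.

package example

import (
	"errors"
	"time"
)

// callWithTimeout runs fn on its own goroutine and waits for a result, an
// error, a relayed panic, or a timeout. The buffered channels let the worker
// finish and exit even after the caller has given up on another case.
func callWithTimeout(timeout time.Duration, fn func() (string, error)) (string, error) {
	resultCh := make(chan string, 1)
	errCh := make(chan error, 1)
	panicCh := make(chan interface{}, 1)

	go func() {
		defer func() {
			if r := recover(); r != nil {
				panicCh <- r // relay the panic value to the calling goroutine
			}
		}()
		if result, err := fn(); err != nil {
			errCh <- err
		} else {
			resultCh <- result
		}
	}()

	select {
	case result := <-resultCh:
		return result, nil
	case err := <-errCh:
		return "", err
	case r := <-panicCh:
		panic(r) // re-panic where the caller can see it
	case <-time.After(timeout):
		return "", errors.New("request did not complete within allowed duration")
	}
}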
Code example #28
File: goroutinemap.go Project: XbinZh/kubernetes
func (grm *goRoutineMap) Run(operationName string, operationFunc func() error) error {
	grm.Lock()
	defer grm.Unlock()
	existingOp, exists := grm.operations[operationName]
	if exists {
		// Operation with name exists
		if existingOp.operationPending {
			return newAlreadyExistsError(operationName)
		}

		if time.Since(existingOp.lastErrorTime) <= existingOp.durationBeforeRetry {
			return newExponentialBackoffError(operationName, existingOp)
		}
	}

	grm.operations[operationName] = operation{
		operationPending:    true,
		lastError:           existingOp.lastError,
		lastErrorTime:       existingOp.lastErrorTime,
		durationBeforeRetry: existingOp.durationBeforeRetry,
	}
	go func() (err error) {
		// Handle unhandled panics (very unlikely)
		defer k8sRuntime.HandleCrash()
		// Handle completion of and error, if any, from operationFunc()
		defer grm.operationComplete(operationName, &err)
		// Handle panic, if any, from operationFunc()
		defer recoverFromPanic(operationName, &err)
		return operationFunc()
	}()

	return nil
}
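Code examples #17 and #28 both lean on a named return value: the goroutine's function literal is declared as func() (err error), so the deferred operationComplete and panic-recovery helpers can read or overwrite whatever the operation finally returned. A compact standard-library sketch of that idiom, with hypothetical names:

package example

import "fmt"

// runTracked launches work on its own goroutine and always invokes done with
// the final error, including an error synthesized from a recovered panic.
// The named return value err is what lets the deferred closures see the result.
func runTracked(name string, work func() error, done func(name string, err error)) {
	go func() (err error) {
		// Registered first, runs last: report whatever err ended up being.
		defer func() { done(name, err) }()
		// Registered second, runs first: convert a panic into an error.
		defer func() {
			if r := recover(); r != nil {
				err = fmt.Errorf("operation %q panicked: %v", name, r)
			}
		}()
		return work()
	}()
}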
Code example #29
File: stream.go Project: Clarifai/kubernetes
// Copy the reader to the response. The created WebSocket is closed after this
// method completes.
func (r *Reader) Copy(w http.ResponseWriter, req *http.Request) error {
	go func() {
		defer runtime.HandleCrash()
		websocket.Server{Handshake: r.handshake, Handler: r.handle}.ServeHTTP(w, req)
	}()
	return <-r.err
}
Code example #30
File: controller.go Project: Clarifai/kubernetes
// Run begins watching and syncing.
func (e *quotaEvaluator) Run(workers int) {
	defer utilruntime.HandleCrash()

	for i := 0; i < workers; i++ {
		go wait.Until(e.doWork, time.Second, make(chan struct{}))
	}
}