Example #1
// RunUntil runs the controller until the provided ch is closed.
func (c *Repair) RunUntil(ch chan struct{}) {
	util.Until(func() {
		if err := c.RunOnce(); err != nil {
			util.HandleError(err)
		}
	}, c.interval, ch)
}
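Every example on this page has the same shape: pass util.Until a function to repeat, a wait period, and a stop channel. As a rough mental model only (a sketch of the assumed semantics, not the util package's actual source), such a helper can be written like this:

// Sketch of an Until-style loop: call f, wait period, repeat, and return as
// soon as stopCh is closed or receives a value. Illustration only.
package main

import (
	"fmt"
	"time"
)

func until(f func(), period time.Duration, stopCh <-chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		default:
		}
		f()
		select {
		case <-stopCh:
			return
		case <-time.After(period):
		}
	}
}

func main() {
	stop := make(chan struct{})
	go until(func() { fmt.Println("tick") }, 100*time.Millisecond, stop)
	time.Sleep(350 * time.Millisecond)
	close(stop) // closing the channel ends the loop at its next select
	time.Sleep(50 * time.Millisecond)
}

A zero period, as in Examples #9 and #16 below, simply makes the loop re-invoke f back to back, so the stop channel is the only thing that ends it.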
Example #2
// RunQingYuanService periodically updates the qingyuan service
func (c *Controller) RunQingYuanService(ch chan struct{}) {
	util.Until(func() {
		if err := c.UpdateQingYuanService(); err != nil {
			util.HandleError(fmt.Errorf("unable to sync qingyuan service: %v", err))
		}
	}, c.EndpointInterval, ch)
}
Example #3
func (cm *containerManagerImpl) Start() error {
	// Don't run a background thread if there are no ensureStateFuncs.
	numEnsureStateFuncs := 0
	for _, cont := range cm.systemContainers {
		if cont.ensureStateFunc != nil {
			numEnsureStateFuncs++
		}
	}
	if numEnsureStateFuncs == 0 {
		return nil
	}

	// Run ensure state functions every minute.
	go util.Until(func() {
		for _, cont := range cm.systemContainers {
			if cont.ensureStateFunc != nil {
				if err := cont.ensureStateFunc(cont.manager); err != nil {
					glog.Warningf("[ContainerManager] Failed to ensure state of %q: %v", cont.name, err)
				}
			}
		}
	}, time.Minute, util.NeverStop)

	return nil
}
Example #4
func (m *Master) setupSecureProxy(user, privateKeyfile, publicKeyfile string) {
	// Sync loop to ensure that the SSH key has been installed.
	go util.Until(func() {
		if m.installSSHKey == nil {
			glog.Error("Won't attempt to install ssh key: installSSHKey function is nil")
			return
		}
		key, err := util.ParsePublicKeyFromFile(publicKeyfile)
		if err != nil {
			glog.Errorf("Failed to load public key: %v", err)
			return
		}
		keyData, err := util.EncodeSSHKey(key)
		if err != nil {
			glog.Errorf("Failed to encode public key: %v", err)
			return
		}
		if err := m.installSSHKey(user, keyData); err != nil {
			glog.Errorf("Failed to install ssh key: %v", err)
		}
	}, 5*time.Minute, util.NeverStop)
	// Sync loop for tunnels
	// TODO: switch this to watch.
	go util.Until(func() {
		if err := m.loadTunnels(user, privateKeyfile); err != nil {
			glog.Errorf("Failed to load SSH Tunnels: %v", err)
		}
		if m.tunnels != nil && m.tunnels.Len() != 0 {
			// Sleep for an extra 9 seconds (10 in total with the 1-second Until period) if we have some tunnels.
			// TODO (cjcullen): tunnels can lag behind actually existing nodes.
			time.Sleep(9 * time.Second)
		}
	}, 1*time.Second, util.NeverStop)
	// Refresh loop for tunnels
	// TODO: could make this more controller-ish
	go util.Until(func() {
		time.Sleep(5 * time.Minute)
		if err := m.refreshTunnels(user, privateKeyfile); err != nil {
			glog.Errorf("Failed to refresh SSH Tunnels: %v", err)
		}
	}, 0*time.Second, util.NeverStop)
}
Example #5
// Run begins watching and syncing.
func (rm *ReplicationManager) Run(workers int, stopCh <-chan struct{}) {
	defer util.HandleCrash()
	go rm.rcController.Run(stopCh)
	go rm.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go util.Until(rm.worker, time.Second, stopCh)
	}
	<-stopCh
	glog.Infof("Shutting down RC Manager")
	rm.queue.ShutDown()
}
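The Run methods in Examples #5 and #8, and the tests in Examples #7, #11, and #12, all hand a worker method to util.Until without showing its body. The sketch below is the assumed shape of such a worker, with a minimal stand-in queue; it illustrates the pattern, not the repository's actual workqueue API:

// Sketch only: a worker of the kind passed to util.Until above. It drains a
// work queue until the queue is shut down; util.Until then re-invokes it each
// period for as long as the stop channel stays open. The queue type here is a
// stand-in assumption, not the real workqueue package.
package main

import (
	"fmt"
	"time"
)

type miniQueue struct{ items chan string }

func (q *miniQueue) Get() (key string, shutdown bool) { k, ok := <-q.items; return k, !ok }
func (q *miniQueue) ShutDown()                        { close(q.items) }

func worker(q *miniQueue, sync func(key string) error) {
	for {
		key, shutdown := q.Get() // blocks until an item arrives or ShutDown is called
		if shutdown {
			return
		}
		if err := sync(key); err != nil {
			fmt.Printf("error syncing %q: %v\n", key, err)
		}
	}
}

func main() {
	q := &miniQueue{items: make(chan string, 1)}
	q.items <- "default/frontend"
	done := make(chan struct{})
	go func() {
		defer close(done)
		worker(q, func(k string) error { fmt.Println("synced", k); return nil })
	}()
	time.Sleep(50 * time.Millisecond)
	q.ShutDown() // mirrors rm.queue.ShutDown() after <-stopCh in Example #5
	<-done
}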
Example #6
func newPortRangeAllocator(r util.PortRange) PortAllocator {
	if r.Base == 0 || r.Size == 0 {
		panic("illegal argument: may not specify an empty port range")
	}
	ra := &rangeAllocator{
		PortRange: r,
		ports:     make(chan int, portsBufSize),
		rand:      rand.New(rand.NewSource(time.Now().UnixNano())),
	}
	go util.Until(func() { ra.fillPorts(util.NeverStop) }, nextFreePortCooldown, util.NeverStop)
	return ra
}
Example #7
func TestUpdatePods(t *testing.T) {
	fakeWatch := watch.NewFake()
	client := &testclient.Fake{Watch: fakeWatch}
	manager := NewReplicationManager(client, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	received := make(chan string)

	manager.syncHandler = func(key string) error {
		obj, exists, err := manager.controllerStore.Store.GetByKey(key)
		if !exists || err != nil {
			t.Errorf("Expected to find controller under key %v", key)
		}
		received <- obj.(*api.ReplicationController).Name
		return nil
	}

	stopCh := make(chan struct{})
	defer close(stopCh)
	go util.Until(manager.worker, 10*time.Millisecond, stopCh)

	// Put 2 rcs and one pod into the controller's stores
	testControllerSpec1 := newReplicationController(1)
	manager.controllerStore.Store.Add(testControllerSpec1)
	testControllerSpec2 := *testControllerSpec1
	testControllerSpec2.Spec.Selector = map[string]string{"bar": "foo"}
	testControllerSpec2.Name = "barfoo"
	manager.controllerStore.Store.Add(&testControllerSpec2)

	// Put one pod in the podStore
	pod1 := newPodList(manager.podStore.Store, 1, api.PodRunning, testControllerSpec1).Items[0]
	pod2 := pod1
	pod2.Labels = testControllerSpec2.Spec.Selector

	// Send an update of the same pod with modified labels, and confirm we get a sync request for
	// both controllers
	manager.updatePod(&pod1, &pod2)

	expected := util.NewStringSet(testControllerSpec1.Name, testControllerSpec2.Name)
	for _, name := range expected.List() {
		t.Logf("Expecting update for %+v", name)
		select {
		case got := <-received:
			if !expected.Has(got) {
				t.Errorf("Expected keys %#v got %v", expected, got)
			}
		case <-time.After(controllerTimeout):
			t.Errorf("Expected update notifications for controllers within 100ms each")
		}
	}
}
Example #8
// Runs e; will not return until stopCh is closed. workers determines how many
// endpoints will be handled in parallel.
func (e *endpointController) Run(workers int, stopCh <-chan struct{}) {
	defer util.HandleCrash()
	go e.serviceController.Run(stopCh)
	go e.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go util.Until(e.worker, time.Second, stopCh)
	}
	go func() {
		defer util.HandleCrash()
		time.Sleep(5 * time.Minute) // give time for our cache to fill
		e.checkLeftoverEndpoints()
	}()
	<-stopCh
	e.queue.ShutDown()
}
Example #9
// runs the main qinglet loop, closing the qingletFinished chan when the loop exits.
// never returns.
func (kl *qingletExecutor) Run(updates <-chan qinglet.PodUpdate) {
	defer func() {
		close(kl.qingletFinished)
		util.HandleCrash()
		log.Infoln("qinglet run terminated") //TODO(jdef) turn down verbosity
		// important: never return! this is in our contract
		select {}
	}()

	// push updates through a closable pipe. when the executor indicates shutdown
	// via Done() we want to stop the Qinglet from processing updates.
	pipe := make(chan qinglet.PodUpdate)
	go func() {
		// closing pipe will cause our patched qinglet's syncLoop() to exit
		defer close(pipe)
	pipeLoop:
		for {
			select {
			case <-kl.executorDone:
				break pipeLoop
			default:
				select {
				case u := <-updates:
					select {
					case pipe <- u: // noop
					case <-kl.executorDone:
						break pipeLoop
					}
				case <-kl.executorDone:
					break pipeLoop
				}
			}
		}
	}()

	// we expect that Run() will complete after the pipe is closed and the
	// qinglet's syncLoop() has finished processing its backlog, which hopefully
	// will not take very long. Peeking into the future (current k8s master) it
	// seems that the backlog has grown from 1 to 50 -- this may negatively impact
	// us going forward, time will tell.
	util.Until(func() { kl.Qinglet.Run(pipe) }, 0, kl.executorDone)

	//TODO(jdef) revisit this if/when executor failover lands
	err := kl.SyncPods([]*api.Pod{}, nil, nil, time.Now())
	if err != nil {
		log.Errorf("failed to cleanly remove all pods and associated state: %v", err)
	}
}
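The "closable pipe" in Example #9 is a general pattern: forward values from an upstream channel to a downstream one until a done channel fires, then close the downstream channel so its consumer unblocks and exits. Stripped of the qinglet specifics, a minimal sketch of the same idea (the names here are illustrative assumptions):

// Sketch of the forwarding pattern used above: copy from updates to pipe until
// done is closed, then close pipe so the downstream loop terminates.
package main

import "fmt"

func forward(updates <-chan string, done <-chan struct{}) <-chan string {
	pipe := make(chan string)
	go func() {
		defer close(pipe) // closing pipe lets the consumer's range loop finish
		for {
			select {
			case <-done:
				return
			case u, ok := <-updates:
				if !ok {
					return
				}
				select {
				case pipe <- u:
				case <-done:
					return
				}
			}
		}
	}()
	return pipe
}

func main() {
	updates := make(chan string, 2)
	done := make(chan struct{})
	updates <- "pod-added"
	updates <- "pod-updated"
	close(updates)
	for u := range forward(updates, done) {
		fmt.Println("consumed", u)
	}
}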
Example #10
// Run begins processing items, and will continue until a value is sent down stopCh.
// It's an error to call Run more than once.
// Run blocks; call via go.
func (c *Controller) Run(stopCh <-chan struct{}) {
	defer util.HandleCrash()
	r := cache.NewReflector(
		c.config.ListerWatcher,
		c.config.ObjectType,
		c.config.Queue,
		c.config.FullResyncPeriod,
	)

	c.reflectorMutex.Lock()
	c.reflector = r
	c.reflectorMutex.Unlock()

	r.RunUntil(stopCh)

	util.Until(c.processLoop, time.Second, stopCh)
}
Example #11
func TestWatchPods(t *testing.T) {
	fakeWatch := watch.NewFake()
	client := &testclient.Fake{Watch: fakeWatch}
	manager := NewReplicationManager(client, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	// Put one rc and one pod into the controller's stores
	testControllerSpec := newReplicationController(1)
	manager.controllerStore.Store.Add(testControllerSpec)
	received := make(chan string)
	// The pod update sent through the fakeWatcher should figure out the managing rc and
	// send it into the syncHandler.
	manager.syncHandler = func(key string) error {

		obj, exists, err := manager.controllerStore.Store.GetByKey(key)
		if !exists || err != nil {
			t.Errorf("Expected to find controller under key %v", key)
		}
		controllerSpec := obj.(*api.ReplicationController)
		if !api.Semantic.DeepDerivative(controllerSpec, testControllerSpec) {
			t.Errorf("\nExpected %#v,\nbut got %#v", testControllerSpec, controllerSpec)
		}
		close(received)
		return nil
	}
	// Start only the pod watcher and the workqueue, send a watch event,
	// and make sure it hits the sync method for the right rc.
	stopCh := make(chan struct{})
	defer close(stopCh)
	go manager.podController.Run(stopCh)
	go util.Until(manager.worker, 10*time.Millisecond, stopCh)

	pods := newPodList(nil, 1, api.PodRunning, testControllerSpec)
	testPod := pods.Items[0]
	testPod.Status.Phase = api.PodFailed
	fakeWatch.Add(&testPod)

	select {
	case <-received:
	case <-time.After(controllerTimeout):
		t.Errorf("Expected 1 call but got 0")
	}
}
Example #12
func TestWatchControllers(t *testing.T) {
	fakeWatch := watch.NewFake()
	client := &testclient.Fake{Watch: fakeWatch}
	manager := NewReplicationManager(client, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	var testControllerSpec api.ReplicationController
	received := make(chan string)

	// The update sent through the fakeWatcher should make its way into the workqueue,
	// and eventually into the syncHandler. The handler validates the received controller
	// and closes the received channel to indicate that the test can finish.
	manager.syncHandler = func(key string) error {

		obj, exists, err := manager.controllerStore.Store.GetByKey(key)
		if !exists || err != nil {
			t.Errorf("Expected to find controller under key %v", key)
		}
		controllerSpec := *obj.(*api.ReplicationController)
		if !api.Semantic.DeepDerivative(controllerSpec, testControllerSpec) {
			t.Errorf("Expected %#v, but got %#v", testControllerSpec, controllerSpec)
		}
		close(received)
		return nil
	}
	// Start only the rc watcher and the workqueue, send a watch event,
	// and make sure it hits the sync method.
	stopCh := make(chan struct{})
	defer close(stopCh)
	go manager.rcController.Run(stopCh)
	go util.Until(manager.worker, 10*time.Millisecond, stopCh)

	testControllerSpec.Name = "foo"
	fakeWatch.Add(&testControllerSpec)

	select {
	case <-received:
	case <-time.After(controllerTimeout):
		t.Errorf("Expected 1 call but got 0")
	}
}
Example #13
func testMasterUpgrade(ip, v string, mUp func(v string) error) {
	Logf("Starting async validation")
	httpClient := http.Client{Timeout: 2 * time.Second}
	done := make(chan struct{}, 1)
	// Let's make sure we've finished the heartbeat before shutting things down.
	var wg sync.WaitGroup
	go util.Until(func() {
		defer GinkgoRecover()
		wg.Add(1)
		defer wg.Done()

		if err := wait.Poll(poll, singleCallTimeout, func() (bool, error) {
			r, err := httpClient.Get("http://" + ip)
			if err != nil {
				Logf("Error reaching %s: %v", ip, err)
				return false, nil
			}
			if r.StatusCode < http.StatusOK || r.StatusCode >= http.StatusNotFound {
				Logf("Bad response; status: %d, response: %v", r.StatusCode, r)
				return false, nil
			}
			return true, nil
		}); err != nil {
			// We log the error here because the test will fail at the very end
			// because this validation runs in another goroutine. Without this,
			// a failure is very confusing to track down because from the logs
			// everything looks fine.
			msg := fmt.Sprintf("Failed to contact service during master upgrade: %v", err)
			Logf(msg)
			Failf(msg)
		}
	}, 200*time.Millisecond, done)

	Logf("Starting master upgrade")
	expectNoError(mUp(v))
	done <- struct{}{}
	Logf("Stopping async validation")
	wg.Wait()
	Logf("Master upgrade complete")
}
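Example #13 stops its validation loop differently from the rest of the page: done is a buffered channel and the test sends a single value instead of closing it. A send satisfies exactly one receive, which is all the lone Until loop above needs; closing the channel, as the other examples do, releases every goroutine selecting on it. A small self-contained sketch of that difference:

// Sketch: close releases every waiter on a stop channel, while a single
// buffered send satisfies exactly one receive.
package main

import (
	"fmt"
	"sync"
)

func main() {
	// Closing the channel unblocks all three waiters at once.
	stopByClose := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-stopByClose
			fmt.Println("waiter", id, "stopped by close")
		}(i)
	}
	close(stopByClose)
	wg.Wait()

	// A buffered send is consumed by exactly one receive, as in Example #13.
	stopBySend := make(chan struct{}, 1)
	stopBySend <- struct{}{}
	<-stopBySend
	fmt.Println("one send, one receive; any further receiver would keep waiting")
}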
Example #14
// RunUntil begins polling. It starts a goroutine and returns immediately.
// It will stop when the stopCh is closed.
func (p *Poller) RunUntil(stopCh <-chan struct{}) {
	go util.Until(p.run, p.period, stopCh)
}
Example #15
// RunUntil starts a watch and handles watch events. Will restart the watch if it is closed.
// RunUntil starts a goroutine and returns immediately. It will exit when stopCh is closed.
func (r *Reflector) RunUntil(stopCh <-chan struct{}) {
	go util.Until(func() { r.listAndWatch(stopCh) }, r.period, stopCh)
}
Example #16
// Run begins watching and scheduling. It starts a goroutine and returns immediately.
func (s *Scheduler) Run() {
	go util.Until(s.scheduleOne, 0, s.config.StopEverything)
}