func TestScheduler(t *testing.T) {
	defer record.StartLogging(t.Logf).Stop()
	errS := errors.New("scheduler")
	errB := errors.New("binder")

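	// The table drives scheduleOne through three cases: a successful schedule
	// and bind, a scheduler error, and a bind failure after a successful schedule.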
	table := []struct {
		injectBindError error
		sendPod         *api.Pod
		algo            scheduler.Scheduler
		expectErrorPod  *api.Pod
		expectError     error
		expectBind      *api.Binding
		eventReason     string
	}{
		{
			sendPod:     podWithID("foo"),
			algo:        mockScheduler{"machine1", nil},
			expectBind:  &api.Binding{PodID: "foo", Host: "machine1"},
			eventReason: "scheduled",
		}, {
			sendPod:        podWithID("foo"),
			algo:           mockScheduler{"machine1", errS},
			expectError:    errS,
			expectErrorPod: podWithID("foo"),
			eventReason:    "failedScheduling",
		}, {
			sendPod:         podWithID("foo"),
			algo:            mockScheduler{"machine1", nil},
			expectBind:      &api.Binding{PodID: "foo", Host: "machine1"},
			injectBindError: errB,
			expectError:     errB,
			expectErrorPod:  podWithID("foo"),
			eventReason:     "failedScheduling",
		},
	}

	for i, item := range table {
		var gotError error
		var gotPod *api.Pod
		var gotBinding *api.Binding
		c := &Config{
			MinionLister: scheduler.FakeMinionLister(
				api.MinionList{Items: []api.Minion{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
			),
			Algorithm: item.algo,
			Binder: fakeBinder{func(b *api.Binding) error {
				gotBinding = b
				return item.injectBindError
			}},
			Error: func(p *api.Pod, err error) {
				gotPod = p
				gotError = err
			},
			NextPod: func() *api.Pod {
				return item.sendPod
			},
		}
		s := New(c)
		called := make(chan struct{})
		events := record.GetEvents(func(event *api.Event) {
			if e, a := item.eventReason, event.Reason; e != a {
				t.Errorf("%v: expected %v, got %v", i, e, a)
			}
			close(called)
		})
		s.scheduleOne()
		if e, a := item.expectErrorPod, gotPod; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: error pod: wanted %v, got %v", i, e, a)
		}
		if e, a := item.expectError, gotError; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: error: wanted %v, got %v", i, e, a)
		}
		if e, a := item.expectBind, gotBinding; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: error: wanted %v, got %v", i, e, a)
		}
		<-called
		events.Stop()
	}
}
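The helpers podWithID, mockScheduler, and fakeBinder used above are defined elsewhere in the package. A minimal sketch of what they might look like, inferred only from how the test calls them (the field names and the Schedule/Bind signatures are assumptions for illustration, not the package's actual definitions):

// Illustrative reconstructions only; the real helpers live alongside the test.
func podWithID(id string) *api.Pod {
	// Only the pod's identity matters for these cases.
	return &api.Pod{ObjectMeta: api.ObjectMeta{Name: id}}
}

// mockScheduler returns a fixed machine name and error for every pod.
type mockScheduler struct {
	machine string
	err     error
}

func (s mockScheduler) Schedule(pod api.Pod, lister scheduler.MinionLister) (string, error) {
	return s.machine, s.err
}

// fakeBinder delegates Bind to the injected function so the test can
// capture the binding and inject bind errors.
type fakeBinder struct {
	b func(binding *api.Binding) error
}

func (f fakeBinder) Bind(binding *api.Binding) error {
	return f.b(binding)
}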
Example #2
// TODO: Split this up?
func SetupLogging() {
	// Log the events locally too.
	record.StartLogging(glog.Infof)
}
Example #3
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()
	rand.Seed(time.Now().UTC().UnixNano())

	verflag.PrintAndExitIfRequested()

	if *runonce {
		exclusiveFlag := "invalid option: --runonce and %s are mutually exclusive"
		if len(etcdServerList) > 0 {
			glog.Fatalf(exclusiveFlag, "--etcd_servers")
		}
		if *enableServer {
			glog.Infof("--runonce is set, disabling server")
			*enableServer = false
		}
	}

	etcd.SetLogger(util.NewLogger("etcd "))

	// Log the events locally too.
	record.StartLogging(glog.Infof)

	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: *allowPrivileged,
	})

	dockerClient, err := docker.NewClient(getDockerEndpoint())
	if err != nil {
		glog.Fatal("Couldn't connect to docker.")
	}

	hostname := getHostname()

	if *rootDirectory == "" {
		glog.Fatal("Invalid root directory path.")
	}
	*rootDirectory = path.Clean(*rootDirectory)
	if err := os.MkdirAll(*rootDirectory, 0750); err != nil {
		glog.Fatalf("Error creating root directory: %v", err)
	}

	// source of all configuration
	cfg := kconfig.NewPodConfig(kconfig.PodConfigNotificationSnapshotAndUpdates)

	// define file config source
	if *config != "" {
		kconfig.NewSourceFile(*config, *fileCheckFrequency, cfg.Channel("file"))
	}

	// define url config source
	if *manifestURL != "" {
		kconfig.NewSourceURL(*manifestURL, *httpCheckFrequency, cfg.Channel("http"))
	}

	// define etcd config source and initialize etcd client
	var etcdClient *etcd.Client
	if len(etcdServerList) > 0 {
		etcdClient = etcd.NewClient(etcdServerList)
	} else if *etcdConfigFile != "" {
		var err error
		etcdClient, err = etcd.NewClientFromFile(*etcdConfigFile)
		if err != nil {
			glog.Fatalf("Error with etcd config file: %v", err)
		}
	}

	if etcdClient != nil {
		glog.Infof("Watching for etcd configs at %v", etcdClient.GetCluster())
		kconfig.NewSourceEtcd(kconfig.EtcdKeyForHost(hostname), etcdClient, cfg.Channel("etcd"))
	}

	// TODO: block until all sources have delivered at least one update to the channel, or break the sync loop
	// up into "per source" synchronizations

	k := kubelet.NewMainKubelet(
		hostname,
		dockerClient,
		etcdClient,
		*rootDirectory,
		*networkContainerImage,
		*syncFrequency,
		float32(*registryPullQPS),
		*registryBurst,
		*minimumGCAge,
		*maxContainerCount)

	k.BirthCry()

	go func() {
		util.Forever(func() {
			err := k.GarbageCollectContainers()
			if err != nil {
				glog.Errorf("Garbage collect failed: %v", err)
			}
		}, time.Minute*1)
	}()

	go func() {
		defer util.HandleCrash()
		// TODO: Monitor this connection, reconnect if needed?
		glog.V(1).Infof("Trying to create cadvisor client.")
		cadvisorClient, err := cadvisor.NewClient("http://127.0.0.1:4194")
		if err != nil {
			glog.Errorf("Error on creating cadvisor client: %v", err)
			return
		}
		glog.V(1).Infof("Successfully created cadvisor client.")
		k.SetCadvisorClient(cadvisorClient)
	}()

	// TODO: These should probably become more plugin-ish: register a factory func
	// in each checker's init(), iterate those here.
	health.AddHealthChecker(health.NewExecHealthChecker(k))
	health.AddHealthChecker(health.NewHTTPHealthChecker(&http.Client{}))
	health.AddHealthChecker(&health.TCPHealthChecker{})

	// process pods and exit.
	if *runonce {
		if _, err := k.RunOnce(cfg.Updates()); err != nil {
			glog.Fatalf("--runonce failed: %v", err)
		}
		return
	}

	// start the kubelet
	go util.Forever(func() { k.Run(cfg.Updates()) }, 0)

	// start the kubelet server
	if *enableServer {
		go util.Forever(func() {
			kubelet.ListenAndServeKubeletServer(k, cfg.Channel("http"), net.IP(address), *port, *enableDebuggingHandlers)
		}, 0)
	}

	// runs forever
	select {}
}
Example #4
func TestEventf(t *testing.T) {
	testPod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			SelfLink:  "/api/v1beta1/pods/foo",
			Name:      "foo",
			Namespace: "baz",
			UID:       "bar",
		},
	}
	testRef, err := api.GetPartialReference(testPod, "desiredState.manifest.containers[2]")
	if err != nil {
		t.Fatal(err)
	}
	table := []struct {
		obj            runtime.Object
		status, reason string
		messageFmt     string
		elements       []interface{}
		expect         *api.Event
		expectLog      string
	}{
		{
			obj:        testRef,
			status:     "running",
			reason:     "started",
			messageFmt: "some verbose message: %v",
			elements:   []interface{}{1},
			expect: &api.Event{
				ObjectMeta: api.ObjectMeta{
					Name:      "foo",
					Namespace: "baz",
				},
				InvolvedObject: api.ObjectReference{
					Kind:       "Pod",
					Name:       "foo",
					Namespace:  "baz",
					UID:        "bar",
					APIVersion: "v1beta1",
					FieldPath:  "desiredState.manifest.containers[2]",
				},
				Status:  "running",
				Reason:  "started",
				Message: "some verbose message: 1",
				Source:  "eventTest",
			},
			expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"v1beta1", ResourceVersion:"", FieldPath:"desiredState.manifest.containers[2]"}): status: 'running', reason: 'started' some verbose message: 1`,
		},
		{
			obj:        testPod,
			status:     "running",
			reason:     "started",
			messageFmt: "some verbose message: %v",
			elements:   []interface{}{1},
			expect: &api.Event{
				ObjectMeta: api.ObjectMeta{
					Name:      "foo",
					Namespace: "baz",
				},
				InvolvedObject: api.ObjectReference{
					Kind:       "Pod",
					Name:       "foo",
					Namespace:  "baz",
					UID:        "bar",
					APIVersion: "v1beta1",
				},
				Status:  "running",
				Reason:  "started",
				Message: "some verbose message: 1",
				Source:  "eventTest",
			},
			expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"v1beta1", ResourceVersion:"", FieldPath:""}): status: 'running', reason: 'started' some verbose message: 1`,
		},
	}

	for _, item := range table {
		called := make(chan struct{})
		testEvents := testEventRecorder{
			OnEvent: func(event *api.Event) (*api.Event, error) {
				a := *event
				// Just check that the timestamp was set.
				if a.Timestamp.IsZero() {
					t.Errorf("timestamp wasn't set")
				}
				a.Timestamp = item.expect.Timestamp
				// Check that name has the right prefix.
				if n, en := a.Name, item.expect.Name; !strings.HasPrefix(n, en) {
					t.Errorf("Name '%v' does not contain prefix '%v'", n, en)
				}
				a.Name = item.expect.Name
				if e, a := item.expect, &a; !reflect.DeepEqual(e, a) {
					t.Errorf("diff: %s", util.ObjectDiff(e, a))
				}
				called <- struct{}{}
				return event, nil
			},
		}
		recorder := record.StartRecording(&testEvents, "eventTest")
		logger := record.StartLogging(t.Logf) // Prove that it is useful
		logger2 := record.StartLogging(func(formatter string, args ...interface{}) {
			if e, a := item.expectLog, fmt.Sprintf(formatter, args...); e != a {
				t.Errorf("Expected '%v', got '%v'", e, a)
			}
			called <- struct{}{}
		})

		record.Eventf(item.obj, item.status, item.reason, item.messageFmt, item.elements...)

		<-called
		<-called
		recorder.Stop()
		logger.Stop()
		logger2.Stop()
	}
}
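The testEventRecorder helper is likewise defined outside this snippet; it adapts the OnEvent callback to whatever recorder interface record.StartRecording expects. A rough sketch under that assumption (the CreateEvent method name is a guess for illustration, not taken from the record package):

// Sketch only: the method name and signature below are assumed from how
// the test's OnEvent callback is invoked, not from the record package.
type testEventRecorder struct {
	OnEvent func(e *api.Event) (*api.Event, error)
}

// CreateEvent forwards to the test's callback; the real helper may differ.
func (t *testEventRecorder) CreateEvent(e *api.Event) (*api.Event, error) {
	if t.OnEvent != nil {
		return t.OnEvent(e)
	}
	return e, nil
}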
Example #5
// TODO: Split this up?
func SetupLogging() {
	etcd.SetLogger(util.NewLogger("etcd "))
	// Log the events locally too.
	record.StartLogging(glog.Infof)
}
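In both SetupLogging variants the handle returned by record.StartLogging is intentionally left running for the life of the process. As the tests above show (defer record.StartLogging(t.Logf).Stop()), the returned handle has a Stop method, so a caller that wants to detach the event logger again could hold on to it. A minimal sketch; the wrapper itself is hypothetical, only StartLogging and Stop come from the examples above:

// runWithEventLogging logs events to glog only while run executes,
// detaching the logging handler before returning.
func runWithEventLogging(run func()) {
	logger := record.StartLogging(glog.Infof)
	defer logger.Stop()
	run()
}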