Example #1
// RunControllerManager starts the minion, endpoint, and replication controllers against the given client.
func RunControllerManager(machineList []string, cl *client.Client, nodeMilliCPU, nodeMemory int64) {
	if int64(int(nodeMilliCPU)) != nodeMilliCPU {
		glog.Warningf("node_milli_cpu is too big for platform. Clamping: %d -> %d",
			nodeMilliCPU, math.MaxInt32)
		nodeMilliCPU = math.MaxInt32
	}

	if int64(int(nodeMemory)) != nodeMemory {
		glog.Warningf("node_memory is too big for platform. Clamping: %d -> %d",
			nodeMemory, math.MaxInt32)
		nodeMemory = math.MaxInt32
	}

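	// Advertise the same static CPU/memory capacity for every machine in the list.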
	nodeResources := &api.NodeResources{
		Capacity: api.ResourceList{
			resources.CPU:    util.NewIntOrStringFromInt(int(nodeMilliCPU)),
			resources.Memory: util.NewIntOrStringFromInt(int(nodeMemory)),
		},
	}
	minionController := minionControllerPkg.NewMinionController(nil, "", machineList, nodeResources, cl)
	minionController.Run(10 * time.Second)

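	// Resync service endpoints every ten seconds in the background.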
	endpoints := service.NewEndpointController(cl)
	go util.Forever(func() { endpoints.SyncServiceEndpoints() }, time.Second*10)

	controllerManager := controller.NewReplicationManager(cl)
	controllerManager.Run(10 * time.Second)
}
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()
	verifyMinionFlags()

	if len(clientConfig.Host) == 0 {
		glog.Fatal("usage: controller-manager -master <master>")
	}

	kubeClient, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

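	// Clamp 64-bit flag values that do not fit in the platform's int.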
	if int64(int(*nodeMilliCPU)) != *nodeMilliCPU {
		glog.Warningf("node_milli_cpu is too big for platform. Clamping: %d -> %d",
			*nodeMilliCPU, math.MaxInt32)
		*nodeMilliCPU = math.MaxInt32
	}

	if int64(int(*nodeMemory)) != *nodeMemory {
		glog.Warningf("node_memory is too big for platform. Clamping: %d -> %d",
			*nodeMemory, math.MaxInt32)
		*nodeMemory = math.MaxInt32
	}

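	// Serve the default HTTP mux on the configured address and port in the background.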
	go http.ListenAndServe(net.JoinHostPort(address.String(), strconv.Itoa(*port)), nil)

	endpoints := service.NewEndpointController(kubeClient)
	go util.Forever(func() { endpoints.SyncServiceEndpoints() }, time.Second*10)

	controllerManager := replicationControllerPkg.NewReplicationManager(kubeClient)
	controllerManager.Run(10 * time.Second)

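	// Start the minion controller with the configured cloud provider and static resource capacity.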
	cloud := cloudprovider.InitCloudProvider(*cloudProvider, *cloudConfigFile)
	nodeResources := &api.NodeResources{
		Capacity: api.ResourceList{
			resources.CPU:    util.NewIntOrStringFromInt(int(*nodeMilliCPU)),
			resources.Memory: util.NewIntOrStringFromInt(int(*nodeMemory)),
		},
	}
	minionController := minionControllerPkg.NewMinionController(cloud, *minionRegexp, machineList, nodeResources, kubeClient)
	minionController.Run(10 * time.Second)

	select {}
}
Example #3
// RunControllerManager starts the minion, endpoint, and replication controllers against the given client.
func RunControllerManager(machineList []string, cl *client.Client, nodeMilliCPU, nodeMemory int64) {
	if int64(int(nodeMilliCPU)) != nodeMilliCPU || int64(int(nodeMemory)) != nodeMemory {
		glog.Fatalf("Overflow, nodeCPU or nodeMemory too large for the platform")
	}
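	// Build the static resource capacity advertised for each machine.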
	nodeResources := &api.NodeResources{
		Capacity: api.ResourceList{
			resources.CPU:    util.NewIntOrStringFromInt(int(nodeMilliCPU)),
			resources.Memory: util.NewIntOrStringFromInt(int(nodeMemory)),
		},
	}
	minionController := minionControllerPkg.NewMinionController(nil, "", machineList, nodeResources, cl)
	minionController.Run(10 * time.Second)

	endpoints := service.NewEndpointController(cl)
	go util.Forever(func() { endpoints.SyncServiceEndpoints() }, time.Second*10)

	controllerManager := controller.NewReplicationManager(cl)
	controllerManager.Run(10 * time.Second)
}
Example #4
func startComponents(manifestURL string) (apiServerURL string) {
	// Setup
	servers := []string{"http://localhost:4001"}
	glog.Infof("Creating etcd client pointing to %v", servers)
	machineList := []string{"localhost", "machine"}

	handler := delegateHandler{}
	apiServer := httptest.NewServer(&handler)

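	// Wait for etcd to become reachable and clear any keys left over from previous runs.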
	etcdClient := etcd.NewClient(servers)
	sleep := 4 * time.Second
	ok := false
	for i := 0; i < 3; i++ {
		keys, err := etcdClient.Get("/", false, false)
		if err != nil {
			glog.Warningf("Unable to list root etcd keys: %v", err)
			if i < 2 {
				time.Sleep(sleep)
				sleep *= 4 // lengthen the wait before the next attempt; Duration*Duration would overflow
			}
			continue
		}
		for _, node := range keys.Node.Nodes {
			if _, err := etcdClient.Delete(node.Key, true); err != nil {
				glog.Fatalf("Unable delete key: %v", err)
			}
		}
		ok = true
		break
	}
	if !ok {
		glog.Fatalf("Failed to connect to etcd")
	}

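	// Build an API client against the test server with a short poll period so the test runs quickly.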
	cl := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: testapi.Version()})
	cl.PollPeriod = time.Millisecond * 100
	cl.Sync = true

	helper, err := master.NewEtcdHelper(etcdClient, "")
	if err != nil {
		glog.Fatalf("Unable to get etcd helper: %v", err)
	}

	// Master
	host, port, err := net.SplitHostPort(strings.TrimPrefix(apiServer.URL, "http://"))
	if err != nil {
		glog.Fatalf("Unable to parse URL '%v': %v", apiServer.URL, err)
	}
	portNumber, err := strconv.Atoi(port)
	if err != nil {
		glog.Fatalf("Nonnumeric port? %v", err)
	}

	// Create a master and install handlers into mux.
	m := master.New(&master.Config{
		Client:            cl,
		EtcdHelper:        helper,
		KubeletClient:     fakeKubeletClient{},
		EnableLogsSupport: false,
		APIPrefix:         "/api",
		Authorizer:        apiserver.NewAlwaysAllowAuthorizer(),

		ReadWritePort: portNumber,
		ReadOnlyPort:  portNumber,
		PublicAddress: host,
	})
	handler.delegate = m.Handler

	// Scheduler
	schedulerConfigFactory := &factory.ConfigFactory{cl}
	schedulerConfig := schedulerConfigFactory.Create()
	scheduler.New(schedulerConfig).Run()

	endpoints := service.NewEndpointController(cl)
	go util.Forever(func() { endpoints.SyncServiceEndpoints() }, time.Second*10)

	controllerManager := replicationControllerPkg.NewReplicationManager(cl)

	// Prove that controllerManager's watch works by making it not sync until after this
	// test is over. (Hopefully we don't take 10 minutes!)
	controllerManager.Run(10 * time.Minute)

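	// Register the test machines; this test advertises no resource capacity for them.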
	nodeResources := &api.NodeResources{}
	minionController := minionControllerPkg.NewMinionController(nil, "", machineList, nodeResources, cl)
	minionController.Run(10 * time.Second)

	// Kubelet (localhost)
	os.MkdirAll(testRootDir, 0750)
	cfg1 := config.NewPodConfig(config.PodConfigNotificationSnapshotAndUpdates)
	config.NewSourceEtcd(config.EtcdKeyForHost(machineList[0]), etcdClient, cfg1.Channel("etcd"))
	config.NewSourceURL(manifestURL, 5*time.Second, cfg1.Channel("url"))
	myKubelet := kubelet.NewIntegrationTestKubelet(machineList[0], testRootDir, &fakeDocker1)
	go util.Forever(func() { myKubelet.Run(cfg1.Updates()) }, 0)
	go util.Forever(func() {
		kubelet.ListenAndServeKubeletServer(myKubelet, cfg1.Channel("http"), net.ParseIP("127.0.0.1"), 10250, true)
	}, 0)

	// Kubelet (machine)
	// Create a second kubelet so that the guestbook example's two redis slaves both
	// have a place they can schedule.
	cfg2 := config.NewPodConfig(config.PodConfigNotificationSnapshotAndUpdates)
	config.NewSourceEtcd(config.EtcdKeyForHost(machineList[1]), etcdClient, cfg2.Channel("etcd"))
	otherKubelet := kubelet.NewIntegrationTestKubelet(machineList[1], testRootDir, &fakeDocker2)
	go util.Forever(func() { otherKubelet.Run(cfg2.Updates()) }, 0)
	go util.Forever(func() {
		kubelet.ListenAndServeKubeletServer(otherKubelet, cfg2.Channel("http"), net.ParseIP("127.0.0.1"), 10251, true)
	}, 0)

	return apiServer.URL
}