Example 1
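// startComponents starts an in-process master, scheduler, replication manager,
// and two kubelets (one per entry in machineList) for integration testing, and
// returns the URL of the test API server.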
func startComponents(manifestURL string) (apiServerURL string) {
	// Setup
	servers := []string{"http://localhost:4001"}
	glog.Infof("Creating etcd client pointing to %v", servers)
	machineList := []string{"localhost", "machine"}

	handler := delegateHandler{}
	apiServer := httptest.NewServer(&handler)

	etcdClient := etcd.NewClient(servers)

	cl := client.New(apiServer.URL, nil)
	cl.PollPeriod = time.Second * 1
	cl.Sync = true

	// Master
	m := master.New(&master.Config{
		Client:        cl,
		EtcdServers:   servers,
		Minions:       machineList,
		PodInfoGetter: fakePodInfoGetter{},
	})
	storage, codec := m.API_v1beta1()
	handler.delegate = apiserver.Handle(storage, codec, "/api/v1beta1")

	// Scheduler
	scheduler.New((&factory.ConfigFactory{cl}).Create()).Run()

	controllerManager := controller.NewReplicationManager(cl)

	// Prove that controllerManager's watch works by making it not sync until after this
	// test is over. (Hopefully we don't take 10 minutes!)
	controllerManager.Run(10 * time.Minute)

	// Kubelet (localhost)
	cfg1 := config.NewPodConfig(config.PodConfigNotificationSnapshotAndUpdates)
	config.NewSourceEtcd(config.EtcdKeyForHost(machineList[0]), etcdClient, cfg1.Channel("etcd"))
	config.NewSourceURL(manifestURL, 5*time.Second, cfg1.Channel("url"))
	myKubelet := kubelet.NewIntegrationTestKubelet(machineList[0], &fakeDocker1)
	go util.Forever(func() { myKubelet.Run(cfg1.Updates()) }, 0)
	go util.Forever(func() {
		kubelet.ListenAndServeKubeletServer(myKubelet, cfg1.Channel("http"), "localhost", 10250)
	}, 0)

	// Kubelet (machine)
	// Create a second kubelet so that the guestbook example's two redis slaves both
	// have a place they can schedule.
	cfg2 := config.NewPodConfig(config.PodConfigNotificationSnapshotAndUpdates)
	config.NewSourceEtcd(config.EtcdKeyForHost(machineList[1]), etcdClient, cfg2.Channel("etcd"))
	otherKubelet := kubelet.NewIntegrationTestKubelet(machineList[1], &fakeDocker2)
	go util.Forever(func() { otherKubelet.Run(cfg2.Updates()) }, 0)
	go util.Forever(func() {
		kubelet.ListenAndServeKubeletServer(otherKubelet, cfg2.Channel("http"), "localhost", 10251)
	}, 0)

	return apiServer.URL
}
Example 2
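// startComponents starts an in-process master, replication manager, and two
// kubelets fed from etcd and a manifest URL, and returns the test API server URL.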
func startComponents(manifestURL string) (apiServerURL string) {
	// Setup
	servers := []string{"http://localhost:4001"}
	glog.Infof("Creating etcd client pointing to %v", servers)
	machineList := []string{"localhost", "machine"}

	handler := delegateHandler{}
	apiserver := httptest.NewServer(&handler)

	etcdClient := etcd.NewClient(servers)

	cl := client.New(apiserver.URL, nil)
	cl.PollPeriod = time.Second * 1
	cl.Sync = true

	// Master
	m := master.New(&master.Config{
		Client:        cl,
		EtcdServers:   servers,
		Minions:       machineList,
		PodInfoGetter: fakePodInfoGetter{},
	})
	handler.delegate = m.ConstructHandler("/api/v1beta1")

	controllerManager := controller.MakeReplicationManager(etcdClient, cl)
	controllerManager.Run(1 * time.Second)

	// Kubelet (localhost)
	cfg1 := config.NewPodConfig(config.PodConfigNotificationSnapshotAndUpdates)
	config.NewSourceEtcd(config.EtcdKeyForHost(machineList[0]), etcdClient, 30*time.Second, cfg1.Channel("etcd"))
	config.NewSourceURL(manifestURL, 5*time.Second, cfg1.Channel("url"))
	myKubelet := kubelet.NewIntegrationTestKubelet(machineList[0], &fakeDocker1)
	go util.Forever(func() { myKubelet.Run(cfg1.Updates()) }, 0)
	go util.Forever(cfg1.Sync, 3*time.Second)
	go util.Forever(func() {
		kubelet.ListenAndServeKubeletServer(myKubelet, cfg1.Channel("http"), http.DefaultServeMux, "localhost", 10250)
	}, 0)

	// Kubelet (machine)
	// Create a second kubelet so that the guestbook example's two redis slaves both
	// have a place they can schedule.
	cfg2 := config.NewPodConfig(config.PodConfigNotificationSnapshotAndUpdates)
	config.NewSourceEtcd(config.EtcdKeyForHost(machineList[1]), etcdClient, 30*time.Second, cfg2.Channel("etcd"))
	otherKubelet := kubelet.NewIntegrationTestKubelet(machineList[1], &fakeDocker2)
	go util.Forever(func() { otherKubelet.Run(cfg2.Updates()) }, 0)
	go util.Forever(cfg2.Sync, 3*time.Second)
	go util.Forever(func() {
		kubelet.ListenAndServeKubeletServer(otherKubelet, cfg2.Channel("http"), http.DefaultServeMux, "localhost", 10251)
	}, 0)

	return apiserver.URL
}
Example 3
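// makePodSourceConfig assembles the kubelet's merged pod configuration from the
// file, URL, etcd, and apiserver sources enabled in the KubeletConfig.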
func makePodSourceConfig(kc *KubeletConfig) *config.PodConfig {
	// source of all configuration
	cfg := config.NewPodConfig(config.PodConfigNotificationSnapshotAndUpdates)

	// define file config source
	if kc.ConfigFile != "" {
		glog.Infof("Adding manifest file: %v", kc.ConfigFile)
		config.NewSourceFile(kc.ConfigFile, kc.FileCheckFrequency, cfg.Channel(kubelet.FileSource))
	}

	// define url config source
	if kc.ManifestURL != "" {
		glog.Infof("Adding manifest url: %v", kc.ManifestURL)
		config.NewSourceURL(kc.ManifestURL, kc.HttpCheckFrequency, cfg.Channel(kubelet.HTTPSource))
	}
	if kc.EtcdClient != nil {
		glog.Infof("Watching for etcd configs at %v", kc.EtcdClient.GetCluster())
		config.NewSourceEtcd(config.EtcdKeyForHost(kc.Hostname), kc.EtcdClient, cfg.Channel(kubelet.EtcdSource))
	}
	if kc.KubeClient != nil {
		glog.Infof("Watching apiserver")
		config.NewSourceApiserver(kc.KubeClient, kc.Hostname, cfg.Channel(kubelet.ApiserverSource))
	}
	return cfg
}
Example 4
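// runKubelet starts a kubelet bound to c.bindAddr, wired to Docker, cAdvisor,
// and etcd, and serves the kubelet HTTP API on port 10250.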
func (c *config) runKubelet() {
	rootDirectory := path.Clean("/var/lib/openshift")
	minionHost := c.bindAddr
	minionPort := 10250

	cadvisorClient, err := cadvisor.NewClient("http://" + c.masterHost + ":4194")
	if err != nil {
		glog.Errorf("Error on creating cadvisor client: %v", err)
	}

	dockerClient, dockerAddr := c.Docker.GetClientOrExit()
	if err := dockerClient.Ping(); err != nil {
		glog.Errorf("WARNING: Docker could not be reached at %s.  Docker must be installed and running to start containers.\n%v", dockerAddr, err)
	} else {
		glog.Infof("Connecting to Docker at %s", dockerAddr)
	}

	etcdClient, _ := c.getEtcdClient()

	// initialize Kubelet
	os.MkdirAll(rootDirectory, 0750)
	cfg := kconfig.NewPodConfig(kconfig.PodConfigNotificationSnapshotAndUpdates)
	kconfig.NewSourceEtcd(kconfig.EtcdKeyForHost(minionHost), etcdClient, cfg.Channel("etcd"))
	k := kubelet.NewMainKubelet(
		minionHost,
		dockerClient,
		cadvisorClient,
		etcdClient,
		rootDirectory,
		30*time.Second)
	go util.Forever(func() { k.Run(cfg.Updates()) }, 0)
	go util.Forever(func() {
		kubelet.ListenAndServeKubeletServer(k, cfg.Channel("http"), minionHost, uint(minionPort))
	}, 0)
}
Example 5
// RunKubelet starts a Kubelet talking to dockerEndpoint
func RunKubelet(etcdClient tools.EtcdClient, hostname, dockerEndpoint string) {
	dockerClient, err := docker.NewClient(GetDockerEndpoint(dockerEndpoint))
	if err != nil {
		glog.Fatal("Couldn't connect to docker.")
	}

	// Kubelet (localhost)
	os.MkdirAll(testRootDir, 0750)
	cfg1 := config.NewPodConfig(config.PodConfigNotificationSnapshotAndUpdates)
	config.NewSourceEtcd(config.EtcdKeyForHost(hostname), etcdClient, cfg1.Channel("etcd"))
	myKubelet := kubelet.NewIntegrationTestKubelet(hostname, testRootDir, dockerClient)
	go util.Forever(func() { myKubelet.Run(cfg1.Updates()) }, 0)
	go util.Forever(func() {
		kubelet.ListenAndServeKubeletServer(myKubelet, cfg1.Channel("http"), net.ParseIP("127.0.0.1"), 10250, true)
	}, 0)
}
Example 6
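// makePodSourceConfig assembles the kubelet's merged pod configuration from the
// file, URL, and apiserver sources enabled in the KubeletConfig.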
func makePodSourceConfig(kc *KubeletConfig) *config.PodConfig {
	// source of all configuration
	cfg := config.NewPodConfig(config.PodConfigNotificationSnapshotAndUpdates, kc.Recorder)

	// define file config source
	if kc.ConfigFile != "" {
		glog.Infof("Adding manifest file: %v", kc.ConfigFile)
		config.NewSourceFile(kc.ConfigFile, kc.NodeName, kc.FileCheckFrequency, cfg.Channel(kubelet.FileSource))
	}

	// define url config source
	if kc.ManifestURL != "" {
		glog.Infof("Adding manifest url: %v", kc.ManifestURL)
		config.NewSourceURL(kc.ManifestURL, kc.NodeName, kc.HTTPCheckFrequency, cfg.Channel(kubelet.HTTPSource))
	}
	if kc.KubeClient != nil {
		glog.Infof("Watching apiserver")
		config.NewSourceApiserver(kc.KubeClient, kc.NodeName, cfg.Channel(kubelet.ApiserverSource))
	}
	return cfg
}
Example 7
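// main configures and runs a standalone kubelet: it connects to Docker and
// cAdvisor, builds pod config sources (file, URL, etcd), and starts the sync
// loop and the kubelet HTTP server.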
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()
	rand.Seed(time.Now().UTC().UnixNano())

	verflag.PrintAndExitIfRequested()

	etcd.SetLogger(util.NewLogger("etcd "))

	dockerClient, err := docker.NewClient(getDockerEndpoint())
	if err != nil {
		glog.Fatal("Couldn't connect to docker.")
	}

	cadvisorClient, err := cadvisor.NewClient("http://127.0.0.1:4194")
	if err != nil {
		glog.Errorf("Error on creating cadvisor client: %v", err)
	}

	hostname := getHostname()

	if *rootDirectory == "" {
		glog.Fatal("Invalid root directory path.")
	}
	*rootDirectory = path.Clean(*rootDirectory)
	os.MkdirAll(*rootDirectory, 0750)

	// source of all configuration
	cfg := kconfig.NewPodConfig(kconfig.PodConfigNotificationSnapshotAndUpdates)

	// define file config source
	if *config != "" {
		kconfig.NewSourceFile(*config, *fileCheckFrequency, cfg.Channel("file"))
	}

	// define url config source
	if *manifestURL != "" {
		kconfig.NewSourceURL(*manifestURL, *httpCheckFrequency, cfg.Channel("http"))
	}

	// define etcd config source and initialize etcd client
	var etcdClient tools.EtcdClient
	if len(etcdServerList) > 0 {
		glog.Infof("Watching for etcd configs at %v", etcdServerList)
		etcdClient = etcd.NewClient(etcdServerList)
		kconfig.NewSourceEtcd(kconfig.EtcdKeyForHost(hostname), etcdClient, 30*time.Second, cfg.Channel("etcd"))
	}

	// TODO: block until all sources have delivered at least one update to the channel, or break the sync loop
	// up into "per source" synchronizations

	k := kubelet.NewMainKubelet(
		getHostname(),
		dockerClient,
		cadvisorClient,
		etcdClient,
		*rootDirectory)

	// start the kubelet
	go util.Forever(func() { k.Run(cfg.Updates()) }, 0)

	// resynchronize periodically
	// TODO: make this part of PodConfig so that it is only delivered after syncFrequency has elapsed without
	// an update
	go util.Forever(cfg.Sync, *syncFrequency)

	// start the kubelet server
	if *enableServer {
		go util.Forever(func() {
			kubelet.ListenAndServeKubeletServer(k, cfg.Channel("http"), http.DefaultServeMux, *address, *port)
		}, 0)
	}

	// runs forever
	select {}
}
Example 8
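// main runs the kubelet as a Mesos executor: it connects to Docker, resolves
// the host's FQDN, watches etcd for pod config, starts the executor driver and
// the kubelet HTTP server, and launches the proxy as a child process.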
func main() {
	flag.Var(&etcdServerList, "etcd_servers", "List of etcd servers to watch (http://ip:port), comma separated")

	flag.Parse()
	var endpoint string
	if len(*dockerEndpoint) > 0 {
		endpoint = *dockerEndpoint
	} else if len(os.Getenv("DOCKER_HOST")) > 0 {
		endpoint = os.Getenv("DOCKER_HOST")
	} else {
		endpoint = "unix:///var/run/docker.sock"
	}
	log.Infof("Connecting to docker on %s", endpoint)
	dockerClient, err := docker.NewClient(endpoint)
	if err != nil {
		log.Fatal("Couldn't connnect to docker.")
	}

	hostname := *hostnameOverride
	if hostname == "" {
		// Note: We use exec here instead of os.Hostname() because we
		// want the FQDN, and this is the easiest way to get it.
		fqdnHostname, hostnameErr := exec.Command("hostname", "-f").Output()
		if hostnameErr != nil {
			log.Fatalf("Couldn't determine hostname: %v", hostnameErr)
		}

		// hostname(1) returns a terminating newline we need to strip.
		hostname = string(fqdnHostname)
		if len(hostname) > 0 {
			hostname = hostname[0 : len(hostname)-1]
		}
	}

	cfg := kconfig.NewPodConfig(kconfig.PodConfigNotificationSnapshotAndUpdates)
	var etcdClient tools.EtcdClient
	if len(etcdServerList) > 0 {
		log.Infof("Watching for etcd configs at %v", etcdServerList)
		etcdClient = etcd.NewClient(etcdServerList)
		kconfig.NewSourceEtcd(kconfig.EtcdKeyForHost(hostname), etcdClient, 30*time.Second, cfg.Channel("etcd"))
	}

	kl := kubelet.NewMainKubelet(hostname, dockerClient, nil, etcdClient, "/")

	driver := new(mesos.MesosExecutorDriver)
	kubeletExecutor := executor.New(driver, kl)
	driver.Executor = kubeletExecutor

	go kubeletExecutor.RunKubelet()

	log.V(2).Infof("Initialize executor driver...")
	driver.Init()
	defer driver.Destroy()

	log.V(2).Infof("Executor driver is running!")
	driver.Start()

	go util.Forever(cfg.Sync, *syncFrequency)

	log.V(2).Infof("Starting kubelet server...")

	go util.Forever(func() {
		// TODO(nnielsen): Don't hardwire port, but use port from
		// resource offer.
		kubelet.ListenAndServeKubeletServer(kl, cfg.Channel("http"), http.DefaultServeMux, hostname, 10250)
	}, 1*time.Second)

	log.V(2).Infof("Starting proxy process...")
	var cmd *exec.Cmd
	if len(etcdServerList) > 0 {
		etcdServerArguments := strings.Join(etcdServerList, ",")
		cmd = exec.Command("./proxy", "-etcd_servers="+etcdServerArguments)
	} else {
		cmd = exec.Command("./proxy")
	}
	_, err = cmd.StdoutPipe()
	if err != nil {
		log.Fatal(err)
	}
	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}

	// TODO(nnielsen): Factor check-pointing into subsystem.
	dat, err := ioutil.ReadFile("/tmp/kubernetes-pods")
	if err == nil {
		var target []api.PodInfo
		err := json.Unmarshal(dat, &target)
		if err == nil {
			log.Infof("Checkpoint: '%v'", target)
		}
	}
	// Recover running containers from check pointed pod list.

	driver.Join()

	log.V(2).Infof("Cleaning up proxy process...")

	// Clean up proxy process
	cmd.Process.Kill()
}
Example 9
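// createAndInitKubelet builds the Mesos-specific kubelet and executor, wires
// pod updates through the MESOS_CFG_SOURCE channel, registers a static-pods
// file source, and creates the Mesos executor driver.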
func (ks *KubeletExecutorServer) createAndInitKubelet(
	kc *app.KubeletConfig,
	hks hyperkube.Interface,
	clientConfig *client.Config,
	shutdownCloser io.Closer,
) (app.KubeletBootstrap, *kconfig.PodConfig, error) {

	// TODO(k8s): block until all sources have delivered at least one update to the channel, or break the sync loop
	// up into "per source" synchronizations
	// TODO(k8s): KubeletConfig.KubeClient should be a client interface, but client interface misses certain methods
	// used by kubelet. Since NewMainKubelet expects a client interface, we need to make sure we are not passing
	// a nil pointer to it when what we really want is a nil interface.
	var kubeClient client.Interface
	if kc.KubeClient == nil {
		kubeClient = nil
	} else {
		kubeClient = kc.KubeClient
	}

	gcPolicy := kubelet.ContainerGCPolicy{
		MinAge:             kc.MinimumGCAge,
		MaxPerPodContainer: kc.MaxPerPodContainerCount,
		MaxContainers:      kc.MaxContainerCount,
	}

	pc := kconfig.NewPodConfig(kconfig.PodConfigNotificationSnapshotAndUpdates, kc.Recorder)
	updates := pc.Channel(MESOS_CFG_SOURCE)

	klet, err := kubelet.NewMainKubelet(
		kc.Hostname,
		kc.NodeName,
		kc.DockerClient,
		kubeClient,
		kc.RootDirectory,
		kc.PodInfraContainerImage,
		kc.SyncFrequency,
		float32(kc.RegistryPullQPS),
		kc.RegistryBurst,
		gcPolicy,
		pc.SeenAllSources,
		kc.RegisterNode,
		kc.StandaloneMode,
		kc.ClusterDomain,
		net.IP(kc.ClusterDNS),
		kc.MasterServiceNamespace,
		kc.VolumePlugins,
		kc.NetworkPlugins,
		kc.NetworkPluginName,
		kc.StreamingConnectionIdleTimeout,
		kc.Recorder,
		kc.CadvisorInterface,
		kc.ImageGCPolicy,
		kc.DiskSpacePolicy,
		kc.Cloud,
		kc.NodeStatusUpdateFrequency,
		kc.ResourceContainer,
		kc.OSInterface,
		kc.CgroupRoot,
		kc.ContainerRuntime,
		kc.Mounter,
		kc.DockerDaemonContainer,
		kc.SystemContainer,
		kc.ConfigureCBR0,
		kc.PodCIDR,
		kc.MaxPods,
		kc.DockerExecHandler,
	)
	if err != nil {
		return nil, nil, err
	}

	//TODO(jdef) either configure Watch here with something useful, or else
	// get rid of it from executor.Config
	kubeletFinished := make(chan struct{})
	staticPodsConfigPath := filepath.Join(kc.RootDirectory, "static-pods")
	exec := executor.New(executor.Config{
		Kubelet:         klet,
		Updates:         updates,
		SourceName:      MESOS_CFG_SOURCE,
		APIClient:       kc.KubeClient,
		Docker:          kc.DockerClient,
		SuicideTimeout:  ks.SuicideTimeout,
		KubeletFinished: kubeletFinished,
		ShutdownAlert: func() {
			if shutdownCloser != nil {
				if e := shutdownCloser.Close(); e != nil {
					log.Warningf("failed to signal shutdown to external watcher: %v", e)
				}
			}
		},
		ExitFunc: os.Exit,
		PodStatusFunc: func(_ executor.KubeletInterface, pod *api.Pod) (*api.PodStatus, error) {
			return klet.GetRuntime().GetPodStatus(pod)
		},
		StaticPodsConfigPath: staticPodsConfigPath,
	})

	go exec.InitializeStaticPodsSource(func() {
		// Create file source only when we are called back. Otherwise, it is never marked unseen.
		fileSourceUpdates := pc.Channel(kubelet.FileSource)

		kconfig.NewSourceFile(staticPodsConfigPath, kc.Hostname, kc.FileCheckFrequency, fileSourceUpdates)
	})

	k := &kubeletExecutor{
		Kubelet:         klet,
		runProxy:        ks.RunProxy,
		proxyLogV:       ks.ProxyLogV,
		proxyExec:       ks.ProxyExec,
		proxyLogfile:    ks.ProxyLogfile,
		proxyBindall:    ks.ProxyBindall,
		address:         ks.Address,
		dockerClient:    kc.DockerClient,
		hks:             hks,
		kubeletFinished: kubeletFinished,
		executorDone:    exec.Done(),
		clientConfig:    clientConfig,
	}

	dconfig := bindings.DriverConfig{
		Executor:         exec,
		HostnameOverride: ks.HostnameOverride,
		BindingAddress:   net.IP(ks.Address),
	}
	if driver, err := bindings.NewMesosExecutorDriver(dconfig); err != nil {
		log.Fatalf("failed to create executor driver: %v", err)
	} else {
		k.driver = driver
	}

	log.V(2).Infof("Initialize executor driver...")

	k.BirthCry()
	exec.Init(k.driver)

	k.StartGarbageCollection()

	return k, pc, nil
}
Example 10
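// startComponents clears etcd, then starts an in-process master, scheduler,
// endpoint controller, replication manager, minion controller, and two kubelets
// for integration testing, and returns the test API server URL.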
func startComponents(manifestURL string) (apiServerURL string) {
	// Setup
	servers := []string{"http://localhost:4001"}
	glog.Infof("Creating etcd client pointing to %v", servers)
	machineList := []string{"localhost", "machine"}

	handler := delegateHandler{}
	apiServer := httptest.NewServer(&handler)

	etcdClient := etcd.NewClient(servers)
	sleep := 4 * time.Second
	ok := false
	for i := 0; i < 3; i++ {
		keys, err := etcdClient.Get("/", false, false)
		if err != nil {
			glog.Warningf("Unable to list root etcd keys: %v", err)
			if i < 2 {
				time.Sleep(sleep)
				sleep = sleep * 2 // double the backoff (squaring a Duration overflows)
			}
			continue
		}
		for _, node := range keys.Node.Nodes {
			if _, err := etcdClient.Delete(node.Key, true); err != nil {
				glog.Fatalf("Unable delete key: %v", err)
			}
		}
		ok = true
		break
	}
	if !ok {
		glog.Fatalf("Failed to connect to etcd")
	}

	cl := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: testapi.Version()})
	cl.PollPeriod = time.Millisecond * 100
	cl.Sync = true

	helper, err := master.NewEtcdHelper(etcdClient, "")
	if err != nil {
		glog.Fatalf("Unable to get etcd helper: %v", err)
	}

	// Master
	host, port, err := net.SplitHostPort(strings.TrimPrefix(apiServer.URL, "http://"))
	if err != nil {
		glog.Fatalf("Unable to parse URL '%v': %v", apiServer.URL, err)
	}
	portNumber, err := strconv.Atoi(port)
	if err != nil {
		glog.Fatalf("Nonnumeric port? %v", err)
	}

	// Create a master and install handlers into mux.
	m := master.New(&master.Config{
		Client:            cl,
		EtcdHelper:        helper,
		KubeletClient:     fakeKubeletClient{},
		EnableLogsSupport: false,
		APIPrefix:         "/api",
		Authorizer:        apiserver.NewAlwaysAllowAuthorizer(),

		ReadWritePort: portNumber,
		ReadOnlyPort:  portNumber,
		PublicAddress: host,
	})
	handler.delegate = m.Handler

	// Scheduler
	schedulerConfigFactory := &factory.ConfigFactory{cl}
	schedulerConfig := schedulerConfigFactory.Create()
	scheduler.New(schedulerConfig).Run()

	endpoints := service.NewEndpointController(cl)
	go util.Forever(func() { endpoints.SyncServiceEndpoints() }, time.Second*10)

	controllerManager := replicationControllerPkg.NewReplicationManager(cl)

	// Prove that controllerManager's watch works by making it not sync until after this
	// test is over. (Hopefully we don't take 10 minutes!)
	controllerManager.Run(10 * time.Minute)

	nodeResources := &api.NodeResources{}
	minionController := minionControllerPkg.NewMinionController(nil, "", machineList, nodeResources, cl)
	minionController.Run(10 * time.Second)

	// Kubelet (localhost)
	os.MkdirAll(testRootDir, 0750)
	cfg1 := config.NewPodConfig(config.PodConfigNotificationSnapshotAndUpdates)
	config.NewSourceEtcd(config.EtcdKeyForHost(machineList[0]), etcdClient, cfg1.Channel("etcd"))
	config.NewSourceURL(manifestURL, 5*time.Second, cfg1.Channel("url"))
	myKubelet := kubelet.NewIntegrationTestKubelet(machineList[0], testRootDir, &fakeDocker1)
	go util.Forever(func() { myKubelet.Run(cfg1.Updates()) }, 0)
	go util.Forever(func() {
		kubelet.ListenAndServeKubeletServer(myKubelet, cfg1.Channel("http"), net.ParseIP("127.0.0.1"), 10250, true)
	}, 0)

	// Kubelet (machine)
	// Create a second kubelet so that the guestbook example's two redis slaves both
	// have a place they can schedule.
	cfg2 := config.NewPodConfig(config.PodConfigNotificationSnapshotAndUpdates)
	config.NewSourceEtcd(config.EtcdKeyForHost(machineList[1]), etcdClient, cfg2.Channel("etcd"))
	otherKubelet := kubelet.NewIntegrationTestKubelet(machineList[1], testRootDir, &fakeDocker2)
	go util.Forever(func() { otherKubelet.Run(cfg2.Updates()) }, 0)
	go util.Forever(func() {
		kubelet.ListenAndServeKubeletServer(otherKubelet, cfg2.Channel("http"), net.ParseIP("127.0.0.1"), 10251, true)
	}, 0)

	return apiServer.URL
}
Example 11
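// main configures and runs the kubelet: it validates flags (--runonce excludes
// --etcd_servers), builds pod config sources (file, URL, etcd), starts container
// garbage collection, the cAdvisor client, and health checkers, and either runs
// once or starts the sync loop and the kubelet HTTP server.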
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()
	rand.Seed(time.Now().UTC().UnixNano())

	verflag.PrintAndExitIfRequested()

	if *runonce {
		exclusiveFlag := "invalid option: --runonce and %s are mutually exclusive"
		if len(etcdServerList) > 0 {
			glog.Fatalf(exclusiveFlag, "--etcd_servers")
		}
		if *enableServer {
			glog.Infof("--runonce is set, disabling server")
			*enableServer = false
		}
	}

	etcd.SetLogger(util.NewLogger("etcd "))

	// Log the events locally too.
	record.StartLogging(glog.Infof)

	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: *allowPrivileged,
	})

	dockerClient, err := docker.NewClient(getDockerEndpoint())
	if err != nil {
		glog.Fatal("Couldn't connect to docker.")
	}

	hostname := getHostname()

	if *rootDirectory == "" {
		glog.Fatal("Invalid root directory path.")
	}
	*rootDirectory = path.Clean(*rootDirectory)
	if err := os.MkdirAll(*rootDirectory, 0750); err != nil {
		glog.Fatalf("Error creating root directory: %v", err)
	}

	// source of all configuration
	cfg := kconfig.NewPodConfig(kconfig.PodConfigNotificationSnapshotAndUpdates)

	// define file config source
	if *config != "" {
		kconfig.NewSourceFile(*config, *fileCheckFrequency, cfg.Channel("file"))
	}

	// define url config source
	if *manifestURL != "" {
		kconfig.NewSourceURL(*manifestURL, *httpCheckFrequency, cfg.Channel("http"))
	}

	// define etcd config source and initialize etcd client
	var etcdClient *etcd.Client
	if len(etcdServerList) > 0 {
		etcdClient = etcd.NewClient(etcdServerList)
	} else if *etcdConfigFile != "" {
		var err error
		etcdClient, err = etcd.NewClientFromFile(*etcdConfigFile)
		if err != nil {
			glog.Fatalf("Error with etcd config file: %v", err)
		}
	}

	if etcdClient != nil {
		glog.Infof("Watching for etcd configs at %v", etcdClient.GetCluster())
		kconfig.NewSourceEtcd(kconfig.EtcdKeyForHost(hostname), etcdClient, cfg.Channel("etcd"))
	}

	// TODO: block until all sources have delivered at least one update to the channel, or break the sync loop
	// up into "per source" synchronizations

	k := kubelet.NewMainKubelet(
		getHostname(),
		dockerClient,
		etcdClient,
		*rootDirectory,
		*networkContainerImage,
		*syncFrequency,
		float32(*registryPullQPS),
		*registryBurst,
		*minimumGCAge,
		*maxContainerCount)

	k.BirthCry()

	go func() {
		util.Forever(func() {
			err := k.GarbageCollectContainers()
			if err != nil {
				glog.Errorf("Garbage collect failed: %v", err)
			}
		}, time.Minute*1)
	}()

	go func() {
		defer util.HandleCrash()
		// TODO: Monitor this connection, reconnect if needed?
		glog.V(1).Infof("Trying to create cadvisor client.")
		cadvisorClient, err := cadvisor.NewClient("http://127.0.0.1:4194")
		if err != nil {
			glog.Errorf("Error on creating cadvisor client: %v", err)
			return
		}
		glog.V(1).Infof("Successfully created cadvisor client.")
		k.SetCadvisorClient(cadvisorClient)
	}()

	// TODO: These should probably become more plugin-ish: register a factory func
	// in each checker's init(), iterate those here.
	health.AddHealthChecker(health.NewExecHealthChecker(k))
	health.AddHealthChecker(health.NewHTTPHealthChecker(&http.Client{}))
	health.AddHealthChecker(&health.TCPHealthChecker{})

	// process pods and exit.
	if *runonce {
		if _, err := k.RunOnce(cfg.Updates()); err != nil {
			glog.Fatalf("--runonce failed: %v", err)
		}
		return
	}

	// start the kubelet
	go util.Forever(func() { k.Run(cfg.Updates()) }, 0)

	// start the kubelet server
	if *enableServer {
		go util.Forever(func() {
			kubelet.ListenAndServeKubeletServer(k, cfg.Channel("http"), net.IP(address), *port, *enableDebuggingHandlers)
		}, 0)
	}

	// runs forever
	select {}
}
Example 12
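// startAllInOne launches an embedded etcd server, a kubelet, the combined
// Kubernetes and OpenShift API server, the kube proxy, the replication manager,
// the scheduler, and the build controller in a single process.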
func (c *config) startAllInOne() {
	minionHost := "127.0.0.1"
	minionPort := 10250
	rootDirectory := path.Clean("/var/lib/openshift")
	osAddr := c.ListenAddr

	osPrefix := "/osapi/v1beta1"
	kubePrefix := "/api/v1beta1"
	kubeClient, err := kubeclient.New("http://"+osAddr, nil)
	if err != nil {
		glog.Fatalf("Unable to configure client - bad URL: %v", err)
	}
	osClient, err := osclient.New("http://"+osAddr, nil)
	if err != nil {
		glog.Fatalf("Unable to configure client - bad URL: %v", err)
	}

	etcdAddr := "127.0.0.1:4001"
	etcdServers := []string{} // default
	etcdConfig := etcdconfig.New()
	etcdConfig.BindAddr = etcdAddr
	etcdConfig.DataDir = "openshift.local.etcd"
	etcdConfig.Name = "openshift.local"

	// check docker connection
	dockerClient, dockerAddr := c.Docker.GetClientOrExit()
	if err := dockerClient.Ping(); err != nil {
		glog.Errorf("WARNING: Docker could not be reached at %s.  Docker must be installed and running to start containers.\n%v", dockerAddr, err)
	} else {
		glog.Infof("Connecting to Docker at %s", dockerAddr)
	}

	cadvisorClient, err := cadvisor.NewClient("http://127.0.0.1:4194")
	if err != nil {
		glog.Errorf("Error on creating cadvisor client: %v", err)
	}

	// initialize etcd
	etcdServer := etcd.New(etcdConfig)
	go util.Forever(func() {
		glog.Infof("Started etcd at http://%s", etcdAddr)
		etcdServer.Run()
	}, 0)

	etcdClient := etcdclient.NewClient(etcdServers)
	for i := 0; ; i++ {
		_, err := etcdClient.Get("/", false, false)
		if err == nil || tools.IsEtcdNotFound(err) {
			break
		}
		if i > 100 {
			glog.Fatal("Could not reach etcd: %v", err)
		}
		time.Sleep(50 * time.Millisecond)
	}

	// initialize Kubelet
	os.MkdirAll(rootDirectory, 0750)
	cfg := kconfig.NewPodConfig(kconfig.PodConfigNotificationSnapshotAndUpdates)
	kconfig.NewSourceEtcd(kconfig.EtcdKeyForHost(minionHost), etcdClient, cfg.Channel("etcd"))
	k := kubelet.NewMainKubelet(
		minionHost,
		dockerClient,
		cadvisorClient,
		etcdClient,
		rootDirectory,
		30*time.Second)
	go util.Forever(func() { k.Run(cfg.Updates()) }, 0)
	go util.Forever(func() {
		kubelet.ListenAndServeKubeletServer(k, cfg.Channel("http"), minionHost, uint(minionPort))
	}, 0)

	imageRegistry := image.NewEtcdRegistry(etcdClient)

	// initialize OpenShift API
	storage := map[string]apiserver.RESTStorage{
		"builds":                  buildregistry.NewStorage(build.NewEtcdRegistry(etcdClient)),
		"buildConfigs":            buildconfigregistry.NewStorage(build.NewEtcdRegistry(etcdClient)),
		"images":                  image.NewImageStorage(imageRegistry),
		"imageRepositories":       image.NewImageRepositoryStorage(imageRegistry),
		"imageRepositoryMappings": image.NewImageRepositoryMappingStorage(imageRegistry, imageRegistry),
		"templateConfigs":         template.NewStorage(),
	}

	osMux := http.NewServeMux()

	// initialize Kubernetes API
	podInfoGetter := &kubeclient.HTTPPodInfoGetter{
		Client: http.DefaultClient,
		Port:   uint(minionPort),
	}
	masterConfig := &master.Config{
		Client:             kubeClient,
		EtcdServers:        etcdServers,
		HealthCheckMinions: true,
		Minions:            []string{minionHost},
		PodInfoGetter:      podInfoGetter,
	}
	m := master.New(masterConfig)

	apiserver.NewAPIGroup(m.API_v1beta1()).InstallREST(osMux, kubePrefix)
	apiserver.NewAPIGroup(storage, runtime.Codec).InstallREST(osMux, osPrefix)
	apiserver.InstallSupport(osMux)

	osApi := &http.Server{
		Addr:           osAddr,
		Handler:        apiserver.RecoverPanics(osMux),
		ReadTimeout:    5 * time.Minute,
		WriteTimeout:   5 * time.Minute,
		MaxHeaderBytes: 1 << 20,
	}

	go util.Forever(func() {
		glog.Infof("Started Kubernetes API at http://%s%s", osAddr, kubePrefix)
		glog.Infof("Started OpenShift API at http://%s%s", osAddr, osPrefix)
		glog.Fatal(osApi.ListenAndServe())
	}, 0)

	// initialize kube proxy
	serviceConfig := pconfig.NewServiceConfig()
	endpointsConfig := pconfig.NewEndpointsConfig()
	pconfig.NewConfigSourceEtcd(etcdClient,
		serviceConfig.Channel("etcd"),
		endpointsConfig.Channel("etcd"))
	loadBalancer := proxy.NewLoadBalancerRR()
	proxier := proxy.NewProxier(loadBalancer)
	serviceConfig.RegisterHandler(proxier)
	endpointsConfig.RegisterHandler(loadBalancer)
	glog.Infof("Started Kubernetes Proxy")

	// initialize replication manager
	controllerManager := controller.NewReplicationManager(kubeClient)
	controllerManager.Run(10 * time.Second)
	glog.Infof("Started Kubernetes Replication Manager")

	// initialize scheduler
	configFactory := &factory.ConfigFactory{Client: kubeClient}
	config := configFactory.Create()
	s := scheduler.New(config)
	s.Run()
	glog.Infof("Started Kubernetes Scheduler")

	// initialize build controller
	dockerBuilderImage := env("OPENSHIFT_DOCKER_BUILDER_IMAGE", "openshift/docker-builder")
	useHostDockerSocket := len(env("USE_HOST_DOCKER_SOCKET", "")) > 0
	stiBuilderImage := env("OPENSHIFT_STI_BUILDER_IMAGE", "openshift/sti-builder")
	dockerRegistry := env("DOCKER_REGISTRY", "")

	buildStrategies := map[buildapi.BuildType]build.BuildJobStrategy{
		buildapi.DockerBuildType: strategy.NewDockerBuildStrategy(dockerBuilderImage, useHostDockerSocket),
		buildapi.STIBuildType:    strategy.NewSTIBuildStrategy(stiBuilderImage, useHostDockerSocket),
	}

	buildController := build.NewBuildController(kubeClient, osClient, buildStrategies, dockerRegistry, 1200)
	buildController.Run(10 * time.Second)

	select {}
}