func startComponents(firstManifestURL, secondManifestURL string) (string, string) {
	// Setup
	servers := []string{}
	glog.Infof("Creating etcd client pointing to %v", servers)

	handler := delegateHandler{}
	apiServer := httptest.NewServer(&handler)

	// Wait for etcd to become reachable, then wipe any pre-existing keys so
	// every run starts from a clean store.
	etcdClient := etcd.NewClient(servers)
	sleep := 4 * time.Second
	ok := false
	for i := 0; i < 3; i++ {
		keys, err := etcdClient.Get("/", false, false)
		if err != nil {
			glog.Warningf("Unable to list root etcd keys: %v", err)
			if i < 2 {
				time.Sleep(sleep)
				sleep = sleep * sleep
			}
			continue
		}
		for _, node := range keys.Node.Nodes {
			if _, err := etcdClient.Delete(node.Key, true); err != nil {
				glog.Fatalf("Unable to delete key: %v", err)
			}
		}
		ok = true
		break
	}
	if !ok {
		glog.Fatalf("Failed to connect to etcd")
	}

	cl := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: testapi.Default.GroupAndVersion()})

	// TODO: caesarxuchao: hacky way to specify version of Experimental client.
	// We will fix this by supporting multiple group versions in Config
	cl.ExtensionsClient = client.NewExtensionsOrDie(&client.Config{Host: apiServer.URL, Version: testapi.Extensions.GroupAndVersion()})

	storageVersions := make(map[string]string)
	etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.GroupOrDie("").InterfacesFor, testapi.Default.GroupAndVersion(), etcdtest.PathPrefix())
	storageVersions[""] = testapi.Default.GroupAndVersion()
	if err != nil {
		glog.Fatalf("Unable to get etcd storage: %v", err)
	}
	expEtcdStorage, err := master.NewEtcdStorage(etcdClient, latest.GroupOrDie("extensions").InterfacesFor, testapi.Extensions.GroupAndVersion(), etcdtest.PathPrefix())
	storageVersions["extensions"] = testapi.Extensions.GroupAndVersion()
	if err != nil {
		glog.Fatalf("Unable to get etcd storage for experimental: %v", err)
	}
	storageDestinations := master.NewStorageDestinations()
	storageDestinations.AddAPIGroup("", etcdStorage)
	storageDestinations.AddAPIGroup("extensions", expEtcdStorage)

	// Master
	host, port, err := net.SplitHostPort(strings.TrimLeft(apiServer.URL, "http://"))
	if err != nil {
		glog.Fatalf("Unable to parse URL '%v': %v", apiServer.URL, err)
	}
	portNumber, err := strconv.Atoi(port)
	if err != nil {
		glog.Fatalf("Nonnumeric port? %v", err)
	}

	publicAddress := net.ParseIP(host)
	if publicAddress == nil {
		glog.Fatalf("no public address for %s", host)
	}

	// Create a master and install handlers into mux.
	m := master.New(&master.Config{
		StorageDestinations:   storageDestinations,
		KubeletClient:         fakeKubeletClient{},
		EnableCoreControllers: true,
		EnableLogsSupport:     false,
		EnableProfiling:       true,
		APIPrefix:             "/api",
		APIGroupPrefix:        "/apis",
		Authorizer:            apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl:      admit.NewAlwaysAdmit(),
		ReadWritePort:         portNumber,
		PublicAddress:         publicAddress,
		CacheTimeout:          2 * time.Second,
		StorageVersions:       storageVersions,
	})
	handler.delegate = m.Handler

	// Scheduler
	schedulerConfigFactory := factory.NewConfigFactory(cl, nil)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		glog.Fatalf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(cl.Events(""))
	scheduler.New(schedulerConfig).Run()

	// ensure the service endpoints are sync'd several times within the window that the integration tests wait
	go endpointcontroller.NewEndpointController(cl, controller.NoResyncPeriodFunc).
		Run(3, util.NeverStop)

	// TODO: Write an integration test for the replication controllers watch.
	go replicationControllerPkg.NewReplicationManager(cl, controller.NoResyncPeriodFunc, replicationControllerPkg.BurstReplicas).
		Run(3, util.NeverStop)

	nodeController := nodecontroller.NewNodeController(nil, cl, 5*time.Minute, util.NewFakeRateLimiter(), util.NewFakeRateLimiter(),
		40*time.Second, 60*time.Second, 5*time.Second, nil, false)
	nodeController.Run(5 * time.Second)
	cadvisorInterface := new(cadvisor.Fake)

	// Kubelet (localhost)
	testRootDir := makeTempDirOrDie("kubelet_integ_1.", "")
	configFilePath := makeTempDirOrDie("config", testRootDir)
	glog.Infof("Using %s as root dir for kubelet #1", testRootDir)
	fakeDocker1.VersionInfo = docker.Env{"ApiVersion=1.20"}

	kcfg := kubeletapp.SimpleKubelet(
		cl,
		&fakeDocker1,
		"localhost",
		testRootDir,
		firstManifestURL,
		"127.0.0.1",
		10250, /* KubeletPort */
		0,     /* ReadOnlyPort */
		api.NamespaceDefault,
		empty_dir.ProbeVolumePlugins(),
		nil,
		cadvisorInterface,
		configFilePath,
		nil,
		kubecontainer.FakeOS{},
		1*time.Second,  /* FileCheckFrequency */
		1*time.Second,  /* HTTPCheckFrequency */
		10*time.Second, /* MinimumGCAge */
		3*time.Second,  /* NodeStatusUpdateFrequency */
		10*time.Second /* SyncFrequency */)

	kubeletapp.RunKubelet(kcfg, nil)

	// Kubelet (machine)
	// Create a second kubelet so that the guestbook example's two redis slaves both
	// have a place they can schedule.
	testRootDir = makeTempDirOrDie("kubelet_integ_2.", "")
	glog.Infof("Using %s as root dir for kubelet #2", testRootDir)
	fakeDocker2.VersionInfo = docker.Env{"ApiVersion=1.20"}

	kcfg = kubeletapp.SimpleKubelet(
		cl,
		&fakeDocker2,
		"127.0.0.1",
		testRootDir,
		secondManifestURL,
		"127.0.0.1",
		10251, /* KubeletPort */
		0,     /* ReadOnlyPort */
		api.NamespaceDefault,
		empty_dir.ProbeVolumePlugins(),
		nil,
		cadvisorInterface,
		"",
		nil,
		kubecontainer.FakeOS{},
		1*time.Second,  /* FileCheckFrequency */
		1*time.Second,  /* HTTPCheckFrequency */
		10*time.Second, /* MinimumGCAge */
		3*time.Second,  /* NodeStatusUpdateFrequency */
		10*time.Second /* SyncFrequency */)

	kubeletapp.RunKubelet(kcfg, nil)

	return apiServer.URL, configFilePath
}
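// runComponentsSketch is an illustrative sketch, not part of the upstream
// file: it shows how a test driver might consume startComponents' return
// values (the apiserver URL and the kubelet #1 config directory). The
// function name and body are assumptions, not the real integration test
// entry point; it reuses only identifiers already present above.
func runComponentsSketch(firstManifestURL, secondManifestURL string) *client.Client {
	apiServerURL, configFilePath := startComponents(firstManifestURL, secondManifestURL)
	glog.Infof("API server running at %s; kubelet #1 config dir: %s", apiServerURL, configFilePath)
	// Point a client at the freshly started apiserver; tests then drive scenarios through it.
	return client.NewOrDie(&client.Config{Host: apiServerURL, Version: testapi.Default.GroupAndVersion()})
}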
// Run runs the specified KubeletExecutorServer.
func (s *KubeletExecutorServer) Run(hks hyperkube.Interface, _ []string) error {
	rand.Seed(time.Now().UTC().UnixNano())

	oomAdjuster := oom.NewOomAdjuster()
	if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		log.Info(err)
	}

	// empty string for the docker and system containers (= cgroup paths). This
	// stops the kubelet taking any control over other system processes.
	s.SystemContainer = ""
	s.DockerDaemonContainer = ""

	// create apiserver client
	var apiclient *client.Client
	clientConfig, err := s.CreateAPIServerClientConfig()
	if err == nil {
		apiclient, err = client.New(clientConfig)
	}
	if err != nil {
		// required for k8sm since we need to send api.Binding information
		// back to the apiserver
		log.Fatalf("No API client: %v", err)
	}

	log.Infof("Using root directory: %v", s.RootDirectory)
	credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)

	shutdownCloser, err := s.syncExternalShutdownWatcher()
	if err != nil {
		return err
	}

	cAdvisorInterface, err := cadvisor.New(s.CAdvisorPort)
	if err != nil {
		return err
	}

	imageGCPolicy := kubelet.ImageGCPolicy{
		HighThresholdPercent: s.ImageGCHighThresholdPercent,
		LowThresholdPercent:  s.ImageGCLowThresholdPercent,
	}

	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
		RootFreeDiskMB:   s.LowDiskSpaceThresholdMB,
	}

	//TODO(jdef) intentionally NOT initializing a cloud provider here since:
	//(a) the kubelet doesn't actually use it
	//(b) we don't need to create N-kubelet connections to zookeeper for no good reason
	//cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	//log.Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)

	hostNetworkSources, err := kubelet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
	if err != nil {
		return err
	}

	tlsOptions, err := s.InitializeTLS()
	if err != nil {
		return err
	}

	mounter := mount.New()
	if s.Containerized {
		log.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = &mount.NsenterMounter{}
	}

	var writer utilio.Writer = &utilio.StdWriter{}
	var dockerExecHandler dockertools.ExecHandler
	switch s.DockerExecHandlerName {
	case "native":
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case "nsenter":
		writer = &utilio.NsenterWriter{}
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	default:
		log.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
		dockerExecHandler = &dockertools.NativeExecHandler{}
	}

	kcfg := app.KubeletConfig{
		Address:            s.Address,
		AllowPrivileged:    s.AllowPrivileged,
		HostNetworkSources: hostNetworkSources,
		HostnameOverride:   s.HostnameOverride,
		RootDirectory:      s.RootDirectory,
		// ConfigFile: ""
		// ManifestURL: ""
		FileCheckFrequency: s.FileCheckFrequency,
		// HTTPCheckFrequency
		PodInfraContainerImage:  s.PodInfraContainerImage,
		SyncFrequency:           s.SyncFrequency,
		RegistryPullQPS:         s.RegistryPullQPS,
		RegistryBurst:           s.RegistryBurst,
		MinimumGCAge:            s.MinimumGCAge,
		MaxPerPodContainerCount: s.MaxPerPodContainerCount,
		MaxContainerCount:       s.MaxContainerCount,
		RegisterNode:            s.RegisterNode,
		// StandaloneMode: false
		ClusterDomain:                  s.ClusterDomain,
		ClusterDNS:                     s.ClusterDNS,
		Runonce:                        s.RunOnce,
		Port:                           s.Port,
		ReadOnlyPort:                   s.ReadOnlyPort,
		CAdvisorInterface:              cAdvisorInterface,
		EnableServer:                   s.EnableServer,
		EnableDebuggingHandlers:        s.EnableDebuggingHandlers,
		DockerClient:                   dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		KubeClient:                     apiclient,
		MasterServiceNamespace:         s.MasterServiceNamespace,
		VolumePlugins:                  app.ProbeVolumePlugins(),
		NetworkPlugins:                 app.ProbeNetworkPlugins(s.NetworkPluginDir),
		NetworkPluginName:              s.NetworkPluginName,
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
		TLSOptions:                     tlsOptions,
		ImageGCPolicy:                  imageGCPolicy,
		DiskSpacePolicy:                diskSpacePolicy,
		Cloud:                          nil, // TODO(jdef) Cloud, specifying null here because we don't want all kubelets polling mesos-master; need to account for this in the cloudprovider impl
		NodeStatusUpdateFrequency:      s.NodeStatusUpdateFrequency,
		ResourceContainer:              s.ResourceContainer,
		CgroupRoot:                     s.CgroupRoot,
		ContainerRuntime:               s.ContainerRuntime,
		Mounter:                        mounter,
		DockerDaemonContainer:          s.DockerDaemonContainer,
		SystemContainer:                s.SystemContainer,
		ConfigureCBR0:                  s.ConfigureCBR0,
		MaxPods:                        s.MaxPods,
		DockerExecHandler:              dockerExecHandler,
		ResolverConfig:                 s.ResolverConfig,
		CPUCFSQuota:                    s.CPUCFSQuota,
		Writer:                         writer,
		MaxOpenFiles:                   s.MaxOpenFiles,
	}

	kcfg.NodeName = kcfg.Hostname

	err = app.RunKubelet(&kcfg, app.KubeletBuilder(func(kc *app.KubeletConfig) (app.KubeletBootstrap, *kconfig.PodConfig, error) {
		return s.createAndInitKubelet(kc, hks, clientConfig, shutdownCloser)
	}))
	if err != nil {
		return err
	}

	if s.HealthzPort > 0 {
		healthz.DefaultHealthz()
		go util.Until(func() {
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
			if err != nil {
				log.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second, util.NeverStop)
	}

	// block until executor is shut down or commits shutdown
	select {}
}
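// probeExecutorHealthz is an illustrative sketch, not part of the upstream
// file: when HealthzPort > 0, Run above registers the default /healthz
// handler and serves it on HealthzBindAddress, so a liveness check reduces to
// an HTTP GET like the one below. The helper name is an assumption, and it
// assumes the standard "errors" package is imported alongside the packages
// already used above.
func probeExecutorHealthz(bindAddress string, port int) error {
	resp, err := http.Get("http://" + net.JoinHostPort(bindAddress, strconv.Itoa(port)) + "/healthz")
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return errors.New("unexpected healthz status: " + resp.Status)
	}
	return nil
}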