func (s *SchedulerServer) failover(driver bindings.SchedulerDriver, hks hyperkube.Interface) error { if driver != nil { stat, err := driver.Stop(true) if stat != mesos.Status_DRIVER_STOPPED { return fmt.Errorf("failed to stop driver for failover, received unexpected status code: %v", stat) } else if err != nil { return err } } // there's no guarantee that all goroutines are actually programmed intelligently with 'done' // signals, so we'll need to restart if we want to really stop everything // run the same command that we were launched with //TODO(jdef) assumption here is that the sheduler is the only service running in this process, we should probably validate that somehow args := []string{} flags := pflag.CommandLine if hks != nil { args = append(args, hks.Name()) flags = hks.Flags() } flags.Visit(func(flag *pflag.Flag) { if flag.Name != "api-servers" && flag.Name != "etcd-servers" { args = append(args, fmt.Sprintf("--%s=%s", flag.Name, flag.Value.String())) } }) if !s.Graceful { args = append(args, "--graceful") } if len(s.APIServerList) > 0 { args = append(args, "--api-servers="+strings.Join(s.APIServerList, ",")) } if len(s.EtcdServerList) > 0 { args = append(args, "--etcd-servers="+strings.Join(s.EtcdServerList, ",")) } args = append(args, flags.Args()...) log.V(1).Infof("spawning scheduler for graceful failover: %s %+v", s.executable, args) cmd := exec.Command(s.executable, args...) 
cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr cmd.SysProcAttr = makeDisownedProcAttr() // TODO(jdef) pass in a pipe FD so that we can block, waiting for the child proc to be ready //cmd.ExtraFiles = []*os.File{} exitcode := 0 log.Flush() // TODO(jdef) it would be really nice to ensure that no one else in our process was still logging if err := cmd.Start(); err != nil { //log to stdtout here to avoid conflicts with normal stderr logging fmt.Fprintf(os.Stdout, "failed to spawn failover process: %v\n", err) os.Exit(1) } os.Exit(exitcode) select {} // will never reach here }
// prepareExecutorInfo assembles the mesos.ExecutorInfo used to launch the
// Kubernetes executor on Mesos agent nodes. It selects the executor command
// (a dedicated executor binary, a km binary in minion mode, or this very
// executable as a fallback), registers the artifact URIs the agent must
// fetch, builds the executor's command-line arguments in a fixed order, and
// sets prototype cpu/mem resources that include reservations for any
// configured static pods.
func (s *SchedulerServer) prepareExecutorInfo(hks hyperkube.Interface) (*mesos.ExecutorInfo, error) {
	ci := &mesos.CommandInfo{
		Shell: proto.Bool(false),
	}

	if s.executorPath != "" {
		// serve the dedicated executor binary from the scheduler's artifact server
		uri, executorCmd := s.serveFrameworkArtifact(s.executorPath)
		ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(true)})
		ci.Value = proto.String(fmt.Sprintf("./%s", executorCmd))
		ci.Arguments = append(ci.Arguments, ci.GetValue())
	} else if !hks.FindServer(hyperkube.CommandMinion) {
		return nil, fmt.Errorf("either run this scheduler via km or else --executor-path is required")
	} else {
		if strings.Index(s.kmPath, "://") > 0 {
			if strings.HasPrefix(s.kmPath, "file://") {
				// If `kmPath` started with "file://", `km` in agent local path was used.
				ci.Value = proto.String(strings.TrimPrefix(s.kmPath, "file://"))
			} else {
				// URI could point directly to executable, e.g. hdfs:///km
				// or else indirectly, e.g. http://acmestorage/tarball.tgz
				// so we assume that for this case the command will always be "km"
				ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(s.kmPath), Executable: proto.Bool(true)})
				ci.Value = proto.String("./km") // TODO(jdef) extract constant
			}
		} else if s.kmPath != "" {
			// serve a local km binary via the artifact server
			uri, kmCmd := s.serveFrameworkArtifact(s.kmPath)
			ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(true)})
			ci.Value = proto.String(fmt.Sprintf("./%s", kmCmd))
		} else {
			// fall back to serving the scheduler's own executable as km
			uri, kmCmd := s.serveFrameworkArtifact(s.executable)
			ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(true)})
			ci.Value = proto.String(fmt.Sprintf("./%s", kmCmd))
		}

		// run km in minion mode; the append order below defines the executor's CLI
		ci.Arguments = append(ci.Arguments, ci.GetValue(), hyperkube.CommandMinion)
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--run-proxy=%v", s.runProxy))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--proxy-bindall=%v", s.proxyBindall))

		if s.proxyKubeconfig != "" {
			//TODO(jdef) should probably support non-local files, e.g. hdfs:///some/config/file
			uri, basename := s.serveFrameworkArtifact(s.proxyKubeconfig)
			ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri)})
			ci.Arguments = append(ci.Arguments, fmt.Sprintf("--proxy-kubeconfig=%v", basename))
		}

		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--proxy-logv=%d", s.proxyLogV))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--proxy-mode=%v", s.proxyMode))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--path-override=%s", s.minionPathOverride))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--max-log-size=%v", s.minionLogMaxSize.String()))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--max-log-backups=%d", s.minionLogMaxBackups))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--max-log-age=%d", s.minionLogMaxAgeInDays))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--conntrack-max=%d", s.conntrackMax))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--conntrack-tcp-timeout-established=%d", s.conntrackTCPTimeoutEstablished))
	}

	if s.sandboxOverlay != "" {
		// the overlay archive is fetched and extracted into the executor sandbox
		if _, err := os.Stat(s.sandboxOverlay); os.IsNotExist(err) {
			return nil, fmt.Errorf("Sandbox overlay archive not found: %s", s.sandboxOverlay)
		}
		uri, _ := s.serveFrameworkArtifact(s.sandboxOverlay)
		ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(false), Extract: proto.Bool(true)})
	}

	if s.dockerCfgPath != "" {
		// docker credentials are fetched verbatim (not extracted) under the name .dockercfg
		uri := s.serveFrameworkArtifactWithFilename(s.dockerCfgPath, ".dockercfg")
		ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(false), Extract: proto.Bool(false)})
	}

	//TODO(jdef): provide some way (env var?) for users to customize executor config
	//TODO(jdef): set -address to 127.0.0.1 if `address` is 127.0.0.1

	// the kubelet-specific api-server list takes precedence over the scheduler's
	var apiServerArgs string
	if len(s.kubeletApiServerList) > 0 {
		apiServerArgs = strings.Join(s.kubeletApiServerList, ",")
	} else {
		apiServerArgs = strings.Join(s.apiServerList, ",")
	}
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--api-servers=%s", apiServerArgs))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--v=%d", s.executorLogV)) // this also applies to the minion
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--allow-privileged=%t", s.allowPrivileged))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--suicide-timeout=%v", s.executorSuicideTimeout))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--mesos-launch-grace-period=%v", s.launchGracePeriod))

	if s.executorBindall {
		//TODO(jdef) determine whether hostname-override is really needed for bindall because
		//it conflicts with kubelet node status checks/updates
		//ci.Arguments = append(ci.Arguments, "--hostname-override=0.0.0.0")
		ci.Arguments = append(ci.Arguments, "--address=0.0.0.0")
	}

	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--mesos-cgroup-prefix=%v", s.mesosCgroupPrefix))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--cadvisor-port=%v", s.kubeletCadvisorPort))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--sync-frequency=%v", s.kubeletSyncFrequency))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--contain-pod-resources=%t", s.containPodResources))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--enable-debugging-handlers=%t", s.kubeletEnableDebuggingHandlers))

	if s.kubeletKubeconfig != "" {
		//TODO(jdef) should probably support non-local files, e.g. hdfs:///some/config/file
		if s.kubeletKubeconfig != s.proxyKubeconfig {
			if filepath.Base(s.kubeletKubeconfig) == filepath.Base(s.proxyKubeconfig) {
				// scheduler serves kubelet-kubeconfig and proxy-kubeconfig by their basename
				// we currently don't support the case where the 2 kubeconfig files have the same
				// basename but different absolute name, e.g., /kubelet/kubeconfig and /proxy/kubeconfig
				return nil, fmt.Errorf("if kubelet-kubeconfig and proxy-kubeconfig are different, they must have different basenames")
			}
			// allows kubelet-kubeconfig and proxy-kubeconfig to point to the same file
			uri, _ := s.serveFrameworkArtifact(s.kubeletKubeconfig)
			ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri)})
		}
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--kubeconfig=%s", filepath.Base(s.kubeletKubeconfig)))
	}

	// appendOptional adds --name=value only when value is non-empty
	appendOptional := func(name string, value string) {
		if value != "" {
			ci.Arguments = append(ci.Arguments, fmt.Sprintf("--%s=%s", name, value))
		}
	}
	if s.clusterDNS != nil {
		appendOptional("cluster-dns", s.clusterDNS.String())
	}
	appendOptional("cluster-domain", s.clusterDomain)
	appendOptional("root-dir", s.kubeletRootDirectory)
	appendOptional("docker-endpoint", s.kubeletDockerEndpoint)
	appendOptional("pod-infra-container-image", s.kubeletPodInfraContainerImage)
	appendOptional("host-network-sources", s.kubeletHostNetworkSources)
	appendOptional("network-plugin", s.kubeletNetworkPluginName)

	// TODO(jdef) this code depends on poorly scoped cadvisor flags, will need refactoring soon
	appendOptional(flagutil.Cadvisor.HousekeepingInterval.NameValue())
	appendOptional(flagutil.Cadvisor.GlobalHousekeepingInterval.NameValue())

	log.V(1).Infof("prepared executor command %q with args '%+v'", ci.GetValue(), ci.Arguments)

	// Create mesos scheduler driver.
	execInfo := &mesos.ExecutorInfo{
		Command: ci,
		Name:    proto.String(cloud.KubernetesExecutorName),
		Source:  proto.String(execcfg.DefaultInfoSource),
	}

	// Check for staticPods
	data, staticPodCPUs, staticPodMem := s.prepareStaticPods()

	// set prototype resource. During procurement these act as the blue print only.
	// In a final ExecutorInfo they might differ due to different procured
	// resource roles.
	execInfo.Resources = []*mesos.Resource{
		mutil.NewScalarResource("cpus", float64(s.mesosExecutorCPUs)+staticPodCPUs),
		mutil.NewScalarResource("mem", float64(s.mesosExecutorMem)+staticPodMem),
	}

	// calculate the ExecutorInfo hash to be used for validating compatibility.
	// It is used to determine whether a running executor is compatible with the
	// current scheduler configuration. If it is not, offers for those nodes
	// are declined by our framework and the operator has to phase out those
	// running executors in a cluster.
	execInfo.ExecutorId = executorinfo.NewID(execInfo)
	execInfo.Data = data

	log.V(1).Infof("started with executor id %v", execInfo.ExecutorId.GetValue())
	return execInfo, nil
}
// prepareExecutorInfo assembles the mesos.ExecutorInfo used to launch the
// Kubernetes executor on Mesos agent nodes: it selects the executor command
// (a dedicated executor binary, a km binary in minion mode, or this very
// executable as a fallback), registers artifact URIs the agent must fetch,
// builds the executor's command-line arguments in a fixed order, zips any
// static-pod specs into the executor's Data payload while reserving their
// cpu/mem, and derives the executor's UID from a hash of the resulting info.
func (s *SchedulerServer) prepareExecutorInfo(hks hyperkube.Interface) (*mesos.ExecutorInfo, *uid.UID, error) {
	ci := &mesos.CommandInfo{
		Shell: proto.Bool(false),
	}

	if s.ExecutorPath != "" {
		// serve the dedicated executor binary from the scheduler's artifact server
		uri, executorCmd := s.serveFrameworkArtifact(s.ExecutorPath)
		ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(true)})
		ci.Value = proto.String(fmt.Sprintf("./%s", executorCmd))
	} else if !hks.FindServer(hyperkube.CommandMinion) {
		return nil, nil, fmt.Errorf("either run this scheduler via km or else --executor-path is required")
	} else {
		if strings.Index(s.KMPath, "://") > 0 {
			// URI could point directly to executable, e.g. hdfs:///km
			// or else indirectly, e.g. http://acmestorage/tarball.tgz
			// so we assume that for this case the command will always be "km"
			ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(s.KMPath), Executable: proto.Bool(true)})
			ci.Value = proto.String("./km") // TODO(jdef) extract constant
		} else if s.KMPath != "" {
			// serve a local km binary via the artifact server
			uri, kmCmd := s.serveFrameworkArtifact(s.KMPath)
			ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(true)})
			ci.Value = proto.String(fmt.Sprintf("./%s", kmCmd))
		} else {
			// fall back to serving the scheduler's own executable as km
			uri, kmCmd := s.serveFrameworkArtifact(s.executable)
			ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(true)})
			ci.Value = proto.String(fmt.Sprintf("./%s", kmCmd))
		}

		// run km in minion mode; the append order below defines the executor's CLI
		ci.Arguments = append(ci.Arguments, hyperkube.CommandMinion)
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--run-proxy=%v", s.RunProxy))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--proxy-bindall=%v", s.ProxyBindall))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--proxy-logv=%d", s.ProxyLogV))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--path-override=%s", s.MinionPathOverride))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--max-log-size=%v", s.MinionLogMaxSize.String()))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--max-log-backups=%d", s.MinionLogMaxBackups))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--max-log-age=%d", s.MinionLogMaxAgeInDays))
	}

	if s.DockerCfgPath != "" {
		// docker credentials are fetched verbatim (not extracted) under the name .dockercfg
		uri := s.serveFrameworkArtifactWithFilename(s.DockerCfgPath, ".dockercfg")
		ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(false), Extract: proto.Bool(false)})
	}

	//TODO(jdef): provide some way (env var?) for users to customize executor config
	//TODO(jdef): set -address to 127.0.0.1 if `address` is 127.0.0.1
	apiServerArgs := strings.Join(s.APIServerList, ",")
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--api-servers=%s", apiServerArgs))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--v=%d", s.ExecutorLogV)) // this also applies to the minion
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--allow-privileged=%t", s.AllowPrivileged))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--suicide-timeout=%v", s.ExecutorSuicideTimeout))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--mesos-launch-grace-period=%v", s.LaunchGracePeriod))

	if s.ExecutorBindall {
		//TODO(jdef) determine whether hostname-override is really needed for bindall because
		//it conflicts with kubelet node status checks/updates
		//ci.Arguments = append(ci.Arguments, "--hostname-override=0.0.0.0")
		ci.Arguments = append(ci.Arguments, "--address=0.0.0.0")
	}

	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--mesos-cgroup-prefix=%v", s.MesosCgroupPrefix))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--cadvisor-port=%v", s.KubeletCadvisorPort))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--sync-frequency=%v", s.KubeletSyncFrequency))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--contain-pod-resources=%t", s.ContainPodResources))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--enable-debugging-handlers=%t", s.EnableProfiling))

	if s.AuthPath != "" {
		//TODO(jdef) should probably support non-local files, e.g. hdfs:///some/config/file
		uri, basename := s.serveFrameworkArtifact(s.AuthPath)
		ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri)})
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--auth-path=%s", basename))
	}

	// appendOptional adds --name=value only when value is non-empty
	appendOptional := func(name string, value string) {
		if value != "" {
			ci.Arguments = append(ci.Arguments, fmt.Sprintf("--%s=%s", name, value))
		}
	}
	if s.ClusterDNS != nil {
		appendOptional("cluster-dns", s.ClusterDNS.String())
	}
	appendOptional("cluster-domain", s.ClusterDomain)
	appendOptional("root-dir", s.KubeletRootDirectory)
	appendOptional("docker-endpoint", s.KubeletDockerEndpoint)
	appendOptional("pod-infra-container-image", s.KubeletPodInfraContainerImage)
	appendOptional("host-network-sources", s.KubeletHostNetworkSources)
	appendOptional("network-plugin", s.KubeletNetworkPluginName)

	log.V(1).Infof("prepared executor command %q with args '%+v'", ci.GetValue(), ci.Arguments)

	// Create mesos scheduler driver.
	execInfo := &mesos.ExecutorInfo{
		Command: ci,
		Name:    proto.String(execcfg.DefaultInfoName),
		Source:  proto.String(execcfg.DefaultInfoSource),
	}

	// Check for staticPods
	var staticPodCPUs, staticPodMem float64
	if s.StaticPodsConfigPath != "" {
		bs, paths, err := archive.ZipDir(s.StaticPodsConfigPath)
		if err != nil {
			return nil, nil, err
		}

		// try to read pod files and sum resources
		// TODO(sttts): don't terminate when static pods are broken, but skip them
		// TODO(sttts): add a directory watch and tell running executors about updates
		for _, podPath := range paths {
			podJson, err := ioutil.ReadFile(podPath)
			if err != nil {
				return nil, nil, fmt.Errorf("error reading static pod spec: %v", err)
			}

			pod := api.Pod{}
			err = json.Unmarshal(podJson, &pod)
			if err != nil {
				return nil, nil, fmt.Errorf("error parsing static pod spec at %v: %v", podPath, err)
			}

			// TODO(sttts): allow unlimited static pods as well and patch in the default resource limits
			unlimitedCPU := mresource.LimitPodCPU(&pod, s.DefaultContainerCPULimit)
			unlimitedMem := mresource.LimitPodMem(&pod, s.DefaultContainerMemLimit)
			if unlimitedCPU {
				return nil, nil, fmt.Errorf("found static pod without limit on cpu resources: %v", podPath)
			}
			if unlimitedMem {
				return nil, nil, fmt.Errorf("found static pod without limit on memory resources: %v", podPath)
			}

			cpu := mresource.PodCPULimit(&pod)
			mem := mresource.PodMemLimit(&pod)
			log.V(2).Infof("reserving %.2f cpu shares and %.2f MB of memory to static pod %s", cpu, mem, pod.Name)

			staticPodCPUs += float64(cpu)
			staticPodMem += float64(mem)
		}

		// pass zipped pod spec to executor
		execInfo.Data = bs
	}

	execInfo.Resources = []*mesos.Resource{
		mutil.NewScalarResource("cpus", float64(s.MesosExecutorCPUs)+staticPodCPUs),
		mutil.NewScalarResource("mem", float64(s.MesosExecutorMem)+staticPodMem),
	}

	// calculate ExecutorInfo hash to be used for validating compatibility
	// of ExecutorInfo's generated by other HA schedulers.
	ehash := hashExecutorInfo(execInfo)
	eid := uid.New(ehash, execcfg.DefaultInfoID)
	execInfo.ExecutorId = &mesos.ExecutorID{Value: proto.String(eid.String())}

	return execInfo, eid, nil
}
// prepareExecutorInfo assembles the mesos.ExecutorInfo used to launch the
// Kubernetes executor on Mesos agent nodes: it selects the executor command
// (a dedicated executor binary, a km binary in minion mode, or this very
// executable as a fallback), registers artifact URIs the agent must fetch
// (including an optional sandbox overlay and docker credentials), builds the
// executor's command-line arguments in a fixed order, attaches static-pod
// data, and derives the executor's UID from a hash of the resulting info.
func (s *SchedulerServer) prepareExecutorInfo(hks hyperkube.Interface) (*mesos.ExecutorInfo, *uid.UID, error) {
	ci := &mesos.CommandInfo{
		Shell: proto.Bool(false),
	}

	if s.executorPath != "" {
		// serve the dedicated executor binary from the scheduler's artifact server
		uri, executorCmd := s.serveFrameworkArtifact(s.executorPath)
		ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(true)})
		ci.Value = proto.String(fmt.Sprintf("./%s", executorCmd))
	} else if !hks.FindServer(hyperkube.CommandMinion) {
		return nil, nil, fmt.Errorf("either run this scheduler via km or else --executor-path is required")
	} else {
		if strings.Index(s.kmPath, "://") > 0 {
			// URI could point directly to executable, e.g. hdfs:///km
			// or else indirectly, e.g. http://acmestorage/tarball.tgz
			// so we assume that for this case the command will always be "km"
			ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(s.kmPath), Executable: proto.Bool(true)})
			ci.Value = proto.String("./km") // TODO(jdef) extract constant
		} else if s.kmPath != "" {
			// serve a local km binary via the artifact server
			uri, kmCmd := s.serveFrameworkArtifact(s.kmPath)
			ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(true)})
			ci.Value = proto.String(fmt.Sprintf("./%s", kmCmd))
		} else {
			// fall back to serving the scheduler's own executable as km
			uri, kmCmd := s.serveFrameworkArtifact(s.executable)
			ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(true)})
			ci.Value = proto.String(fmt.Sprintf("./%s", kmCmd))
		}

		// run km in minion mode; the append order below defines the executor's CLI
		ci.Arguments = append(ci.Arguments, hyperkube.CommandMinion)
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--run-proxy=%v", s.runProxy))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--proxy-bindall=%v", s.proxyBindall))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--proxy-logv=%d", s.proxyLogV))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--path-override=%s", s.minionPathOverride))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--max-log-size=%v", s.minionLogMaxSize.String()))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--max-log-backups=%d", s.minionLogMaxBackups))
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--max-log-age=%d", s.minionLogMaxAgeInDays))
	}

	if s.sandboxOverlay != "" {
		// the overlay archive is fetched and extracted into the executor sandbox
		if _, err := os.Stat(s.sandboxOverlay); os.IsNotExist(err) {
			return nil, nil, fmt.Errorf("Sandbox overlay archive not found: %s", s.sandboxOverlay)
		}
		uri, _ := s.serveFrameworkArtifact(s.sandboxOverlay)
		ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(false), Extract: proto.Bool(true)})
	}

	if s.dockerCfgPath != "" {
		// docker credentials are fetched verbatim (not extracted) under the name .dockercfg
		uri := s.serveFrameworkArtifactWithFilename(s.dockerCfgPath, ".dockercfg")
		ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri), Executable: proto.Bool(false), Extract: proto.Bool(false)})
	}

	//TODO(jdef): provide some way (env var?) for users to customize executor config
	//TODO(jdef): set -address to 127.0.0.1 if `address` is 127.0.0.1
	apiServerArgs := strings.Join(s.apiServerList, ",")
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--api-servers=%s", apiServerArgs))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--v=%d", s.executorLogV)) // this also applies to the minion
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--allow-privileged=%t", s.allowPrivileged))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--suicide-timeout=%v", s.executorSuicideTimeout))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--mesos-launch-grace-period=%v", s.launchGracePeriod))

	if s.executorBindall {
		//TODO(jdef) determine whether hostname-override is really needed for bindall because
		//it conflicts with kubelet node status checks/updates
		//ci.Arguments = append(ci.Arguments, "--hostname-override=0.0.0.0")
		ci.Arguments = append(ci.Arguments, "--address=0.0.0.0")
	}

	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--mesos-cgroup-prefix=%v", s.mesosCgroupPrefix))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--cadvisor-port=%v", s.kubeletCadvisorPort))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--sync-frequency=%v", s.kubeletSyncFrequency))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--contain-pod-resources=%t", s.containPodResources))
	ci.Arguments = append(ci.Arguments, fmt.Sprintf("--enable-debugging-handlers=%t", s.enableProfiling))

	if s.authPath != "" {
		//TODO(jdef) should probably support non-local files, e.g. hdfs:///some/config/file
		uri, basename := s.serveFrameworkArtifact(s.authPath)
		ci.Uris = append(ci.Uris, &mesos.CommandInfo_URI{Value: proto.String(uri)})
		ci.Arguments = append(ci.Arguments, fmt.Sprintf("--auth-path=%s", basename))
	}

	// appendOptional adds --name=value only when value is non-empty
	appendOptional := func(name string, value string) {
		if value != "" {
			ci.Arguments = append(ci.Arguments, fmt.Sprintf("--%s=%s", name, value))
		}
	}
	if s.clusterDNS != nil {
		appendOptional("cluster-dns", s.clusterDNS.String())
	}
	appendOptional("cluster-domain", s.clusterDomain)
	appendOptional("root-dir", s.kubeletRootDirectory)
	appendOptional("docker-endpoint", s.kubeletDockerEndpoint)
	appendOptional("pod-infra-container-image", s.kubeletPodInfraContainerImage)
	appendOptional("host-network-sources", s.kubeletHostNetworkSources)
	appendOptional("network-plugin", s.kubeletNetworkPluginName)

	log.V(1).Infof("prepared executor command %q with args '%+v'", ci.GetValue(), ci.Arguments)

	// Create mesos scheduler driver.
	execInfo := &mesos.ExecutorInfo{
		Command: ci,
		Name:    proto.String(cloud.KubernetesExecutorName),
		Source:  proto.String(execcfg.DefaultInfoSource),
	}

	// Check for staticPods
	data, staticPodCPUs, staticPodMem := s.prepareStaticPods()

	execInfo.Resources = []*mesos.Resource{
		mutil.NewScalarResource("cpus", float64(s.mesosExecutorCPUs)+staticPodCPUs),
		mutil.NewScalarResource("mem", float64(s.mesosExecutorMem)+staticPodMem),
	}

	// calculate ExecutorInfo hash to be used for validating compatibility
	// of ExecutorInfo's generated by other HA schedulers.
	ehash := hashExecutorInfo(execInfo)
	eid := uid.New(ehash, execcfg.DefaultInfoID)
	execInfo.ExecutorId = &mesos.ExecutorID{Value: proto.String(eid.String())}
	execInfo.Data = data

	return execInfo, eid, nil
}