func newProject(name string, cfg *config.CloudConfig, environmentLookup project.EnvironmentLookup) (*project.Project, error) { clientFactory, err := rosDocker.NewClientFactory(docker.ClientOpts{}) if err != nil { return nil, err } if environmentLookup == nil { environmentLookup = rosDocker.NewConfigEnvironment(cfg) } serviceFactory := &rosDocker.ServiceFactory{ Deps: map[string][]string{}, } context := &docker.Context{ ClientFactory: clientFactory, Context: project.Context{ ProjectName: name, NoRecreate: true, // for libcompose to not recreate on project reload, looping up the boot :) EnvironmentLookup: environmentLookup, ServiceFactory: serviceFactory, Log: cfg.Rancher.Log, LoggerFactory: logger.NewColorLoggerFactory(), }, } serviceFactory.Context = context return docker.NewProject(context) }
func (s *BaseSuite) createComposeProject(c *check.C, name string) { composeProject, err := docker.NewProject(&docker.Context{ Context: project.Context{ ComposeFile: fmt.Sprintf("resources/compose/%s.yml", name), ProjectName: fmt.Sprintf("integration-test-%s", name), }, }) c.Assert(err, checker.IsNil) s.composeProject = composeProject s.started = make(chan bool) s.stopped = make(chan bool) s.deleted = make(chan bool) s.listenChan = make(chan project.Event) go s.startListening(c) composeProject.AddListener(s.listenChan) composeProject.Start() // Wait for compose to start <-s.started defer close(s.started) }
// ReadComposeVolumes reads a docker-compose.yml and return a slice of // directories to sync into the Docker Host // // "." and "./." is converted to the current directory parity is running from. // Any volume starting with "/" will be treated as an absolute path. // All other volumes (e.g. starting with "./" or without a prefix "/") will be treated as // relative paths. func ReadComposeVolumes() []string { var volumes []string files := FindDockerComposeFiles() for i, file := range files { if _, err := os.Stat(file); err == nil { project, err := docker.NewProject(&docker.Context{ Context: project.Context{ ComposeFiles: []string{file}, ProjectName: fmt.Sprintf("parity-%d", i), }, }) if err != nil { log.Info("Could not parse compose file") } for _, c := range project.Configs { for _, v := range c.Volumes { v = strings.SplitN(v, ":", 2)[0] if v == "." || v == "./." { v, _ = os.Getwd() } else if strings.Index(v, "/") != 0 { cwd, _ := os.Getwd() v = fmt.Sprintf("%s/%s", cwd, v) } volumes = append(volumes, mutils.LinuxPath(v)) } } } } return volumes }
func (c *ComposeWrapper) createDockerContext() (project.APIProject, error) { if c.context.EnvParams != nil && len(c.context.EnvParams) > 0 { file, err := os.Open(c.context.ComposeFile) if err != nil { return nil, fmt.Errorf("Error opening filename %s, %s", c.context.ComposeFile, err.Error()) } parsed, missing := envsubst.SubstFileTokens(file, c.context.EnvParams) log.Debug("Map: %v\nParsed: %s\n", c.context.EnvParams, parsed) if c.context.ErrorOnMissingParams && missing { return nil, ErrorParamsMissing } file, err = ioutil.TempFile("", "depcon") if err != nil { return nil, err } err = ioutil.WriteFile(file.Name(), []byte(parsed), os.ModeTemporary) if err != nil { return nil, err } c.context.ComposeFile = file.Name() } return docker.NewProject(&ctx.Context{ Context: project.Context{ ComposeFiles: strings.Split(c.context.ComposeFile, ","), ProjectName: c.context.ProjectName, }, }, nil) }
// CreateProject creates a compose project with the given name based on the // specified compose files func CreateProject(name string, composeFiles ...string) (*Project, error) { apiClient, err := client.NewEnvClient() if err != nil { return nil, err } composeProject, err := docker.NewProject(&docker.Context{ Context: project.Context{ ComposeFiles: composeFiles, ProjectName: name, }, }) if err != nil { return nil, err } p := &Project{ composeProject: composeProject, listenChan: make(chan project.Event), started: make(chan struct{}), stopped: make(chan struct{}), deleted: make(chan struct{}), client: apiClient, } // Listen to compose events go p.startListening() p.composeProject.AddListener(p.listenChan) return p, nil }
// Create implements ProjectFactory.Create using docker client. func (p *ProjectFactory) Create(c *cli.Context) (*project.Project, error) { context := &docker.Context{} context.LoggerFactory = logger.NewColorLoggerFactory() Populate(context, c) command.Populate(&context.Context, c) return docker.NewProject(context) }
// up implements the `up` command: it authenticates, parses both the harbor
// and docker compose files (the latter twice — once via the project's own
// deserializer and once via libcompose for full-spec support), then creates
// or updates each shipment found in the harbor compose file.
// NOTE(review): Login/Deserialize* errors are not checked here — presumably
// those helpers fatal out internally; confirm.
func up(cmd *cobra.Command, args []string) {
	//make sure user is authenticated
	username, token, _ := Login()

	//read the harbor compose file
	harborCompose := DeserializeHarborCompose(HarborComposeFile)

	//read the docker compose file
	dockerCompose := DeserializeDockerCompose(DockerComposeFile)

	//use libcompose to parse compose yml file as well (since it supports the full spec)
	dockerComposeProject, err := docker.NewProject(&ctx.Context{
		Context: project.Context{
			ComposeFiles: []string{DockerComposeFile},
		},
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	//iterate shipments
	for shipmentName, shipment := range harborCompose.Shipments {
		fmt.Printf("Starting %v ...\n", shipmentName)
		if Verbose {
			log.Printf("processing shipment: %v/%v", shipmentName, shipment.Env)
		}

		//fetch the current state
		shipmentObject := GetShipmentEnvironment(username, token, shipmentName, shipment.Env)

		//creating a shipment is a different workflow than updating
		//bulk create a shipment if it doesn't exist
		if shipmentObject == nil {
			if Verbose {
				log.Println("shipment environment not found")
			}
			createShipment(username, token, shipmentName, dockerCompose, shipment, dockerComposeProject)
		} else {
			//make changes to harbor based on compose files
			updateShipment(username, token, shipmentObject, shipmentName, dockerCompose, shipment, dockerComposeProject)

			//TODO: desired state reconciliation
		}

		fmt.Println("done")
	} //shipments
}
func main() { project, err := docker.NewProject(&docker.Context{ Context: project.Context{ ComposeFile: "docker-compose.yml", ProjectName: "yeah-compose", }, }) if err != nil { log.Fatal(err) } project.Up() }
func (c *ComposeWrapper) createDockerContext() (*project.Project, error) { clientFactory, err := docker.NewDefaultClientFactory(docker.ClientOpts{}) if err != nil { log.Fatal(err) } tlsVerify := os.Getenv(DOCKER_TLS_VERIFY) if tlsVerify == "1" { clientFactory, err = docker.NewDefaultClientFactory(docker.ClientOpts{ TLS: true, TLSVerify: true, }) if err != nil { log.Fatal(err) } } if c.context.EnvParams != nil && len(c.context.EnvParams) > 0 { file, err := os.Open(c.context.ComposeFile) if err != nil { return nil, fmt.Errorf("Error opening filename %s, %s", c.context.ComposeFile, err.Error()) } parsed, missing := envsubst.SubstFileTokens(file, c.context.ComposeFile, c.context.EnvParams) log.Debug("Map: %v\nParsed: %s\n", c.context.EnvParams, parsed) if c.context.ErrorOnMissingParams && missing { return nil, ErrorParamsMissing } file, err = ioutil.TempFile("", "depcon") if err != nil { return nil, err } err = ioutil.WriteFile(file.Name(), []byte(parsed), os.ModeTemporary) if err != nil { return nil, err } c.context.ComposeFile = file.Name() } return docker.NewProject(&docker.Context{ Context: project.Context{ ComposeFile: c.context.ComposeFile, ProjectName: c.context.ProjectName, }, ClientFactory: clientFactory, }) }
// Helepr function to return a new project. func NewProject(n string) *project.Project { c := &docker.Context{ Context: project.Context{ ComposeFile: "docker-compose.yml", ProjectName: n, }, } p, err := docker.NewProject(c) if err != nil { Exit(err) } return p }
func (ComposeService) Compose(s string) error { project, err := docker.NewProject(&docker.Context{ Context: project.Context{ ComposeFile: "docker-compose.yml", ProjectName: "my-compose", }, }) check(err) fmt.Println(s) project.Up() return nil }
func BuildComposeProject(composeFile string) (*project.Project, error) { project, err := docker.NewProject(&docker.Context{ Context: project.Context{ ComposeFile: composeFile, ProjectName: projectNameByGitProject(), LoggerFactory: logger.NewColorLoggerFactory(), }, }) if err != nil { return nil, err } sanitizeConfig(project) return project, nil }
// main function func main() { fmt.Println("Feed me a compose file now:") // Read in our compose file from stdin yamlbytes, err := ioutil.ReadAll(os.Stdin) // unmarshal it so we can enumerate our services yaml.Unmarshal(yamlbytes, &services) // create a new compose project p, err = docker.NewProject(&docker.Context{ Context: project.Context{ ComposeBytes: yamlbytes, ProjectName: "my-compose", // TODO make an environment variable }, }) if err != nil { log.Fatal(err) } // create our docker client link client, _ := dockerclient.NewClientFromEnv() // make and attach our listener channel events := make(chan *dockerclient.APIEvents) client.AddEventListener(events) // start watching for events go watchEvents(events) // main loop for { // look up how many nodes we have in the cluster // this is mainly for when a node is added nodes = getNodes(client) // Print the number of nodes we found log.Printf("Nodes: %d\n", nodes) // Do the heavy lifting once scale() // sleep for a bit, then check again time.Sleep(time.Minute) // TODO make an environment variable } }
// Create implements ProjectFactory.Create using docker client. func (p *ProjectFactory) Create(c *cli.Context) (project.APIProject, error) { context := &docker.Context{} context.LoggerFactory = logger.NewColorLoggerFactory() Populate(context, c) context.ComposeFiles = c.GlobalStringSlice("file") if len(context.ComposeFiles) == 0 { context.ComposeFiles = []string{"docker-compose.yml"} if _, err := os.Stat("docker-compose.override.yml"); err == nil { context.ComposeFiles = append(context.ComposeFiles, "docker-compose.override.yml") } } context.ProjectName = c.GlobalString("project-name") return docker.NewProject(context, nil) }
func main() { project, err := docker.NewProject(&docker.Context{ Context: project.Context{ ComposeFiles: []string{"docker-compose.yml"}, ProjectName: "yeah-compose", }, }, nil) if err != nil { log.Fatal(err) } err = project.Up(context.Background(), options.Up{}) if err != nil { log.Fatal(err) } }
func (s *APISuite) TestVolumeWithoutComposeFile(c *C) { service := ` service: image: busybox command: echo Hello world! volumes: - /etc/selinux:/etc/selinux` project, err := docker.NewProject(&docker.Context{ Context: project.Context{ ComposeBytes: [][]byte{[]byte(service)}, ProjectName: "test-volume-without-compose-file", }, }) c.Assert(err, IsNil) err = project.Up() c.Assert(err, IsNil) }
// GetProject returns the Docker project from the configuration func (c *DockerCompose) GetProject() (p *project.Project, err error) { if _, err = os.Stat(c.ComposeFile); err == nil { p, err = docker.NewProject(&docker.Context{ Context: project.Context{ ComposeFiles: []string{c.ComposeFile}, ProjectName: fmt.Sprintf("parity-%s", c.pluginConfig.ProjectNameSafe), }, }) if err != nil { log.Error("Could not create Compose project %s", err.Error()) return p, err } } else { log.Error("Could not parse compose file: %s", err.Error()) return p, err } return p, nil }
// TestEvents verifies that Project.Events streams container lifecycle
// events while a two-service project comes up: 2 creates + 2 starts.
func (s *APISuite) TestEvents(c *check.C) {
	// Event payloads differ on daemon 1.9; skip there.
	testRequires(c, not(DaemonVersionIs("1.9")))
	composeFile := `
simple:
  image: busybox:latest
  command: top
another:
  image: busybox:latest
  command: top
`
	project, err := docker.NewProject(&ctx.Context{
		Context: project.Context{
			ComposeBytes: [][]byte{[]byte(composeFile)},
			ProjectName:  "test-api-events",
		},
	}, nil)
	c.Assert(err, check.IsNil)

	ctx, cancelFun := context.WithCancel(context.Background())

	evts, err := project.Events(ctx)
	c.Assert(err, check.IsNil)

	go func() {
		c.Assert(project.Up(ctx, options.Up{}), check.IsNil)
		// Close after everything is done
		// NOTE(review): closing evts from the consumer side assumes the
		// event sender has stopped after cancelFun + the sleep — confirm
		// there is no race with a late send panicking on the closed channel.
		time.Sleep(250 * time.Millisecond)
		cancelFun()
		close(evts)
	}()

	// Drain until the channel is closed by the goroutine above.
	actual := []events.ContainerEvent{}
	for event := range evts {
		actual = append(actual, event)
	}

	// Should be 4 events (2 create, 2 start)
	c.Assert(len(actual), check.Equals, 4, check.Commentf("%v", actual))
}
func (s *BaseSuite) createComposeProject(c *check.C, name string) { composeProject, err := docker.NewProject(&docker.Context{ Context: project.Context{ ComposeFile: fmt.Sprintf("resources/compose/%s.yml", name), ProjectName: fmt.Sprintf("integration-test-%s", name), }, }) c.Assert(err, checker.IsNil) s.composeProject = composeProject s.listenChan = make(chan project.ProjectEvent) go s.startListening(c) composeProject.AddListener(s.listenChan) composeProject.Start() // FIXME Wait for compose to start // Waiting for libcompose#55 to be merged // <-s.started time.Sleep(2 * time.Second) }
func newProject(name string, cfg *config.CloudConfig) (*project.Project, error) { clientFactory, err := rosDocker.NewClientFactory(docker.ClientOpts{}) if err != nil { return nil, err } serviceFactory := &rosDocker.ServiceFactory{ Deps: map[string][]string{}, } context := &docker.Context{ ClientFactory: clientFactory, Context: project.Context{ ProjectName: name, EnvironmentLookup: rosDocker.NewConfigEnvironment(cfg), ServiceFactory: serviceFactory, Rebuild: true, Log: cfg.Rancher.Log, LoggerFactory: logger.NewColorLoggerFactory(), }, } serviceFactory.Context = context return docker.NewProject(context) }
func newProject(name string, cfg *config.CloudConfig, environmentLookup composeConfig.EnvironmentLookup, authLookup *rosDocker.ConfigAuthLookup) (*project.Project, error) { clientFactory, err := rosDocker.NewClientFactory(composeClient.Options{}) if err != nil { return nil, err } if environmentLookup == nil { environmentLookup = rosDocker.NewConfigEnvironment(cfg) } if authLookup == nil { authLookup = rosDocker.NewConfigAuthLookup(cfg) } serviceFactory := &rosDocker.ServiceFactory{ Deps: map[string][]string{}, } context := &docker.Context{ ClientFactory: clientFactory, AuthLookup: authLookup, Context: project.Context{ ProjectName: name, EnvironmentLookup: environmentLookup, ServiceFactory: serviceFactory, LoggerFactory: logger.NewColorLoggerFactory(), }, } serviceFactory.Context = context authLookup.SetContext(context) return docker.NewProject(context, &composeConfig.ParseOptions{ Interpolate: true, Validate: false, Preprocess: preprocessServiceMap, }) }
// Create implements ProjectFactory.Create using docker client. func (p *ProjectFactory) Create(c *cli.Context) (project.APIProject, error) { context := &ctx.Context{} context.LoggerFactory = logger.NewColorLoggerFactory() Populate(context, c) return docker.NewProject(context, nil) }
// Run implements the `destroy` CLI command: it deletes the boot2k8s compose
// project, then interactively removes the local-master containers and,
// separately, any kubernetes-created containers matching the known filters.
// Returns 0 on success or user decline, 1 on any error.
func (c *DestroyCommand) Run(args []string) int {
	var insecure bool
	flags := flag.NewFlagSet("destroy", flag.ContinueOnError)
	flags.BoolVar(&insecure, "insecure", false, "")
	flags.Usage = func() { c.Ui.Error(c.Help()) }

	// Route flag-package error output through the UI, line by line.
	errR, errW := io.Pipe()
	errScanner := bufio.NewScanner(errR)
	go func() {
		for errScanner.Scan() {
			c.Ui.Error(errScanner.Text())
		}
	}()
	flags.SetOutput(errW)

	if err := flags.Parse(args); err != nil {
		return 1
	}

	// k8s.yml is compiled in as an asset.
	compose, err := config.Asset("k8s.yml")
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to read k8s.yml: %s", err))
		return 1
	}

	// Set up docker client
	clientFactory, err := docker.NewDefaultClientFactory(
		docker.ClientOpts{
			TLS: !insecure,
		},
	)
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to construct Docker client: %s", err))
		return 1
	}

	// Setup new docker-compose project
	context := &docker.Context{
		Context: project.Context{
			Log:          false,
			ComposeBytes: compose,
			ProjectName:  "boot2k8s",
		},
		ClientFactory: clientFactory,
	}

	project, err := docker.NewProject(context)
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to setup project: %s", err))
		return 1
	}

	// Tear down the compose-managed services first.
	if err := project.Delete(); err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to destroy project: %s", err))
		return 1
	}

	client := clientFactory.Create(nil)

	// Marshaling to post filter as API request
	filterLocalMasterStr, _ := json.Marshal(FilterLocalMaster)

	// Get Container info from deamon based on fileter
	localMasters, err := client.ListContainers(true, false, (string)(filterLocalMasterStr))
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to list containers: %s", err))
		return 1
	}

	if len(localMasters) > 0 {
		c.Ui.Output("Are you sure you want to destroy below containers?")
		for _, container := range localMasters {
			c.Ui.Output(fmt.Sprintf(" %s", container.Names[0]))
		}

		// NOTE(review): message typo "will no be" — candidate fix.
		if yes, err := AskYesNo(); !yes || err != nil {
			if err == nil {
				c.Ui.Info("Containers will no be destroyed, since the confirmation")
				return 0
			}
			c.Ui.Error(fmt.Sprintf(
				"Terminate to destroy: %s", err.Error()))
			return 1
		}

		// Remove confirmed containers; results and errors stream on channels.
		resultCh, errCh := removeContainers(client, localMasters, true, true)
		go func() {
			for res := range resultCh {
				c.Ui.Output(fmt.Sprintf(
					"Successfully destroy %s", res.Names[0]))
			}
		}()

		for err := range errCh {
			c.Ui.Error(fmt.Sprintf("Error: %s", err))
		}

		c.Ui.Output("")
	}

	// Marshaling to post filter as API request
	filterK8SRelatedStr, _ := json.Marshal(FilterK8SRelated)

	relatedContainers, err := client.ListContainers(true, false, (string)(filterK8SRelatedStr))
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to list containers: %s", err))
		return 1
	}

	if len(relatedContainers) < 1 {
		// Correctly clean all containers
		return 0
	}

	// Second, riskier pass: containers kubernetes itself created.
	c.Ui.Output("Do you also remove these containers? (these are created by kubernetes)")
	c.Ui.Error("==> WARNING: boot2kubernetes can not detect below containers")
	c.Ui.Error(" are created by kubernetes which up by boot2kubernetes.")
	c.Ui.Error(" Be sure below these will not be used anymore!")
	for _, container := range relatedContainers {
		c.Ui.Output(fmt.Sprintf(" %s", container.Names[0]))
	}

	if yes, err := AskYesNo(); !yes || err != nil {
		if err == nil {
			c.Ui.Info("Containers will no be destroyed, since the confirmation")
			return 0
		}
		c.Ui.Error(fmt.Sprintf(
			"Terminate to destroy: %s", err.Error()))
		return 1
	}

	resultCh, errCh := removeContainers(client, relatedContainers, true, true)
	go func() {
		for res := range resultCh {
			c.Ui.Output(fmt.Sprintf(
				"Successfully removed %s", res.Names[0]))
		}
	}()

	for err := range errCh {
		c.Ui.Error(fmt.Sprintf("Error: %s", err))
	}

	return 0
}
// main is the helm CLI entry point: it pre-parses the local compose file,
// wires up the kingpin command tree, and dispatches the chosen subcommand.
func main() {
	var err error

	// Parse the local compose file up front so subcommands can rely on it.
	pp, err := docker.NewProject(&docker.Context{
		Context: project.Context{
			ComposeFile: "docker-compose.yml",
			ProjectName: "helm",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// NOTE(review): Parse's return value is discarded — a malformed compose
	// file would go unnoticed here; consider checking it.
	pp.Parse()
	// pp.Up()

	pwd, err := os.Getwd()
	if err != nil {
		fmt.Println("Error: could not determine your current directory:", err.Error())
		os.Exit(1)
	}

	var (
		verbose = kingpin.
			Flag("verbose", "Verbose mode.").
			Short('v').
			Bool()
	)

	// Clean up help.
	kingpin.UsageTemplate(kingpin.CompactUsageTemplate)
	kingpin.CommandLine.Help = "Fast, Docker Compose-based development for Mac OS X."
	kingpin.CommandLine.HelpFlag.Short('h')

	// Setup version printing.
	kingpin.Flag("version", "Show version.").PreAction(kingpin.Action(func(*kingpin.ParseContext) error {
		fmt.Printf("%v version %v, build %v\n", Name, Version, Build)
		os.Exit(0)
		return nil
	})).Bool()

	// Command tree: up/down plus the `host` machine-management subcommands.
	up := kingpin.Command("up", "Start up services defined in a Docker Compose file.")
	upFile := up.Flag("file", "Docker Compose file to use").Short('f').Default("docker-compose.yml").String()
	upDetached := up.Flag("detached", "Specify a Detached mode: Run containers in the background.").Short('d').Bool()

	down := kingpin.Command("down", "Stop any running services.")
	downFile := down.Flag("file", "Specify a Docker Compose file to use").Short('f').Default("docker-compose.yml").String()

	cmdHost := kingpin.Command("host", "Control and configure the Helm Docker Machine.")
	hostInit := cmdHost.Command("init", "Initialise the Helm Docker Machine for use.")
	hostInitForce := hostInit.Flag("force", "Force re-initialisation").Short('f').Bool()
	hostStart := cmdHost.Command("start", "Start the Helm Docker Machine.").Alias("up")
	hostStop := cmdHost.Command("stop", "Stop the Helm Docker Machine.").Alias("down")
	hostRestart := cmdHost.Command("restart", "Restart the Helm Docker Machine.")
	hostDestroy := cmdHost.Command("destroy", "Stop and remove the Helm Docker Machine.")
	hostStatus := cmdHost.Command("status", "Show status of the Helm Docker Machine.")

	// Dispatch on the parsed subcommand.
	switch kingpin.Parse() {
	case up.FullCommand():
		log.Info("Up!")
		log.Info("File:", *upFile)
		if *upDetached {
			log.Info("Detaching...")
		}
	case down.FullCommand():
		log.Info("Down!")
		log.Info("File:", *downFile)
	case hostInit.FullCommand():
		_, err := host.NewHost(true, *hostInitForce)
		if err != nil {
			log.Fatal("The Helm host already exists, run `helm host destroy` first if you wish to recreate it. Alternatively, run init with the force flag: `helm host init --force`.")
		}
		log.Info("host init!")
	case hostStart.FullCommand():
		helmHost, err := host.NewHost(false, false)
		if err != nil {
			log.Fatal("Could not start host, it might not exist. Try running `helm host init`.")
		}
		err = helmHost.Start()
		if err != nil {
			log.Fatal(err)
		}
		log.Info("host start!")
	case hostStop.FullCommand():
		helmHost, err := host.NewHost(false, false)
		if err != nil {
			log.Fatal("Could not stop host, it might not exist. Try running `helm host init`.")
		}
		err = helmHost.Stop()
		if err != nil {
			log.Fatal(err)
		}
		log.Info("host stop!")
	case hostRestart.FullCommand():
		helmHost, err := host.NewHost(false, false)
		if err != nil {
			log.Fatal("Could not restart host, it might not exist. Try running `helm host init`.")
		}
		err = helmHost.Restart()
		if err != nil {
			log.Fatal(err)
		}
		log.Info("host restart!")
	case hostDestroy.FullCommand():
		helmHost, err := host.NewHost(false, false)
		if err != nil {
			log.Fatal("Could not destroy host, it might not exist. Try running `helm host init`.")
		}
		err = helmHost.Destroy()
		if err != nil {
			log.Fatal(err)
		}
		// NOTE(review): "destory" typo in the message below — candidate fix.
		log.Info("host destory!")
	case hostStatus.FullCommand():
		helmHost, err := host.NewHost(false, false)
		if err != nil {
			log.Fatal("Host doesn't exist. Try running `helm host init`.")
		}
		status, err := helmHost.Host.Driver.GetState()
		if err != nil {
			log.Fatal(err)
		}
		log.Infof("Host status: %v", status.String())
	}

	if *verbose {
		log.Info("Hello, this is verbose mode.")
		log.Info("The current directory is:", pwd)
	}
}
// Run implements the `up` CLI command: it brings the bundled boot2k8s
// compose project up, waits for the containers to become ready (or fail,
// get interrupted, or time out), and on darwin additionally runs a port
// forwarding server for the boot2docker VM until interrupted.
// Returns 0 on success, 1 on any error.
func (c *UpCommand) Run(args []string) int {
	var insecure bool
	var logLevel string
	flags := flag.NewFlagSet("up", flag.ContinueOnError)
	flags.BoolVar(&insecure, "insecure", false, "")
	flags.StringVar(&logLevel, "log-level", "info", "")
	flags.Usage = func() { c.Ui.Error(c.Help()) }

	// Route flag-package error output through the UI, line by line.
	errR, errW := io.Pipe()
	errScanner := bufio.NewScanner(errR)
	go func() {
		for errScanner.Scan() {
			c.Ui.Error(errScanner.Text())
		}
	}()
	flags.SetOutput(errW)

	if err := flags.Parse(args); err != nil {
		return 1
	}

	// k8s.yml is compiled in as an asset.
	compose, err := config.Asset("k8s.yml")
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to read k8s.yml: %s", err))
		return 1
	}

	// Set up docker client
	clientFactory, err := docker.NewDefaultClientFactory(
		docker.ClientOpts{
			TLS: !insecure,
		},
	)
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to construct Docker client: %s", err))
		return 1
	}

	// Setup new docker-compose project
	context := &docker.Context{
		Context: project.Context{
			Log:          false,
			ComposeBytes: compose,
			ProjectName:  "boot2k8s",
		},
		ClientFactory: clientFactory,
	}

	// Setup new docker-compose project
	project, err := docker.NewProject(context)
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to setup project: %s", err))
		return 1
	}

	c.Ui.Output("Start kubernetes cluster!")

	// Run Up in the background; failures are reported over upErrCh.
	upErrCh := make(chan error)
	go func() {
		if err := project.Up(); err != nil {
			upErrCh <- err
		}
	}()

	client := clientFactory.Create(nil)

	// NOTE(review): signal.Notify on an unbuffered channel can drop a
	// signal delivered while no receiver is ready — confirm intent.
	sigCh := make(chan os.Signal)
	signal.Notify(sigCh, os.Interrupt)

	// Wait for the first of: readiness, Up failure, interrupt, timeout.
	select {
	case <-afterContainerReady(client):
		c.Ui.Info("Successfully start kubernetes cluster")
	case err := <-upErrCh:
		c.Ui.Error("")
		c.Ui.Error(fmt.Sprintf("Failed to start containers: %s", err))
		// NOTE(review): "wroking" typo in the message below — candidate fix.
		c.Ui.Error("Check docker daemon is wroking")
		return 1
	case <-sigCh:
		c.Ui.Error("")
		c.Ui.Error("Interrupted!")
		c.Ui.Error("It's ambiguous that boot2kubernetes could correctly start containers.")
		c.Ui.Error("So request to kubelet may be failed. Check the containers are working")
		c.Ui.Error("with `docker ps` command by yourself.")
		return 1
	case <-time.After(CheckTimeOut):
		c.Ui.Error("")
		c.Ui.Error("Timeout happened while waiting cluster containers are ready.")
		c.Ui.Error("It's ambiguous that boot2kubernetes could correctly start containers.")
		c.Ui.Error("So request to kubelet may be failed. Check the containers are working")
		c.Ui.Error("with `docker ps` command by yourself.")
		return 1
	}

	// If docker runs on boot2docker, port forwarding is needed.
	if runtime.GOOS == "darwin" {
		c.Ui.Output("")
		c.Ui.Output("==> WARNING: You're running docker on boot2docker!")
		c.Ui.Output(" To connect to master api server from local environment,")
		c.Ui.Output(" port forwarding is needed. boot2kubernetes starts ")
		c.Ui.Output(" server for that. To stop server, use ^C (Interrupt).\n")

		// Create logger with Log level
		logger := log.New(&logutils.LevelFilter{
			Levels:   []logutils.LogLevel{"DEBUG", "INFO", "WARN", "ERROR"},
			MinLevel: (logutils.LogLevel)(strings.ToUpper(logLevel)),
			Writer:   os.Stderr,
		}, "", log.LstdFlags)
		logger.Printf("[DEBUG] LogLevel: %s", logLevel)

		// Setup port forward server
		server := &PortForwardServer{
			Logger:       logger,
			LocalServer:  DefaultLocalServer,
			RemoteServer: DefaultRemoteServer,
		}

		doneCh, errCh, err := server.Start()
		if err != nil {
			c.Ui.Error(fmt.Sprintf(
				"Failed to start port forwarding server: %s", err))
			return 1
		}

		sigCh := make(chan os.Signal)
		signal.Notify(sigCh, os.Interrupt)

		// Run until the forwarder fails or the user interrupts.
		select {
		case err := <-errCh:
			c.Ui.Error(fmt.Sprintf(
				"Error while running port forwarding server: %s", err))
			close(doneCh)
			return 1
		case <-sigCh:
			c.Ui.Error("\nInterrupted!")
			close(doneCh)
			// Need some time for closing work...
			time.Sleep(ClosingTime)
		}
	}

	return 0
}