Example #1
func (c *ComposeWrapper) createDockerContext() (*project.Project, error) {

	clientFactory, err := docker.NewDefaultClientFactory(docker.ClientOpts{})
	if err != nil {
		return nil, err
	}

	tlsVerify := os.Getenv(DOCKER_TLS_VERIFY)

	if tlsVerify == "1" {
		clientFactory, err = docker.NewDefaultClientFactory(docker.ClientOpts{
			TLS:       true,
			TLSVerify: true,
		})
		if err != nil {
			return nil, err
		}
	}

	if len(c.context.EnvParams) > 0 {
		file, err := os.Open(c.context.ComposeFile)
		if err != nil {
			return nil, fmt.Errorf("error opening compose file %s: %s", c.context.ComposeFile, err.Error())
		}
		defer file.Close()
		parsed, missing := envsubst.SubstFileTokens(file, c.context.ComposeFile, c.context.EnvParams)
		log.Debug("Map: %v\nParsed: %s\n", c.context.EnvParams, parsed)

		if c.context.ErrorOnMissingParams && missing {
			return nil, ErrorParamsMissing
		}
		// Write the substituted compose content to a temporary file.
		tmpFile, err := ioutil.TempFile("", "depcon")
		if err != nil {
			return nil, err
		}
		defer tmpFile.Close()
		if _, err := tmpFile.Write([]byte(parsed)); err != nil {
			return nil, err
		}
		c.context.ComposeFile = tmpFile.Name()
	}

	return docker.NewProject(&docker.Context{
		Context: project.Context{
			ComposeFile: c.context.ComposeFile,
			ProjectName: c.context.ProjectName,
		},
		ClientFactory: clientFactory,
	})
}
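Note on Example #1: it constructs the client factory twice when DOCKER_TLS_VERIFY is set to "1". A minimal sketch of the same TLS selection, assuming the libcompose docker.ClientOpts and docker.NewDefaultClientFactory API used above, builds the options first and creates the factory once:

	opts := docker.ClientOpts{}
	if os.Getenv(DOCKER_TLS_VERIFY) == "1" {
		// The daemon requires TLS; enable it and verify its certificate.
		opts.TLS = true
		opts.TLSVerify = true
	}
	clientFactory, err := docker.NewDefaultClientFactory(opts)
	if err != nil {
		return nil, err
	}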
Example #2
func (c *ListCommand) Run(args []string) int {

	var insecure bool
	flags := flag.NewFlagSet("list", flag.ContinueOnError)
	flags.BoolVar(&insecure, "insecure", false, "")
	flags.Usage = func() { c.Ui.Error(c.Help()) }

	errR, errW := io.Pipe()
	errScanner := bufio.NewScanner(errR)
	go func() {
		for errScanner.Scan() {
			c.Ui.Error(errScanner.Text())
		}
	}()

	flags.SetOutput(errW)

	if err := flags.Parse(args); err != nil {
		return 1
	}

	// Set up docker client
	clientFactory, err := docker.NewDefaultClientFactory(
		docker.ClientOpts{
			TLS: !insecure,
		},
	)

	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to construct Docker client: %s", err))
		return 1
	}

	client := clientFactory.Create(nil)

	// Marshal the filter so it can be passed to the API request
	filterK8SRelatedStr, _ := json.Marshal(FilterK8SRelated)
	relatedContainers, err := client.ListContainers(true, false, string(filterK8SRelatedStr))
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to list containers: %s", err))
		return 1
	}

	if len(relatedContainers) < 1 {
		c.Ui.Info("There are no containers which are labeled io.kubernetes.pod.name")
		return 0
	}

	c.Ui.Output("NAME")
	for _, container := range relatedContainers {
		c.Ui.Output(fmt.Sprintf("%s", container.Names[0]))
	}

	return 0
}
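Note on Example #2: FilterK8SRelated is not shown here. The Docker Engine API takes container filters as a JSON-encoded map of filter name to values, so a plausible (hypothetical) definition matching the label mentioned in the message above would be:

	// Hypothetical definition; the real value lives elsewhere in the project.
	var FilterK8SRelated = map[string][]string{
		// Match containers carrying the pod-name label set by the kubelet.
		"label": {"io.kubernetes.pod.name"},
	}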
Example #3
// Populate updates the specified docker context based on command line arguments and subcommands.
func Populate(context *docker.Context, c *cli.Context) {
	context.ConfigDir = c.String("configdir")

	opts := docker.ClientOpts{}
	opts.TLS = c.GlobalBool("tls")
	opts.TLSVerify = c.GlobalBool("tlsverify")
	opts.TLSOptions.CAFile = c.GlobalString("tlscacert")
	opts.TLSOptions.CertFile = c.GlobalString("tlscert")
	opts.TLSOptions.KeyFile = c.GlobalString("tlskey")

	clientFactory, err := docker.NewDefaultClientFactory(opts)
	if err != nil {
		logrus.Fatalf("Failed to construct Docker client: %v", err)
	}

	context.ClientFactory = clientFactory
}
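Note on Example #3: a minimal usage sketch, assuming the cli package is codegangsta/cli (urfave/cli v1) and that the flag names Populate reads are registered as global flags; the command name and action body are illustrative only:

	app := cli.NewApp()
	app.Flags = []cli.Flag{
		// Global TLS flags read via c.GlobalBool / c.GlobalString in Populate.
		cli.BoolFlag{Name: "tls"},
		cli.BoolFlag{Name: "tlsverify"},
		cli.StringFlag{Name: "tlscacert"},
		cli.StringFlag{Name: "tlscert"},
		cli.StringFlag{Name: "tlskey"},
	}
	app.Commands = []cli.Command{
		{
			Name:  "up",
			Flags: []cli.Flag{cli.StringFlag{Name: "configdir"}},
			Action: func(c *cli.Context) {
				context := &docker.Context{}
				Populate(context, c)
				// context.ClientFactory can now create Docker clients.
			},
		},
	}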
Example #4
func (c *UpCommand) Run(args []string) int {
	var insecure bool
	var logLevel string
	flags := flag.NewFlagSet("up", flag.ContinueOnError)
	flags.BoolVar(&insecure, "insecure", false, "")
	flags.StringVar(&logLevel, "log-level", "info", "")
	flags.Usage = func() { c.Ui.Error(c.Help()) }

	errR, errW := io.Pipe()
	errScanner := bufio.NewScanner(errR)
	go func() {
		for errScanner.Scan() {
			c.Ui.Error(errScanner.Text())
		}
	}()

	flags.SetOutput(errW)

	if err := flags.Parse(args); err != nil {
		return 1
	}

	compose, err := config.Asset("k8s.yml")
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to read k8s.yml: %s", err))
		return 1
	}

	// Set up docker client
	clientFactory, err := docker.NewDefaultClientFactory(
		docker.ClientOpts{
			TLS: !insecure,
		},
	)
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to construct Docker client: %s", err))
		return 1
	}

	// Set up the docker-compose context
	context := &docker.Context{
		Context: project.Context{
			Log:          false,
			ComposeBytes: compose,
			ProjectName:  "boot2k8s",
		},
		ClientFactory: clientFactory,
	}

	// Create the docker-compose project
	project, err := docker.NewProject(context)
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to setup project: %s", err))
		return 1
	}

	c.Ui.Output("Start kubernetes cluster!")
	upErrCh := make(chan error)
	go func() {
		if err := project.Up(); err != nil {
			upErrCh <- err
		}
	}()

	client := clientFactory.Create(nil)

	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, os.Interrupt)

	select {
	case <-afterContainerReady(client):
		c.Ui.Info("Successfully start kubernetes cluster")
	case err := <-upErrCh:
		c.Ui.Error("")
		c.Ui.Error(fmt.Sprintf("Failed to start containers: %s", err))
		c.Ui.Error("Check docker daemon is wroking")
		return 1
	case <-sigCh:
		c.Ui.Error("")
		c.Ui.Error("Interrupted!")
		c.Ui.Error("It's ambiguous that boot2kubernetes could correctly start containers.")
		c.Ui.Error("So request to kubelet may be failed. Check the containers are working")
		c.Ui.Error("with `docker ps` command by yourself.")
		return 1
	case <-time.After(CheckTimeOut):
		c.Ui.Error("")
		c.Ui.Error("Timeout happened while waiting cluster containers are ready.")
		c.Ui.Error("It's ambiguous that boot2kubernetes could correctly start containers.")
		c.Ui.Error("So request to kubelet may be failed. Check the containers are working")
		c.Ui.Error("with `docker ps` command by yourself.")
		return 1
	}

	// If docker runs on boot2docker, port forwarding is needed.
	if runtime.GOOS == "darwin" {

		c.Ui.Output("")
		c.Ui.Output("==> WARNING: You're running docker on boot2docker!")
		c.Ui.Output("  To connect to master api server from local environment,")
		c.Ui.Output("  port forwarding is needed. boot2kubernetes starts ")
		c.Ui.Output("  server for that. To stop server, use ^C (Interrupt).\n")

		// Create logger with Log level
		logger := log.New(&logutils.LevelFilter{
			Levels:   []logutils.LogLevel{"DEBUG", "INFO", "WARN", "ERROR"},
			MinLevel: logutils.LogLevel(strings.ToUpper(logLevel)),
			Writer:   os.Stderr,
		}, "", log.LstdFlags)
		logger.Printf("[DEBUG] LogLevel: %s", logLevel)

		// Set up the port forwarding server
		server := &PortForwardServer{
			Logger:       logger,
			LocalServer:  DefaultLocalServer,
			RemoteServer: DefaultRemoteServer,
		}

		doneCh, errCh, err := server.Start()
		if err != nil {
			c.Ui.Error(fmt.Sprintf(
				"Failed to start port forwarding server: %s", err))
			return 1
		}

		sigCh := make(chan os.Signal, 1)
		signal.Notify(sigCh, os.Interrupt)
		select {
		case err := <-errCh:
			c.Ui.Error(fmt.Sprintf(
				"Error while running port forwarding server: %s", err))
			close(doneCh)
			return 1
		case <-sigCh:
			c.Ui.Error("\nInterrupted!")
			close(doneCh)
			// Give the server some time to finish closing before exiting.
			time.Sleep(ClosingTime)
		}
	}

	return 0
}
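Note on Example #4: afterContainerReady is referenced but not shown. A plausible sketch of the pattern it implies, assuming the samalba/dockerclient-style Client returned by clientFactory.Create and the same FilterK8SRelated filter as in Example #2; this is illustrative, not the project's actual implementation:

func afterContainerReady(client dockerclient.Client) <-chan struct{} {
	readyCh := make(chan struct{})
	go func() {
		ticker := time.NewTicker(2 * time.Second)
		defer ticker.Stop()
		for range ticker.C {
			// Poll for running containers matching the Kubernetes label filter.
			filterStr, err := json.Marshal(FilterK8SRelated)
			if err != nil {
				continue
			}
			containers, err := client.ListContainers(false, false, string(filterStr))
			if err == nil && len(containers) > 0 {
				// Signal readiness by closing the channel the caller selects on.
				close(readyCh)
				return
			}
		}
	}()
	return readyCh
}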
Example #5
func (c *DestroyCommand) Run(args []string) int {

	var insecure bool
	flags := flag.NewFlagSet("destroy", flag.ContinueOnError)
	flags.BoolVar(&insecure, "insecure", false, "")
	flags.Usage = func() { c.Ui.Error(c.Help()) }

	errR, errW := io.Pipe()
	errScanner := bufio.NewScanner(errR)
	go func() {
		for errScanner.Scan() {
			c.Ui.Error(errScanner.Text())
		}
	}()

	flags.SetOutput(errW)

	if err := flags.Parse(args); err != nil {
		return 1
	}

	compose, err := config.Asset("k8s.yml")
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to read k8s.yml: %s", err))
		return 1
	}

	// Set up docker client
	clientFactory, err := docker.NewDefaultClientFactory(
		docker.ClientOpts{
			TLS: !insecure,
		},
	)
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to construct Docker client: %s", err))
		return 1
	}

	// Set up the docker-compose project
	context := &docker.Context{
		Context: project.Context{
			Log:          false,
			ComposeBytes: compose,
			ProjectName:  "boot2k8s",
		},
		ClientFactory: clientFactory,
	}

	project, err := docker.NewProject(context)
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to setup project: %s", err))
		return 1
	}

	if err := project.Delete(); err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to destroy project: %s", err))
		return 1
	}

	client := clientFactory.Create(nil)

	// Marshal the filter so it can be passed to the API request
	filterLocalMasterStr, _ := json.Marshal(FilterLocalMaster)
	// Get container info from the daemon based on the filter
	localMasters, err := client.ListContainers(true, false, string(filterLocalMasterStr))
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to list containers: %s", err))
		return 1
	}

	if len(localMasters) > 0 {
		c.Ui.Output("Are you sure you want to destroy below containers?")
		for _, container := range localMasters {
			c.Ui.Output(fmt.Sprintf("  %s", container.Names[0]))
		}

		if yes, err := AskYesNo(); !yes || err != nil {
			if err == nil {
				c.Ui.Info("Containers will no be destroyed, since the confirmation")
				return 0
			}
			c.Ui.Error(fmt.Sprintf(
				"Terminate to destroy: %s", err.Error()))
			return 1
		}

		resultCh, errCh := removeContainers(client, localMasters, true, true)
		go func() {
			for res := range resultCh {
				c.Ui.Output(fmt.Sprintf(
					"Successfully destroy %s", res.Names[0]))
			}
		}()

		for err := range errCh {
			c.Ui.Error(fmt.Sprintf("Error: %s", err))
		}
		c.Ui.Output("")
	}

	// Marshal the filter so it can be passed to the API request
	filterK8SRelatedStr, _ := json.Marshal(FilterK8SRelated)
	relatedContainers, err := client.ListContainers(true, false, string(filterK8SRelatedStr))
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Failed to list containers: %s", err))
		return 1
	}

	if len(relatedContainers) < 1 {
		// All containers have already been cleaned up
		return 0
	}

	c.Ui.Output("Do you also remove these containers? (these are created by kubernetes)")
	c.Ui.Error("==> WARNING: boot2kubernetes can not detect below containers")
	c.Ui.Error("  are created by kubernetes which up by boot2kubernetes.")
	c.Ui.Error("  Be sure below these will not be used anymore!")
	for _, container := range relatedContainers {
		c.Ui.Output(fmt.Sprintf("  %s", container.Names[0]))
	}

	if yes, err := AskYesNo(); !yes || err != nil {
		if err == nil {
			c.Ui.Info("Containers will no be destroyed, since the confirmation")
			return 0
		}
		c.Ui.Error(fmt.Sprintf(
			"Terminate to destroy: %s", err.Error()))
		return 1
	}

	resultCh, errCh := removeContainers(client, relatedContainers, true, true)
	go func() {
		for res := range resultCh {
			c.Ui.Output(fmt.Sprintf(
				"Successfully removed %s", res.Names[0]))
		}
	}()

	for err := range errCh {
		c.Ui.Error(fmt.Sprintf("Error: %s", err))
	}

	return 0
}
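Note on Example #5: removeContainers is referenced but not shown. A hypothetical sketch of the fan-out it implies, assuming the same samalba/dockerclient-style Client (plus sync from the standard library): remove each container concurrently, report successes and failures on separate channels, and close both once every removal has finished so the range loops above terminate:

func removeContainers(client dockerclient.Client, containers []dockerclient.Container, force, volumes bool) (chan dockerclient.Container, chan error) {
	resultCh := make(chan dockerclient.Container)
	errCh := make(chan error)
	var wg sync.WaitGroup
	for _, container := range containers {
		wg.Add(1)
		go func(container dockerclient.Container) {
			defer wg.Done()
			// RemoveContainer(id, force, volumes) on the samalba/dockerclient interface.
			if err := client.RemoveContainer(container.Id, force, volumes); err != nil {
				errCh <- err
				return
			}
			resultCh <- container
		}(container)
	}
	go func() {
		// Close both channels once every removal goroutine has finished.
		wg.Wait()
		close(resultCh)
		close(errCh)
	}()
	return resultCh, errCh
}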