Example #1
// metadataLayerEquivalent returns true if the last layer of a is equivalent to b, assuming
// that b is squashed over multiple layers, and a is not. b, for instance, will have an empty
// slice entrypoint, while a would have a nil entrypoint.
func metadataLayerEquivalent(a, b *docker.Config) bool {
	if a.Entrypoint == nil && len(b.Entrypoint) == 0 {
		// we are forced to set Entrypoint [] to reset the entrypoint
		b.Entrypoint = nil
	}
	if len(a.OnBuild) == 1 && len(b.OnBuild) > 0 && a.OnBuild[0] == b.OnBuild[len(b.OnBuild)-1] {
		// a layered file will only contain the last OnBuild statement
		b.OnBuild = a.OnBuild
	}
	return metadataEqual(a, b)
}
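The Entrypoint normalization above exists because a squashed image reports an empty slice where a layered image reports nil, and a deep comparison would treat those as different. A minimal, runnable sketch of that pitfall, assuming the fsouza/go-dockerclient package used throughout these examples:

package main

import (
	"fmt"
	"reflect"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	layered := &docker.Config{Entrypoint: nil}
	squashed := &docker.Config{Entrypoint: []string{}}

	// A naive deep comparison treats nil and an empty slice as different...
	fmt.Println(reflect.DeepEqual(layered.Entrypoint, squashed.Entrypoint)) // false

	// ...so the helper resets the empty slice to nil before comparing.
	if layered.Entrypoint == nil && len(squashed.Entrypoint) == 0 {
		squashed.Entrypoint = nil
	}
	fmt.Println(reflect.DeepEqual(layered.Entrypoint, squashed.Entrypoint)) // true
}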
Example #2
// ModifyContainerConfig is called before the Docker createContainer call.
// The security context provider can make changes to the Config with which
// the container is created.
func (p SimpleSecurityContextProvider) ModifyContainerConfig(pod *api.Pod, container *api.Container, config *docker.Config) {
	if container.SecurityContext == nil {
		return
	}
	if container.SecurityContext.RunAsUser != nil {
		config.User = strconv.FormatInt(*container.SecurityContext.RunAsUser, 10)
	}
}
Example #3
// ModifyContainerConfig is called before the Docker createContainer call.
// The security context provider can make changes to the Config with which
// the container is created.
func (p SimpleSecurityContextProvider) ModifyContainerConfig(pod *api.Pod, container *api.Container, config *docker.Config) {
	effectiveSC := DetermineEffectiveSecurityContext(pod, container)
	if effectiveSC == nil {
		return
	}
	if effectiveSC.RunAsUser != nil {
		config.User = strconv.Itoa(int(*effectiveSC.RunAsUser))
	}
}
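Examples #2 and #3 differ only in how the RunAsUser value is converted into the string docker.Config.User field. A small, self-contained sketch of that conversion (the int64 below stands in for *SecurityContext.RunAsUser):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	runAsUser := int64(1001) // stands in for *container.SecurityContext.RunAsUser

	// Example #2: keeps the full int64 range.
	fmt.Println(strconv.FormatInt(runAsUser, 10)) // "1001"

	// Example #3: narrows to int first; equivalent for typical UIDs, but it
	// would truncate values outside the platform's int range on 32-bit builds.
	fmt.Println(strconv.Itoa(int(runAsUser))) // "1001"
}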
Example #4
func (t *Version) MigrateConfigTo(tag string) *docker.CreateContainerOptions {
	// Copy the container's config and host config so the originals are not mutated.
	config := *t.Container.Config
	hostconfig := *t.Container.HostConfig

	// Rewrite the image reference to use the new tag.
	r := regexp.MustCompile(":[^:]+$")
	config.Image = r.ReplaceAllString(config.Image, ":"+tag)

	c := docker.CreateContainerOptions{
		Name:       t.Container.Name,
		Config:     &config,
		HostConfig: &hostconfig,
	}
	return &c
}
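A runnable sketch of the tag-rewriting regexp used in MigrateConfigTo, shown on a few sample image references; note the caveat that a registry host:port with no tag also matches the pattern:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	r := regexp.MustCompile(":[^:]+$")
	for _, image := range []string{
		"crunchydata/app:1.0",     // tag is replaced
		"crunchydata/app",         // no tag: left unchanged
		"registry.local:5000/app", // no tag, but the port matches -- beware
	} {
		fmt.Println(image, "->", r.ReplaceAllString(image, ":2.0"))
	}
}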
Example #5
File: client.go  Project: Xmagicer/origin
// Run executes a single Run command against the current container using exec().
// Since exec does not allow ENV or WORKINGDIR to be set, we force the user
// command into a shell and perform those operations there first. Since RUN
// requires /bin/sh, we can use both 'cd' and 'export'.
func (e *ClientExecutor) Run(run Run, config docker.Config) error {
	args := make([]string, len(run.Args))
	copy(args, run.Args)

	if runtime.GOOS == "windows" {
		if len(config.WorkingDir) > 0 {
			args[0] = fmt.Sprintf("cd %s && %s", bashQuote(config.WorkingDir), args[0])
		}
		// TODO: implement windows ENV
		args = append([]string{"cmd", "/S", "/C"}, args...)
	} else {
		if len(config.WorkingDir) > 0 {
			args[0] = fmt.Sprintf("cd %s && %s", bashQuote(config.WorkingDir), args[0])
		}
		if len(config.Env) > 0 {
			args[0] = exportEnv(config.Env) + args[0]
		}
		args = append([]string{"/bin/sh", "-c"}, args...)
	}

	config.Cmd = args

	exec, err := e.Client.CreateExec(docker.CreateExecOptions{
		Cmd:          config.Cmd,
		Container:    e.Container.ID,
		AttachStdout: true,
		AttachStderr: true,
		User:         config.User,
	})
	if err != nil {
		return err
	}
	if err := e.Client.StartExec(exec.ID, docker.StartExecOptions{
		OutputStream: e.Out,
		ErrorStream:  e.ErrOut,
	}); err != nil {
		return err
	}
	status, err := e.Client.InspectExec(exec.ID)
	if err != nil {
		return err
	}
	if status.ExitCode != 0 {
		return fmt.Errorf("running '%s' failed with exit code %d", strings.Join(args, " "), status.ExitCode)
	}
	return nil
}
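A hedged sketch of how the Linux branch of Run composes the final command line. bashQuote and exportEnv are not shown in the example above, so naive stand-ins are used here purely for illustration:

package main

import (
	"fmt"
	"strings"
)

// naive stand-in for the example's bashQuote helper (assumed to shell-quote a path)
func bashQuote(s string) string { return "'" + s + "'" }

// naive stand-in for the example's exportEnv helper (assumed to emit "export K=V && " prefixes)
func exportEnv(env []string) string {
	var b strings.Builder
	for _, kv := range env {
		fmt.Fprintf(&b, "export %s && ", kv)
	}
	return b.String()
}

func main() {
	args := []string{"make test"}
	workingDir := "/src/app"
	env := []string{"GOFLAGS=-mod=vendor"}

	args[0] = fmt.Sprintf("cd %s && %s", bashQuote(workingDir), args[0])
	args[0] = exportEnv(env) + args[0]
	args = append([]string{"/bin/sh", "-c"}, args...)

	fmt.Println(args[2])
	// export GOFLAGS=-mod=vendor && cd '/src/app' && make test
}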
Example #6
File: docker.go  Project: smacc364/origin
// runContainerTar abstracts out the tar-related processing that was originally inline in RunContainer()
func runContainerTar(opts RunContainerOptions, config docker.Config, imageMetadata *docker.Image) (docker.Config, string) {
	tarDestination := ""
	if opts.TargetImage {
		return config, tarDestination
	}

	// base directory for all STI commands
	var commandBaseDir string
	// untar operation destination directory
	tarDestination = opts.Destination
	if len(tarDestination) == 0 {
		tarDestination = getDestination(imageMetadata)
	}
	if opts.ExternalScripts {
		// for external scripts we must always append 'scripts' because this is
		// the default subdirectory inside tar for them
		// NOTE: We use path.Join instead of filepath.Join to avoid converting the
		// path to UNC (Windows) format as we always run this inside container.
		commandBaseDir = path.Join(tarDestination, "scripts")
		glog.V(2).Infof("Both scripts and untarred source will be placed in '%s'", tarDestination)
	} else {
		// for internal scripts we can have separate path for scripts and untar operation destination
		scriptsURL := opts.ScriptsURL
		if len(scriptsURL) == 0 {
			scriptsURL = getScriptsURL(imageMetadata)
		}
		commandBaseDir = strings.TrimPrefix(scriptsURL, "image://")
		glog.V(2).Infof("Base directory for STI scripts is '%s'. Untarring destination is '%s'.",
			commandBaseDir, tarDestination)
	}

	// NOTE: We use path.Join instead of filepath.Join to avoid converting the
	// path to UNC (Windows) format as we always run this inside container.
	cmd := []string{path.Join(commandBaseDir, string(opts.Command))}
	// when calling assemble script with Stdin parameter set (the tar file)
	// we need to first untar the whole archive and only then call the assemble script
	if opts.Stdin != nil && (opts.Command == api.Assemble || opts.Command == api.Usage) {
		cmd = []string{"/bin/sh", "-c", fmt.Sprintf("tar -C %s -xf - && %s", tarDestination, cmd[0])}
		if opts.CommandOverrides != nil {
			cmd = []string{"/bin/sh", "-c", opts.CommandOverrides(strings.Join(cmd[2:], " "))}
		}
	}
	glog.V(5).Infof("Running %q command in container ...", strings.Join(cmd, " "))
	config.Cmd = cmd
	return config, tarDestination
}
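A small runnable sketch of the command construction performed by runContainerTar when Stdin carries a tar stream, also illustrating why path.Join (always "/") is used rather than filepath.Join for paths that are only ever interpreted inside the Linux container:

package main

import (
	"fmt"
	"path"
)

func main() {
	tarDestination := "/tmp/src"
	commandBaseDir := path.Join(tarDestination, "scripts") // external-scripts case

	cmd := []string{path.Join(commandBaseDir, "assemble")}
	// with Stdin set, untar the streamed archive first, then run the script
	cmd = []string{"/bin/sh", "-c", fmt.Sprintf("tar -C %s -xf - && %s", tarDestination, cmd[0])}

	fmt.Println(cmd[2])
	// tar -C /tmp/src -xf - && /tmp/src/scripts/assemble
}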
Example #7
// DockerRun performs a docker run.
func DockerRun(req *DockerRunRequest) (DockerRunResponse, error) {
	response := DockerRunResponse{}
	//logit.Info.Println("DockerRun called")
	swarmURL := os.Getenv("SWARM_MANAGER_URL")
	if swarmURL == "" {
		logit.Error.Println("SWARM_MANAGER_URL not set")
		return response, errors.New("SWARM_MANAGER_URL not set")
	}

	var envvars []string
	var i = 0
	if req.EnvVars != nil {
		envvars = make([]string, len(req.EnvVars)+1)
		for k, v := range req.EnvVars {
			envvars[i] = k + "=" + v
			i++
		}
	} else {
		envvars = make([]string, 1)
	}

	if req.Profile == "" {
		return response, errors.New("Profile was empty and should not be")
	}

	// The typical case is to add the profile constraint env var (SM, MED, LG).
	// For a restore job, however, we use a hard constraint on the host IP
	// address to pin the restored container to the same host where the backup
	// is stored.
	if req.IPAddress != "" {
		envvars[i] = "constraint:host==~" + req.IPAddress
	} else {
		envvars[i] = "constraint:profile==~" + req.Profile
	}

	docker, err := dockerapi.NewClient(swarmURL)
	if err != nil {
		logit.Error.Println(err.Error())
		return response, err
	}

	options := dockerapi.CreateContainerOptions{}
	config := dockerapi.Config{}
	config.Hostname = req.ContainerName
	options.Config = &config
	hostConfig := dockerapi.HostConfig{}
	options.HostConfig = &hostConfig
	options.Name = req.ContainerName
	options.Config.Env = envvars
	options.Config.Image = "crunchydata/" + req.Image
	//logit.Info.Println("swarmapi using " + options.Config.Image + " as the image name")
	options.Config.Volumes = make(map[string]struct{})

	// TODO: figure out CPU shares and memory settings; they differ from the
	// earlier values because this swarm implementation uses the Docker API
	// directly. Use the defaults for now.

	//options.HostConfig.CPUShares, err = strconv.ParseInt(req.CPU, 0, 64)
	//if err != nil {
	//logit.Error.Println(err.Error())
	//return response, err
	//}
	//options.HostConfig.Memory = req.MEM

	options.HostConfig.Binds = make([]string, 3)
	options.HostConfig.Binds[0] = req.PGDataPath + ":/pgdata"
	options.HostConfig.Binds[1] = "/var/cpm/data/keys:/keys"
	options.HostConfig.Binds[2] = "/var/cpm/config:/syslogconfig"

	container, err3 := docker.CreateContainer(options)
	if err3 != nil {
		logit.Error.Println(err3.Error())
		return response, err3
	}

	var startResponse DockerStartResponse
	startRequest := DockerStartRequest{}
	startRequest.ContainerName = req.ContainerName
	startResponse, err = DockerStart(&startRequest)
	if err != nil {
		logit.Error.Println(err.Error())
		return response, err
	}
	logit.Info.Println(startResponse.Output)
	//cmd := exec.Command(req.CommandPath, req.PGDataPath, req.ContainerName,
	//req.Image, req.CPU, req.MEM, allEnvVars)

	response.ID = container.ID
	return response, nil
}
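A simplified sketch of the environment-variable assembly in DockerRun above (using append rather than the fixed-size slice): the user-supplied variables plus one Swarm scheduling constraint, pinned either to a host IP for restore jobs or to a sizing profile otherwise:

package main

import "fmt"

func main() {
	reqEnvVars := map[string]string{"PG_USER": "postgres"} // stands in for req.EnvVars
	profile, ipAddress := "SM", ""                         // ipAddress is only set for restore jobs

	envvars := make([]string, 0, len(reqEnvVars)+1)
	for k, v := range reqEnvVars {
		envvars = append(envvars, k+"="+v)
	}
	if ipAddress != "" {
		envvars = append(envvars, "constraint:host==~"+ipAddress)
	} else {
		envvars = append(envvars, "constraint:profile==~"+profile)
	}
	fmt.Println(envvars) // [PG_USER=postgres constraint:profile==~SM]
}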
Example #8
// RunContainer creates and starts a container using the image specified in the options with the ability
// to stream input or output
func (d *stiDocker) RunContainer(opts RunContainerOptions) (err error) {
	// get info about the specified image
	image := getImageName(opts.Image)
	var imageMetadata *docker.Image
	if opts.PullImage {
		imageMetadata, err = d.CheckAndPullImage(image)
	} else {
		imageMetadata, err = d.client.InspectImage(image)
	}
	if err != nil {
		glog.Errorf("Unable to get image metadata for %s: %v", image, err)
		return err
	}

	config := docker.Config{
		Image: image,
		User:  opts.User,
	}

	config, tarDestination := runContainerTar(opts, config, imageMetadata)

	if opts.Env != nil {
		config.Env = opts.Env
	}
	if opts.Stdin != nil {
		config.OpenStdin = true
		config.StdinOnce = true
	}
	if opts.Stdout != nil {
		config.AttachStdout = true
	}

	glog.V(2).Infof("Creating container using config: %+v", config)
	ccopts := docker.CreateContainerOptions{Name: "", Config: &config}
	if opts.TargetImage {
		ccopts.HostConfig = &docker.HostConfig{PublishAllPorts: true, NetworkMode: opts.NetworkMode}
	} else if opts.NetworkMode != "" {
		ccopts.HostConfig = &docker.HostConfig{NetworkMode: opts.NetworkMode}
	}

	container, err := d.client.CreateContainer(ccopts)
	if err != nil {
		return err
	}
	defer d.RemoveContainer(container.ID)

	glog.V(2).Infof("Attaching to container")
	// creating/piping the channels inside runContainerAttach leads to unintended hangs
	attached := make(chan struct{})
	wg := runContainerAttach(attached, container, opts, d)
	attached <- <-attached

	glog.V(2).Infof("Starting container")
	if err = d.client.StartContainer(container.ID, nil); err != nil {
		return err
	}
	if opts.OnStart != nil {
		if err = opts.OnStart(); err != nil {
			return err
		}
	}

	if opts.TargetImage {
		runContainerDockerRun(container, d, image)
	} else {
		werr := runContainerWait(wg, d, container)
		if werr != nil {
			return werr
		}
	}

	if opts.PostExec != nil {
		glog.V(2).Infof("Invoking postExecution function")
		if err = opts.PostExec.PostExecute(container.ID, tarDestination); err != nil {
			return err
		}
	}
	return nil
}
Example #9
File: docker.go  Project: jhadvig/origin
// RunContainer creates and starts a container using the image specified in the options with the ability
// to stream input or output
func (d *stiDocker) RunContainer(opts RunContainerOptions) (err error) {
	// get info about the specified image
	image := getImageName(opts.Image)
	var imageMetadata *docker.Image
	if opts.PullImage {
		imageMetadata, err = d.CheckAndPullImage(image)
	} else {
		imageMetadata, err = d.client.InspectImage(image)
	}
	if err != nil {
		glog.Errorf("Unable to get image metadata for %s: %v", image, err)
		return err
	}

	// base directory for all STI commands
	var commandBaseDir string
	// untar operation destination directory
	tarDestination := opts.Destination
	if len(tarDestination) == 0 {
		tarDestination = getDestination(imageMetadata)
	}
	if opts.ExternalScripts {
		// for external scripts we must always append 'scripts' because this is
		// the default subdirectory inside tar for them
		commandBaseDir = filepath.Join(tarDestination, "scripts")
		glog.V(2).Infof("Both scripts and untarred source will be placed in '%s'", tarDestination)
	} else {
		// for internal scripts we can have separate path for scripts and untar operation destination
		scriptsURL := opts.ScriptsURL
		if len(scriptsURL) == 0 {
			scriptsURL = getScriptsURL(imageMetadata)
		}
		commandBaseDir = strings.TrimPrefix(scriptsURL, "image://")
		glog.V(2).Infof("Base directory for STI scripts is '%s'. Untarring destination is '%s'.",
			commandBaseDir, tarDestination)
	}

	cmd := []string{filepath.Join(commandBaseDir, string(opts.Command))}
	// when calling assemble script with Stdin parameter set (the tar file)
	// we need to first untar the whole archive and only then call the assemble script
	if opts.Stdin != nil && (opts.Command == api.Assemble || opts.Command == api.Usage) {
		cmd = []string{"/bin/sh", "-c", fmt.Sprintf("tar -C %s -xf - && %s",
			tarDestination, filepath.Join(commandBaseDir, string(opts.Command)))}
	}
	config := docker.Config{
		Image: image,
		Cmd:   cmd,
	}

	if opts.Env != nil {
		config.Env = opts.Env
	}
	if opts.Stdin != nil {
		config.OpenStdin = true
		config.StdinOnce = true
	}
	if opts.Stdout != nil {
		config.AttachStdout = true
	}

	glog.V(2).Infof("Creating container using config: %+v", config)
	container, err := d.client.CreateContainer(docker.CreateContainerOptions{Name: "", Config: &config})
	if err != nil {
		return err
	}
	defer d.RemoveContainer(container.ID)

	glog.V(2).Infof("Attaching to container")
	attached := make(chan struct{})
	attachOpts := docker.AttachToContainerOptions{
		Container: container.ID,
		Success:   attached,
		Stream:    true,
	}
	if opts.Stdin != nil {
		attachOpts.InputStream = opts.Stdin
		attachOpts.Stdin = true
	} else if opts.Stdout != nil {
		attachOpts.OutputStream = opts.Stdout
		attachOpts.Stdout = true
	}

	if opts.Stderr != nil {
		attachOpts.ErrorStream = opts.Stderr
		attachOpts.Stderr = true
	}

	wg := sync.WaitGroup{}
	// Add must run before the goroutine starts so that Wait below cannot return early.
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := d.client.AttachToContainer(attachOpts); err != nil {
			glog.Errorf("Unable to attach to container with %v", attachOpts)
		}
	}()
	attached <- <-attached

	// If attaching both stdin and stdout or stderr, attach stdout and stderr in
	// a second goroutine
	// TODO remove this goroutine when docker 1.4 will be in broad usage,
	// see: https://github.com/docker/docker/commit/f936a10d8048f471d115978472006e1b58a7c67d
	if opts.Stdin != nil && opts.Stdout != nil {
		attached2 := make(chan struct{})
		attachOpts2 := docker.AttachToContainerOptions{
			Container:    container.ID,
			Success:      attached2,
			Stream:       true,
			OutputStream: opts.Stdout,
			Stdout:       true,
		}
		if opts.Stderr != nil {
			attachOpts2.Stderr = true
			attachOpts2.ErrorStream = opts.Stderr
		}
		// as above, Add before starting the goroutine so Wait cannot miss it
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := d.client.AttachToContainer(attachOpts2); err != nil {
				glog.Errorf("Unable to attach to container with %v", attachOpts2)
			}
		}()
		attached2 <- <-attached2
	}

	glog.V(2).Infof("Starting container")
	if err = d.client.StartContainer(container.ID, nil); err != nil {
		return err
	}
	if opts.OnStart != nil {
		if err = opts.OnStart(); err != nil {
			return err
		}
	}

	glog.V(2).Infof("Waiting for container")
	exitCode, err := d.client.WaitContainer(container.ID)
	wg.Wait()
	if err != nil {
		return err
	}
	glog.V(2).Infof("Container exited")

	if exitCode != 0 {
		return errors.NewContainerError(container.Name, exitCode, "")
	}
	if opts.PostExec != nil {
		glog.V(2).Infof("Invoking postExecution function")
		if err = opts.PostExec.PostExecute(container.ID, tarDestination); err != nil {
			return err
		}
	}
	return nil
}
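The attached <- <-attached handshake above is worth spelling out: go-dockerclient (as used here) sends a sentinel on the Success channel once the attach request is in flight and then waits to read it back before streaming, so receiving and immediately resending the value blocks RunContainer until the attachment is established. A hedged sketch with a fake client that only imitates that contract:

package main

import (
	"fmt"
	"time"
)

// fakeAttach imitates the library's Success-channel contract for illustration
// only; the real call is d.client.AttachToContainer.
func fakeAttach(success chan struct{}) {
	time.Sleep(50 * time.Millisecond) // pretend to dial the daemon
	success <- struct{}{}             // signal: attach request accepted
	<-success                         // wait for the caller's acknowledgement
	fmt.Println("streaming started")
}

func main() {
	attached := make(chan struct{})
	go fakeAttach(attached)

	attached <- <-attached // receive the sentinel, then send it back
	fmt.Println("container can be started now")

	time.Sleep(100 * time.Millisecond) // give the streamer time to print
}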
Example #10
// RunContainer creates and starts a container using the image specified in the options with the ability
// to stream input or output
func (d *stiDocker) RunContainer(opts RunContainerOptions) (err error) {
	// get info about the specified image
	image := getImageName(opts.Image)
	var imageMetadata *docker.Image
	if opts.PullImage {
		imageMetadata, err = d.CheckAndPullImage(image)
	} else {
		imageMetadata, err = d.client.InspectImage(image)
	}
	if err != nil {
		glog.Errorf("Unable to get image metadata for %s: %v", image, err)
		return err
	}

	config := docker.Config{
		Image: image,
	}

	config, tarDestination := runContainerTar(opts, config, imageMetadata)

	if opts.Env != nil {
		config.Env = opts.Env
	}
	if opts.Stdin != nil {
		config.OpenStdin = true
		config.StdinOnce = true
	}
	if opts.Stdout != nil {
		config.AttachStdout = true
	}

	glog.V(2).Infof("Creating container using config: %+v", config)
	ccopts := docker.CreateContainerOptions{Name: "", Config: &config}
	if opts.TargetImage {
		ccopts.HostConfig = &docker.HostConfig{PublishAllPorts: true}
	}
	container, err := d.client.CreateContainer(ccopts)
	if err != nil {
		return err
	}
	defer d.RemoveContainer(container.ID)

	glog.V(2).Infof("Attaching to container")
	// creating/piping the channels inside runContainerAttachOne leads to unintended hangs
	attached := make(chan struct{})
	wg := runContainerAttachOne(attached, container, opts, d)
	attached <- <-attached

	// If attaching both stdin and stdout or stderr, attach stdout and stderr in
	// a second goroutine
	// TODO remove this goroutine when docker 1.4 will be in broad usage,
	// see: https://github.com/docker/docker/commit/f936a10d8048f471d115978472006e1b58a7c67d
	if opts.Stdin != nil && opts.Stdout != nil {
		// creating/piping the channels inside runContainerAttachTwo leads to unintended hangs
		attached2 := make(chan struct{})
		runContainerAttachTwo(attached2, container, opts, d, wg)
		attached2 <- <-attached2
	}

	glog.V(2).Infof("Starting container")
	if err = d.client.StartContainer(container.ID, nil); err != nil {
		return err
	}
	if opts.OnStart != nil {
		if err = opts.OnStart(); err != nil {
			return err
		}
	}

	if opts.TargetImage {
		runContainerDockerRun(container, d, image)
	} else {
		werr := runContainerWait(wg, d, container)
		if werr != nil {
			return werr
		}
	}

	if opts.PostExec != nil {
		glog.V(2).Infof("Invoking postExecution function")
		if err = opts.PostExec.PostExecute(container.ID, tarDestination); err != nil {
			return err
		}
	}
	return nil
}