Example #1
func (proxy *Proxy) setWeaveDNS(hostConfig *docker.HostConfig, hostname, dnsDomain string) error {
	hostConfig.DNS = append(hostConfig.DNS, proxy.dockerBridgeIP)

	if len(hostConfig.DNSSearch) == 0 {
		if hostname == "" {
			hostConfig.DNSSearch = []string{dnsDomain}
		} else {
			hostConfig.DNSSearch = []string{"."}
		}
	}

	return nil
}
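A minimal, self-contained sketch of the same DNS defaulting, using a local stand-in for docker.HostConfig (the Proxy type, its dockerBridgeIP field, and the bridge IP and domain values below are not from the example and are only illustrative):

package main

import "fmt"

// hostConfig stands in for docker.HostConfig; only the two fields the
// example touches are reproduced.
type hostConfig struct {
	DNS       []string
	DNSSearch []string
}

// setWeaveDNS mirrors the logic above: always add the bridge IP as a DNS
// server, and only default the search path when none was supplied.
func setWeaveDNS(hc *hostConfig, hostname, dnsDomain, bridgeIP string) {
	hc.DNS = append(hc.DNS, bridgeIP)
	if len(hc.DNSSearch) == 0 {
		if hostname == "" {
			hc.DNSSearch = []string{dnsDomain} // bare names resolve inside the DNS domain
		} else {
			hc.DNSSearch = []string{"."} // explicit hostname: disable search expansion
		}
	}
}

func main() {
	hc := &hostConfig{}
	setWeaveDNS(hc, "", "weave.local.", "172.17.0.1")
	fmt.Println(hc.DNS, hc.DNSSearch) // [172.17.0.1] [weave.local.]
}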
Example #2
func (proxy *Proxy) addWeaveWaitVolume(hostConfig *docker.HostConfig) {
	var binds []string
	for _, bind := range hostConfig.Binds {
		s := strings.Split(bind, ":")
		if len(s) >= 2 && s[1] == "/w" {
			continue
		}
		binds = append(binds, bind)
	}
	hostConfig.Binds = append(binds, fmt.Sprintf("%s:/w:ro", proxy.weaveWaitVolume))
}
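The helper above drops any pre-existing bind that targets /w before appending a single read-only bind of the weavewait volume. A standalone sketch of that filter-then-append step, assuming the usual host:container[:options] bind syntax (the volume path in main is a placeholder):

package main

import (
	"fmt"
	"strings"
)

// addWeaveWaitVolume drops any bind already targeting /w and appends a
// read-only bind for the given volume, mirroring the example above.
func addWeaveWaitVolume(binds []string, volume string) []string {
	var kept []string
	for _, bind := range binds {
		parts := strings.Split(bind, ":")
		if len(parts) >= 2 && parts[1] == "/w" {
			continue // an old /w bind would shadow the new one
		}
		kept = append(kept, bind)
	}
	return append(kept, fmt.Sprintf("%s:/w:ro", volume))
}

func main() {
	binds := []string{"/data:/data", "/old:/w"}
	fmt.Println(addWeaveWaitVolume(binds, "/var/lib/weavewait"))
	// [/data:/data /var/lib/weavewait:/w:ro]
}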
Example #3
func (c *Container) populateAdditionalHostConfig(hostConfig *dockerclient.HostConfig) error {
	links := map[string]string{}

	for _, link := range c.service.DependentServices() {
		if _, ok := c.service.context.Project.Configs[link.Target]; !ok {
			continue
		}

		service, err := c.service.context.Project.CreateService(link.Target)
		if err != nil {
			return err
		}

		containers, err := service.Containers()
		if err != nil {
			return err
		}

		if link.Type == project.RelTypeLink {
			c.addLinks(links, service, link, containers)
		} else if link.Type == project.RelTypeIpcNamespace {
			hostConfig, err = c.addIpc(hostConfig, service, containers)
		} else if link.Type == project.RelTypeNetNamespace {
			hostConfig, err = c.addNetNs(hostConfig, service, containers)
		}

		if err != nil {
			return err
		}
	}

	hostConfig.Links = []string{}
	for k, v := range links {
		hostConfig.Links = append(hostConfig.Links, strings.Join([]string{v, k}, ":"))
	}
	for _, v := range c.service.Config().ExternalLinks {
		hostConfig.Links = append(hostConfig.Links, v)
	}

	return nil
}
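The links map collects alias-to-container pairs, and the final loops serialise them into Docker's "containerName:alias" link strings before appending any externally defined links verbatim. A small sketch of just that serialisation (the container, alias, and external link names are invented):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// alias -> target container name, as populated by addLinks above
	links := map[string]string{"db": "project_db_1"}
	external := []string{"redis:cache"}

	var out []string
	for alias, target := range links {
		// Docker expects each link as "containerName:alias"
		out = append(out, strings.Join([]string{target, alias}, ":"))
	}
	out = append(out, external...)
	fmt.Println(out) // [project_db_1:db redis:cache]
}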
Example #4
func (c *Container) addNetNs(config *dockerclient.HostConfig, service project.Service, containers []project.Container) (*dockerclient.HostConfig, error) {
	if len(containers) == 0 {
		return nil, fmt.Errorf("Failed to find container for network namespace %v", c.service.Config().Net)
	}

	id, err := containers[0].ID()
	if err != nil {
		return nil, err
	}

	config.NetworkMode = "container:" + id
	return config, nil
}
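addNetNs, like the addIpc counterpart called from Example #3, points the new container at an existing one by ID. A tiny sketch of the resulting mode string; Docker accepts either a container ID or name after the "container:" prefix (the ID below is fabricated):

package main

import "fmt"

func main() {
	id := "3f2a1c9d8e7b" // fabricated container ID
	// Sharing namespaces with an existing container uses the
	// "container:<id-or-name>" form for both network and IPC modes.
	networkMode := "container:" + id
	ipcMode := "container:" + id
	fmt.Println(networkMode, ipcMode)
}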
Example #5
func (dg *DockerGoClient) CreateContainer(config *docker.Config, hostConfig *docker.HostConfig, name string) DockerContainerMetadata {
	timeout := ttime.After(createContainerTimeout)
	hostConfig.Privileged = true

	ctx, cancelFunc := context.WithCancel(context.TODO()) // Could pass one through from engine
	response := make(chan DockerContainerMetadata, 1)
	go func() { response <- dg.createContainer(ctx, config, hostConfig, name) }()
	select {
	case resp := <-response:
		return resp
	case <-timeout:
		cancelFunc()
		return DockerContainerMetadata{Error: &DockerTimeoutError{createContainerTimeout, "created"}}
	}
}
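Example #5 and Example #7 below are two halves of one timeout wrapper: the exported method races the real call against a timer and cancels a context when the timer wins, while the inner call re-checks that context after the blocking Docker request so a late result is quietly dropped. A self-contained sketch of the same pattern with the Docker call replaced by a stub (slowCreate and createWithTimeout are invented names):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// slowCreate stands in for the blocking Docker CreateContainer call.
func slowCreate(ctx context.Context) string {
	time.Sleep(2 * time.Second) // pretend the daemon is slow
	select {
	case <-ctx.Done():
		return "" // caller already timed out; drop the result
	default:
	}
	return "container-id"
}

// createWithTimeout mirrors Example #5: run the call in a goroutine and
// race its buffered result channel against a timer.
func createWithTimeout(timeout time.Duration) (string, error) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // releases the in-flight call on every return path

	result := make(chan string, 1) // buffered so the goroutine never leaks
	go func() { result <- slowCreate(ctx) }()

	select {
	case id := <-result:
		return id, nil
	case <-time.After(timeout):
		return "", errors.New("create container timed out")
	}
}

func main() {
	_, err := createWithTimeout(500 * time.Millisecond)
	fmt.Println(err) // create container timed out
}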
Example #6
// ModifyHostConfig is called before the Docker runContainer call.
// The security context provider can make changes to the HostConfig, affecting
// security options, whether the container is privileged, volume binds, etc.
func (p SimpleSecurityContextProvider) ModifyHostConfig(pod *api.Pod, container *api.Container, hostConfig *docker.HostConfig) {
	// Apply pod security context
	if container.Name != leaky.PodInfraContainerName && pod.Spec.SecurityContext != nil {
		// TODO: We skip application of supplemental groups to the
		// infra container to work around a runc issue which
		// requires containers to have the '/etc/group'. For
		// more information see:
		// https://github.com/opencontainers/runc/pull/313
		// This can be removed once the fix makes it into the
		// required version of docker.
		if pod.Spec.SecurityContext.SupplementalGroups != nil {
			hostConfig.GroupAdd = make([]string, len(pod.Spec.SecurityContext.SupplementalGroups))
			for i, group := range pod.Spec.SecurityContext.SupplementalGroups {
				hostConfig.GroupAdd[i] = strconv.Itoa(int(group))
			}
		}

		if pod.Spec.SecurityContext.FSGroup != nil {
			hostConfig.GroupAdd = append(hostConfig.GroupAdd, strconv.Itoa(int(*pod.Spec.SecurityContext.FSGroup)))
		}
	}

	// Apply effective security context for container
	effectiveSC := DetermineEffectiveSecurityContext(pod, container)
	if effectiveSC == nil {
		return
	}

	if effectiveSC.Privileged != nil {
		hostConfig.Privileged = *effectiveSC.Privileged
	}

	if effectiveSC.Capabilities != nil {
		add, drop := MakeCapabilities(effectiveSC.Capabilities.Add, effectiveSC.Capabilities.Drop)
		hostConfig.CapAdd = add
		hostConfig.CapDrop = drop
	}

	if effectiveSC.SELinuxOptions != nil {
		hostConfig.SecurityOpt = modifySecurityOption(hostConfig.SecurityOpt, dockerLabelUser, effectiveSC.SELinuxOptions.User)
		hostConfig.SecurityOpt = modifySecurityOption(hostConfig.SecurityOpt, dockerLabelRole, effectiveSC.SELinuxOptions.Role)
		hostConfig.SecurityOpt = modifySecurityOption(hostConfig.SecurityOpt, dockerLabelType, effectiveSC.SELinuxOptions.Type)
		hostConfig.SecurityOpt = modifySecurityOption(hostConfig.SecurityOpt, dockerLabelLevel, effectiveSC.SELinuxOptions.Level)
	}
}
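A sketch of the group handling above: supplemental group IDs and the optional fsGroup are rendered as decimal strings for HostConfig.GroupAdd. The modifySecurityOption helper is not shown in the example, so the stand-in below (append "name:value", skip empty values) is an assumption about its behaviour, and the "label:level" key in main is only illustrative:

package main

import (
	"fmt"
	"strconv"
)

// groupAdd renders supplemental group IDs plus an optional fsGroup the way
// the example above fills HostConfig.GroupAdd.
func groupAdd(supplemental []int64, fsGroup *int64) []string {
	groups := make([]string, len(supplemental))
	for i, g := range supplemental {
		groups[i] = strconv.Itoa(int(g))
	}
	if fsGroup != nil {
		groups = append(groups, strconv.Itoa(int(*fsGroup)))
	}
	return groups
}

// modifySecurityOption is an assumed stand-in for the unexported helper:
// append "name:value" and leave the slice unchanged for empty values.
func modifySecurityOption(opts []string, name, value string) []string {
	if value == "" {
		return opts
	}
	return append(opts, name+":"+value)
}

func main() {
	fs := int64(2000)
	fmt.Println(groupAdd([]int64{1000, 1001}, &fs)) // [1000 1001 2000]
	fmt.Println(modifySecurityOption(nil, "label:level", "s0:c123,c456"))
	// [label:level:s0:c123,c456]
}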
Example #7
func (dg *DockerGoClient) createContainer(ctx context.Context, config *docker.Config, hostConfig *docker.HostConfig, name string) DockerContainerMetadata {
	client := dg.dockerClient
	hostConfig.Privileged = true

	containerOptions := docker.CreateContainerOptions{Config: config, HostConfig: hostConfig, Name: name}
	dockerContainer, err := client.CreateContainer(containerOptions)
	select {
	case <-ctx.Done():
		// Parent function already timed out; no need to get container metadata
		return DockerContainerMetadata{}
	default:
	}
	if err != nil {
		return DockerContainerMetadata{Error: CannotXContainerError{"Create", err.Error()}}
	}
	return dg.containerMetadata(dockerContainer.ID)
}
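The select with an empty default is Go's non-blocking check on the cancelled context: if the caller has already timed out, the metadata lookup for the freshly created container is skipped. The idiom in isolation:

package main

import (
	"context"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate the caller having timed out already

	select {
	case <-ctx.Done():
		fmt.Println("caller gave up; skip the metadata lookup")
	default:
		fmt.Println("still wanted; continue")
	}
}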
Example #8
// asDockerHostConfig converts a RunContainerOptions into a HostConfig
// understood by the go-dockerclient library.
func (rco RunContainerOptions) asDockerHostConfig() docker.HostConfig {
	hostConfig := docker.HostConfig{
		CapDrop:         rco.CapDrop,
		PublishAllPorts: rco.TargetImage,
		NetworkMode:     rco.NetworkMode,
	}
	if rco.CGroupLimits != nil {
		hostConfig.Memory = rco.CGroupLimits.MemoryLimitBytes
		hostConfig.MemorySwap = rco.CGroupLimits.MemorySwap
		hostConfig.CPUShares = rco.CGroupLimits.CPUShares
		hostConfig.CPUQuota = rco.CGroupLimits.CPUQuota
		hostConfig.CPUPeriod = rco.CGroupLimits.CPUPeriod
	}
	return hostConfig
}
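A hedged usage sketch of the same conversion with local stand-in types; the CGroupLimits field names are taken from the example, while the HostConfig fields they land in are assumed to match go-dockerclient's:

package main

import "fmt"

// Local stand-ins for the types used above; only the fields the conversion
// touches are reproduced.
type cgroupLimits struct {
	MemoryLimitBytes int64
	MemorySwap       int64
	CPUShares        int64
	CPUQuota         int64
	CPUPeriod        int64
}

type hostConfig struct {
	Memory, MemorySwap, CPUShares, CPUQuota, CPUPeriod int64
	PublishAllPorts                                    bool
}

// asHostConfig mirrors asDockerHostConfig: copy the limits across only when
// they were supplied.
func asHostConfig(limits *cgroupLimits, targetImage bool) hostConfig {
	hc := hostConfig{PublishAllPorts: targetImage}
	if limits != nil {
		hc.Memory = limits.MemoryLimitBytes
		hc.MemorySwap = limits.MemorySwap
		hc.CPUShares = limits.CPUShares
		hc.CPUQuota = limits.CPUQuota
		hc.CPUPeriod = limits.CPUPeriod
	}
	return hc
}

func main() {
	limits := &cgroupLimits{MemoryLimitBytes: 512 << 20, CPUQuota: 50000, CPUPeriod: 100000}
	fmt.Printf("%+v\n", asHostConfig(limits, true))
}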
Example #9
// ModifyHostConfig is called before the Docker runContainer call.
// The security context provider can make changes to the HostConfig, affecting
// security options, whether the container is privileged, volume binds, etc.
func (p SimpleSecurityContextProvider) ModifyHostConfig(pod *api.Pod, container *api.Container, hostConfig *docker.HostConfig) {
	if container.SecurityContext == nil {
		return
	}
	if container.SecurityContext.Privileged != nil {
		hostConfig.Privileged = *container.SecurityContext.Privileged
	}

	if container.SecurityContext.Capabilities != nil {
		add, drop := makeCapabilites(container.SecurityContext.Capabilities.Add, container.SecurityContext.Capabilities.Drop)
		hostConfig.CapAdd = add
		hostConfig.CapDrop = drop
	}

	if container.SecurityContext.SELinuxOptions != nil {
		hostConfig.SecurityOpt = modifySecurityOption(hostConfig.SecurityOpt, dockerLabelUser, container.SecurityContext.SELinuxOptions.User)
		hostConfig.SecurityOpt = modifySecurityOption(hostConfig.SecurityOpt, dockerLabelRole, container.SecurityContext.SELinuxOptions.Role)
		hostConfig.SecurityOpt = modifySecurityOption(hostConfig.SecurityOpt, dockerLabelType, container.SecurityContext.SELinuxOptions.Type)
		hostConfig.SecurityOpt = modifySecurityOption(hostConfig.SecurityOpt, dockerLabelLevel, container.SecurityContext.SELinuxOptions.Level)
	}
}
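The makeCapabilites helper (spelled that way in the source) is not shown; a plausible stand-in is sketched below, assuming it does no more than convert the API capability names into the plain strings Docker expects for CapAdd and CapDrop:

package main

import "fmt"

// capability mimics the Kubernetes api.Capability string type.
type capability string

// makeCapabilities converts add/drop capability lists into plain string
// slices; this mirrors what the example's helper presumably does.
func makeCapabilities(add, drop []capability) ([]string, []string) {
	toStrings := func(caps []capability) []string {
		out := make([]string, 0, len(caps))
		for _, c := range caps {
			out = append(out, string(c))
		}
		return out
	}
	return toStrings(add), toStrings(drop)
}

func main() {
	add, drop := makeCapabilities([]capability{"NET_ADMIN"}, []capability{"MKNOD"})
	fmt.Println(add, drop) // [NET_ADMIN] [MKNOD]
}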
Example #10
// Build is a helper method to perform a Docker build against the
// provided Docker client. It will load the image if not specified,
// create a container if one does not already exist, and start a
// container if the Dockerfile contains RUN commands. It will cleanup
// any containers it creates directly, and set the e.Image.ID field
// to the generated image.
func (e *ClientExecutor) Build(r io.Reader, args map[string]string) error {
	b := NewBuilder()
	b.Args = args

	if e.Excludes == nil {
		excludes, err := ParseDockerignore(e.Directory)
		if err != nil {
			return err
		}
		e.Excludes = append(excludes, ".dockerignore")
	}

	// TODO: check the Docker daemon version (1.20 is required for Upload)

	node, err := parser.Parse(r)
	if err != nil {
		return err
	}

	// identify the base image
	from, err := b.From(node)
	if err != nil {
		return err
	}
	// load the image
	if e.Image == nil {
		if from == NoBaseImageSpecifier {
			if runtime.GOOS == "windows" {
				return fmt.Errorf("building from scratch images is not supported")
			}
			from, err = e.CreateScratchImage()
			if err != nil {
				return fmt.Errorf("unable to create a scratch image for this build: %v", err)
			}
			defer e.CleanupImage(from)
		}
		glog.V(4).Infof("Retrieving image %q", from)
		e.Image, err = e.LoadImage(from)
		if err != nil {
			return err
		}
	}

	// update the builder with any information from the image, including ONBUILD
	// statements
	if err := b.FromImage(e.Image, node); err != nil {
		return err
	}

	b.RunConfig.Image = from
	e.LogFn("FROM %s", from)
	glog.V(4).Infof("step: FROM %s", from)

	var sharedMount string

	// create a container to execute in, if necessary
	mustStart := b.RequiresStart(node)
	if e.Container == nil {
		opts := docker.CreateContainerOptions{
			Config: &docker.Config{
				Image: from,
			},
		}
		if mustStart {
			// Transient mounts only make sense on images that will be running processes
			if len(e.TransientMounts) > 0 {
				volumeName, err := randSeq(imageSafeCharacters, 24)
				if err != nil {
					return err
				}
				v, err := e.Client.CreateVolume(docker.CreateVolumeOptions{Name: volumeName})
				if err != nil {
					return fmt.Errorf("unable to create volume to mount secrets: %v", err)
				}
				defer e.cleanupVolume(volumeName)
				sharedMount = v.Mountpoint
				opts.HostConfig = &docker.HostConfig{
					Binds: []string{sharedMount + ":/tmp/__temporarymount"},
				}
			}

			// TODO: windows support
			if len(e.Command) > 0 {
				opts.Config.Cmd = e.Command
				opts.Config.Entrypoint = nil
			} else {
				// TODO: replace me with a better default command
				opts.Config.Cmd = []string{"sleep 86400"}
				opts.Config.Entrypoint = []string{"/bin/sh", "-c"}
			}
		}
		if len(opts.Config.Cmd) == 0 {
			opts.Config.Entrypoint = []string{"/bin/sh", "-c", "# NOP"}
		}
		container, err := e.Client.CreateContainer(opts)
		if err != nil {
			return fmt.Errorf("unable to create build container: %v", err)
		}
		e.Container = container

		// if we create the container, take responsibility for cleaning up
		defer e.Cleanup()
	}

	// copy any source content into the temporary mount path
	if mustStart && len(e.TransientMounts) > 0 {
		var copies []Copy
		for i, mount := range e.TransientMounts {
			source := mount.SourcePath
			copies = append(copies, Copy{
				Src:  source,
				Dest: []string{path.Join("/tmp/__temporarymount", strconv.Itoa(i))},
			})
		}
		if err := e.Copy(copies...); err != nil {
			return fmt.Errorf("unable to copy build context into container: %v", err)
		}
	}

	// TODO: lazy start
	if mustStart && !e.Container.State.Running {
		var hostConfig docker.HostConfig
		if e.HostConfig != nil {
			hostConfig = *e.HostConfig
		}

		// mount individual items temporarily
		for i, mount := range e.TransientMounts {
			if len(sharedMount) == 0 {
				return fmt.Errorf("no mount point available for temporary mounts")
			}
			hostConfig.Binds = append(
				hostConfig.Binds,
				fmt.Sprintf("%s:%s:%s", path.Join(sharedMount, strconv.Itoa(i)), mount.DestinationPath, "ro"),
			)
		}

		if err := e.Client.StartContainer(e.Container.ID, &hostConfig); err != nil {
			return fmt.Errorf("unable to start build container: %v", err)
		}
		// TODO: is this racy? may have to loop wait in the actual run step
	}

	for _, child := range node.Children {
		step := b.Step()
		if err := step.Resolve(child); err != nil {
			return err
		}
		glog.V(4).Infof("step: %s", step.Original)
		if e.LogFn != nil {
			e.LogFn(step.Original)
		}
		if err := b.Run(step, e); err != nil {
			return err
		}
	}

	if mustStart {
		glog.V(4).Infof("Stopping container %s ...", e.Container.ID)
		if err := e.Client.StopContainer(e.Container.ID, 0); err != nil {
			return fmt.Errorf("unable to stop build container: %v", err)
		}
	}

	config := b.Config()
	var repository, tag string
	if len(e.Tag) > 0 {
		repository, tag = docker.ParseRepositoryTag(e.Tag)
		glog.V(4).Infof("Committing built container %s as image %q: %#v", e.Container.ID, e.Tag, config)
		if e.LogFn != nil {
			e.LogFn("Committing changes to %s ...", e.Tag)
		}
	} else {
		glog.V(4).Infof("Committing built container %s: %#v", e.Container.ID, config)
		if e.LogFn != nil {
			e.LogFn("Committing changes ...")
		}
	}

	image, err := e.Client.CommitContainer(docker.CommitContainerOptions{
		Author:     b.Author,
		Container:  e.Container.ID,
		Run:        config,
		Repository: repository,
		Tag:        tag,
	})
	if err != nil {
		return fmt.Errorf("unable to commit build container: %v", err)
	}
	e.Image = image
	glog.V(4).Infof("Committed %s to %s", e.Container.ID, e.Image.ID)
	if e.LogFn != nil {
		e.LogFn("Done")
	}
	return nil
}
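One detail worth isolating from the long function above is how transient mounts are wired: each source is first copied into an index-numbered subdirectory of the shared volume, and that subdirectory is then bind-mounted read-only at the requested destination when the container starts. A small sketch of just the bind-string construction (the mountpoint and destinations are invented):

package main

import (
	"fmt"
	"path"
	"strconv"
)

// transientBinds builds the read-only bind entries the example appends to
// hostConfig.Binds, one per transient mount, keyed by its index under the
// shared volume mountpoint.
func transientBinds(sharedMount string, destinations []string) []string {
	binds := make([]string, 0, len(destinations))
	for i, dest := range destinations {
		binds = append(binds,
			fmt.Sprintf("%s:%s:%s", path.Join(sharedMount, strconv.Itoa(i)), dest, "ro"))
	}
	return binds
}

func main() {
	// Hypothetical volume mountpoint and mount destinations.
	fmt.Println(transientBinds("/var/lib/docker/volumes/build123/_data",
		[]string{"/run/secrets", "/etc/pki/extra"}))
	// [/var/lib/docker/volumes/build123/_data/0:/run/secrets:ro
	//  /var/lib/docker/volumes/build123/_data/1:/etc/pki/extra:ro]
}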