// addWeaveWaitVolume ensures the proxy's weavewait volume is mounted
// read-only at /w inside the container, replacing any pre-existing bind
// on /w.
func (proxy *Proxy) addWeaveWaitVolume(hostConfig *docker.HostConfig) {
	var binds []string
	for _, bind := range hostConfig.Binds {
		s := strings.Split(bind, ":")
		// Drop any existing bind targeting /w; it is re-added below.
		if len(s) >= 2 && s[1] == "/w" {
			continue
		}
		binds = append(binds, bind)
	}
	hostConfig.Binds = append(binds, fmt.Sprintf("%s:/w:ro", proxy.weaveWaitVolume))
}
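// A minimal sketch of the rewrite performed above, expressed as a unit test.
// It is illustrative only: the Proxy literal, the volume name, and the bind
// strings are assumptions, and it presumes "reflect" and "testing" are
// imported in the test file. Only the Binds handling is exercised.
func TestAddWeaveWaitVolume(t *testing.T) {
	proxy := &Proxy{weaveWaitVolume: "weavewait-abc123"} // hypothetical volume name
	hostConfig := &docker.HostConfig{
		Binds: []string{
			"/data:/data",        // unrelated bind: preserved
			"stale-volume:/w:ro", // existing /w bind: dropped, then re-added
		},
	}

	proxy.addWeaveWaitVolume(hostConfig)

	want := []string{"/data:/data", "weavewait-abc123:/w:ro"}
	if !reflect.DeepEqual(hostConfig.Binds, want) {
		t.Fatalf("binds = %v, want %v", hostConfig.Binds, want)
	}
}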
// Build is a helper method to perform a Docker build against the
// provided Docker client. It will load the image if not specified,
// create a container if one does not already exist, and start a
// container if the Dockerfile contains RUN commands. It will clean up
// any containers it creates directly, and set the e.Image.ID field
// to the generated image.
func (e *ClientExecutor) Build(r io.Reader, args map[string]string) error {
	b := NewBuilder()
	b.Args = args

	if e.Excludes == nil {
		excludes, err := ParseDockerignore(e.Directory)
		if err != nil {
			return err
		}
		e.Excludes = append(excludes, ".dockerignore")
	}

	// TODO: check the Docker daemon version (1.20 is required for Upload)

	node, err := parser.Parse(r)
	if err != nil {
		return err
	}

	// identify the base image
	from, err := b.From(node)
	if err != nil {
		return err
	}

	// load the image
	if e.Image == nil {
		if from == NoBaseImageSpecifier {
			if runtime.GOOS == "windows" {
				return fmt.Errorf("building from scratch images is not supported")
			}
			from, err = e.CreateScratchImage()
			if err != nil {
				return fmt.Errorf("unable to create a scratch image for this build: %v", err)
			}
			defer e.CleanupImage(from)
		}
		glog.V(4).Infof("Retrieving image %q", from)
		e.Image, err = e.LoadImage(from)
		if err != nil {
			return err
		}
	}

	// update the builder with any information from the image, including ONBUILD
	// statements
	if err := b.FromImage(e.Image, node); err != nil {
		return err
	}

	b.RunConfig.Image = from
	if e.LogFn != nil {
		e.LogFn("FROM %s", from)
	}
	glog.V(4).Infof("step: FROM %s", from)

	var sharedMount string

	// create a container to execute in, if necessary
	mustStart := b.RequiresStart(node)
	if e.Container == nil {
		opts := docker.CreateContainerOptions{
			Config: &docker.Config{
				Image: from,
			},
		}
		if mustStart {
			// Transient mounts only make sense on images that will be running processes
			if len(e.TransientMounts) > 0 {
				volumeName, err := randSeq(imageSafeCharacters, 24)
				if err != nil {
					return err
				}
				v, err := e.Client.CreateVolume(docker.CreateVolumeOptions{Name: volumeName})
				if err != nil {
					return fmt.Errorf("unable to create volume to mount secrets: %v", err)
				}
				defer e.cleanupVolume(volumeName)
				sharedMount = v.Mountpoint
				opts.HostConfig = &docker.HostConfig{
					Binds: []string{sharedMount + ":/tmp/__temporarymount"},
				}
			}

			// TODO: windows support
			if len(e.Command) > 0 {
				opts.Config.Cmd = e.Command
				opts.Config.Entrypoint = nil
			} else {
				// TODO: replace me with a better default command
				opts.Config.Cmd = []string{"sleep 86400"}
				opts.Config.Entrypoint = []string{"/bin/sh", "-c"}
			}
		}
		if len(opts.Config.Cmd) == 0 {
			opts.Config.Entrypoint = []string{"/bin/sh", "-c", "# NOP"}
		}
		container, err := e.Client.CreateContainer(opts)
		if err != nil {
			return fmt.Errorf("unable to create build container: %v", err)
		}
		e.Container = container

		// if we create the container, take responsibility for cleaning up
		defer e.Cleanup()
	}

	// copy any source content into the temporary mount path
	if mustStart && len(e.TransientMounts) > 0 {
		var copies []Copy
		for i, mount := range e.TransientMounts {
			source := mount.SourcePath
			copies = append(copies, Copy{
				Src:  source,
				Dest: []string{path.Join("/tmp/__temporarymount", strconv.Itoa(i))},
			})
		}
		if err := e.Copy(copies...); err != nil {
			return fmt.Errorf("unable to copy build context into container: %v", err)
		}
	}

	// TODO: lazy start
	if mustStart && !e.Container.State.Running {
		var hostConfig docker.HostConfig
		if e.HostConfig != nil {
			hostConfig = *e.HostConfig
		}

		// mount individual items temporarily
		for i, mount := range e.TransientMounts {
			if len(sharedMount) == 0 {
				return fmt.Errorf("no mount point available for temporary mounts")
			}
			hostConfig.Binds = append(
				hostConfig.Binds,
				fmt.Sprintf("%s:%s:%s", path.Join(sharedMount, strconv.Itoa(i)), mount.DestinationPath, "ro"),
			)
		}

		if err := e.Client.StartContainer(e.Container.ID, &hostConfig); err != nil {
			return fmt.Errorf("unable to start build container: %v", err)
		}
		// TODO: is this racy? may have to loop wait in the actual run step
	}

	for _, child := range node.Children {
		step := b.Step()
		if err := step.Resolve(child); err != nil {
			return err
		}
		glog.V(4).Infof("step: %s", step.Original)
		if e.LogFn != nil {
			e.LogFn(step.Original)
		}
		if err := b.Run(step, e); err != nil {
			return err
		}
	}

	if mustStart {
		glog.V(4).Infof("Stopping container %s ...", e.Container.ID)
		if err := e.Client.StopContainer(e.Container.ID, 0); err != nil {
			return fmt.Errorf("unable to stop build container: %v", err)
		}
	}

	config := b.Config()
	var repository, tag string
	if len(e.Tag) > 0 {
		repository, tag = docker.ParseRepositoryTag(e.Tag)
		glog.V(4).Infof("Committing built container %s as image %q: %#v", e.Container.ID, e.Tag, config)
		if e.LogFn != nil {
			e.LogFn("Committing changes to %s ...", e.Tag)
		}
	} else {
		glog.V(4).Infof("Committing built container %s: %#v", e.Container.ID, config)
		if e.LogFn != nil {
			e.LogFn("Committing changes ...")
		}
	}

	image, err := e.Client.CommitContainer(docker.CommitContainerOptions{
		Author:     b.Author,
		Container:  e.Container.ID,
		Run:        config,
		Repository: repository,
		Tag:        tag,
	})
	if err != nil {
		return fmt.Errorf("unable to commit build container: %v", err)
	}

	e.Image = image
	glog.V(4).Infof("Committed %s to %s", e.Container.ID, e.Image.ID)
	if e.LogFn != nil {
		e.LogFn("Done")
	}
	return nil
}