func pullCommand(c *cli.Context) {
	args := c.Args()
	if len(args) < 1 {
		log.Fatal("rocker pull <image>")
	}

	dockerClient, err := dockerclient.NewFromCli(c)
	if err != nil {
		log.Fatal(err)
	}

	cacheDir, err := util.MakeAbsolute(c.String("cache-dir"))
	if err != nil {
		log.Fatal(err)
	}

	options := build.DockerClientOptions{
		Client:                   dockerClient,
		Auth:                     initAuth(c),
		Log:                      log.StandardLogger(),
		S3storage:                s3.New(dockerClient, cacheDir),
		StdoutContainerFormatter: log.StandardLogger().Formatter,
		StderrContainerFormatter: log.StandardLogger().Formatter,
	}
	client := build.NewDockerClient(options)

	if err := client.PullImage(args[0]); err != nil {
		log.Fatal(err)
	}
}
// PullDockerImage pulls an image and streams to a logger respecting terminal features
func PullDockerImage(client *docker.Client, image *imagename.ImageName, auth *docker.AuthConfigurations) (*docker.Image, error) {
	if image.Storage == imagename.StorageS3 {
		s3storage := s3.New(client, os.TempDir())
		if err := s3storage.Pull(image.String()); err != nil {
			return nil, err
		}
	} else {
		pipeReader, pipeWriter := io.Pipe()

		pullOpts := docker.PullImageOptions{
			Repository:    image.NameWithRegistry(),
			Registry:      image.Registry,
			Tag:           image.Tag,
			OutputStream:  pipeWriter,
			RawJSONStream: true,
		}

		repoAuth, err := dockerclient.GetAuthForRegistry(auth, image)
		if err != nil {
			return nil, fmt.Errorf("Failed to authenticate registry %s, error: %s", image.Registry, err)
		}

		errch := make(chan error, 1)

		go func() {
			err := client.PullImage(pullOpts, repoAuth)

			if err := pipeWriter.Close(); err != nil {
				log.Errorf("Failed to close pull image stream for %s, error: %s", image, err)
			}

			errch <- err
		}()

		def := log.StandardLogger()
		fd, isTerminal := term.GetFdInfo(def.Out)
		out := def.Out

		if !isTerminal {
			out = def.Writer()
		}

		if err := jsonmessage.DisplayJSONMessagesStream(pipeReader, out, fd, isTerminal); err != nil {
			return nil, fmt.Errorf("Failed to process json stream for image: %s, error: %s", image, err)
		}

		if err := <-errch; err != nil {
			return nil, fmt.Errorf("Failed to pull image %s, error: %s", image, err)
		}
	}

	img, err := client.InspectImage(image.String())
	if err != nil {
		return nil, fmt.Errorf("Failed to inspect image %s after pull, error: %s", image, err)
	}

	return img, nil
}
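// The function below is a usage sketch, not part of the original source: it
// shows how PullDockerImage might be called with a go-dockerclient client
// built from the environment and empty registry auth. The image name and the
// example function itself are hypothetical.
func examplePullDockerImage() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}

	image := imagename.NewFromString("busybox:latest")

	img, err := PullDockerImage(client, image, &docker.AuthConfigurations{})
	if err != nil {
		log.Fatal(err)
	}

	log.Infof("Pulled image %s (ID %s)", image, img.ID)
}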
// resolveVersions walks through the list of images and resolves their tags in case they are not strict
func (client *DockerClient) resolveVersions(local, hub bool, vars template.Vars, containers []*Container) (err error) {
	// Provide function getter of all images to fetch only once
	var available []*imagename.ImageName
	getImages := func() ([]*imagename.ImageName, error) {
		if available == nil {
			available = []*imagename.ImageName{}

			if !local {
				return available, nil
			}

			// retrieving images currently available in docker
			var dockerImages []docker.APIImages
			if dockerImages, err = client.Docker.ListImages(docker.ListImagesOptions{}); err != nil {
				return nil, err
			}

			for _, image := range dockerImages {
				for _, repoTag := range image.RepoTags {
					available = append(available, imagename.NewFromString(repoTag))
				}
			}
		}
		return available, nil
	}

	resolved := map[string]*imagename.ImageName{}

	// check images for each container
	for _, container := range containers {
		// error in configuration, fail fast
		if container.Image == nil {
			err = fmt.Errorf("Image is not specified for the container: %s", container.Name)
			return
		}

		// Version specified in variables
		var k string

		k = fmt.Sprintf("v_image_%s", container.Image.NameWithRegistry())
		if tag, ok := vars[k]; ok {
			log.Infof("Resolve %s --> %s (derived by variable %s)", container.Image, tag, k)
			container.Image.SetTag(tag.(string))
		}

		k = fmt.Sprintf("v_container_%s", container.Name.Name)
		if tag, ok := vars[k]; ok {
			log.Infof("Resolve %s --> %s (derived by variable %s)", container.Image, tag, k)
			container.Image.SetTag(tag.(string))
		}

		// Do not resolve anything if the image is strict, e.g. "redis:2.8.11" or "redis:latest"
		if container.Image.IsStrict() {
			continue
		}

		// already resolved it for other container
		if _, ok := resolved[container.Image.String()]; ok {
			container.Image = resolved[container.Image.String()]
			continue
		}

		// Override to not change the common images slice
		var images []*imagename.ImageName
		if images, err = getImages(); err != nil {
			return err
		}

		// looking locally first
		candidate := container.Image.ResolveVersion(images, true)

		// in case we want to include external images as well, pulling list of available
		// images from repository or central docker hub
		if hub || candidate == nil {
			log.Debugf("Getting list of tags for %s from the registry", container.Image)

			var remote []*imagename.ImageName
			if container.Image.Storage == imagename.StorageS3 {
				s3storage := s3.New(client.Docker, os.TempDir())
				remote, err = s3storage.ListTags(container.Image.String())
			} else {
				remote, err = dockerclient.RegistryListTags(container.Image, client.Auth)
			}

			if err != nil {
				return fmt.Errorf("Failed to list tags of image %s for container %s from the remote registry, error: %s",
					container.Image, container.Name, err)
			}

			log.Debugf("remote: %v", remote)

			// Re-Resolve having hub tags
			candidate = container.Image.ResolveVersion(append(images, remote...), false)
		}

		if candidate == nil {
			err = fmt.Errorf("Image not found: %s", container.Image)
			return
		}

		candidate.IsOldS3Name = container.Image.IsOldS3Name

		log.Infof("Resolve %s --> %s", container.Image, candidate.GetTag())

		container.Image = candidate
		resolved[container.Image.String()] = candidate
	}

	return
}
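// Sketch (hypothetical tags, not part of the original source): a wildcard tag
// such as "redis:2.8.*" is not strict, so resolveVersions matches it against
// the tags that are actually available. The second argument to ResolveVersion
// mirrors the "looking locally first" call made above.
func exampleResolveWildcardTag() {
	img := imagename.NewFromString("redis:2.8.*")

	available := []*imagename.ImageName{
		imagename.NewFromString("redis:2.8.11"),
		imagename.NewFromString("redis:3.0.1"),
	}

	if candidate := img.ResolveVersion(available, true); candidate != nil {
		log.Infof("Resolve %s --> %s", img, candidate.GetTag())
	}
}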
func buildCommand(c *cli.Context) {
	var (
		rockerfile *build.Rockerfile
		err        error
	)

	// We don't want info level for 'print' mode
	// So log only errors unless 'debug' is on
	if c.Bool("print") && log.StandardLogger().Level != log.DebugLevel {
		log.StandardLogger().Level = log.ErrorLevel
	}

	vars, err := template.VarsFromFileMulti(c.StringSlice("vars"))
	if err != nil {
		log.Fatal(err)
		os.Exit(1)
	}

	cliVars, err := template.VarsFromStrings(c.StringSlice("var"))
	if err != nil {
		log.Fatal(err)
	}

	vars = vars.Merge(cliVars)

	if c.Bool("demand-artifacts") {
		vars["DemandArtifacts"] = true
	}

	wd, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}

	configFilename := c.String("file")
	contextDir := wd

	if configFilename == "-" {
		rockerfile, err = build.NewRockerfile(filepath.Base(wd), os.Stdin, vars, template.Funs{})
		if err != nil {
			log.Fatal(err)
		}
	} else {
		if !filepath.IsAbs(configFilename) {
			configFilename = filepath.Join(wd, configFilename)
		}

		rockerfile, err = build.NewRockerfileFromFile(configFilename, vars, template.Funs{})
		if err != nil {
			log.Fatal(err)
		}

		// Initialize context dir
		contextDir = filepath.Dir(configFilename)
	}

	args := c.Args()
	if len(args) > 0 {
		contextDir = args[0]
		if !filepath.IsAbs(contextDir) {
			contextDir = filepath.Join(wd, args[0])
		}
	} else if contextDir != wd {
		log.Warningf("Implicit context directory used: %s. You can override context directory using the last argument.", contextDir)
	}

	dir, err := os.Stat(contextDir)
	if err != nil {
		log.Errorf("Problem with opening directory %s, error: %s", contextDir, err)
		os.Exit(2)
	}
	if !dir.IsDir() {
		log.Errorf("Context directory %s is not a directory.", contextDir)
		os.Exit(2)
	}

	log.Debugf("Context directory: %s", contextDir)

	if c.Bool("print") {
		fmt.Print(rockerfile.Content)
		os.Exit(0)
	}

	dockerignore := []string{}

	dockerignoreFilename := filepath.Join(contextDir, ".dockerignore")
	if _, err := os.Stat(dockerignoreFilename); err == nil {
		if dockerignore, err = build.ReadDockerignoreFile(dockerignoreFilename); err != nil {
			log.Fatal(err)
		}
	}

	var config *dockerclient.Config
	config = dockerclient.NewConfigFromCli(c)

	dockerClient, err := dockerclient.NewFromConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	cacheDir, err := util.MakeAbsolute(c.String("cache-dir"))
	if err != nil {
		log.Fatal(err)
	}

	var cache build.Cache
	if !c.Bool("no-cache") {
		cache = build.NewCacheFS(cacheDir)
	}

	var (
		stdoutContainerFormatter log.Formatter = &log.JSONFormatter{}
		stderrContainerFormatter log.Formatter = &log.JSONFormatter{}
	)
	if !c.GlobalBool("json") {
		stdoutContainerFormatter = build.NewMonochromeContainerFormatter()
		stderrContainerFormatter = build.NewColoredContainerFormatter()
	}

	options := build.DockerClientOptions{
		Client:                   dockerClient,
		Auth:                     initAuth(c),
		Log:                      log.StandardLogger(),
		S3storage:                s3.New(dockerClient, cacheDir),
		StdoutContainerFormatter: stdoutContainerFormatter,
		StderrContainerFormatter: stderrContainerFormatter,
		PushRetryCount:           c.Int("push-retry"),
		Host:                     config.Host,
		LogExactSizes:            c.GlobalBool("json"),
	}
	client := build.NewDockerClient(options)

	builder := build.New(client, rockerfile, cache, build.Config{
		InStream:      os.Stdin,
		OutStream:     os.Stdout,
		ContextDir:    contextDir,
		Dockerignore:  dockerignore,
		ArtifactsPath: c.String("artifacts-path"),
		Pull:          c.Bool("pull"),
		NoGarbage:     c.Bool("no-garbage"),
		Attach:        c.Bool("attach"),
		Verbose:       c.GlobalBool("verbose"),
		ID:            c.String("id"),
		NoCache:       c.Bool("no-cache"),
		ReloadCache:   c.Bool("reload-cache"),
		Push:          c.Bool("push"),
		CacheDir:      cacheDir,
		LogJSON:       c.GlobalBool("json"),
		BuildArgs:     runconfigopts.ConvertKVStringsToMap(c.StringSlice("build-arg")),
	})

	plan, err := build.NewPlan(rockerfile.Commands(), true)
	if err != nil {
		log.Fatal(err)
	}

	// Check the docker connection before we actually run
	if err := dockerclient.Ping(dockerClient, 5000); err != nil {
		log.Fatal(err)
	}

	if err := builder.Run(plan); err != nil {
		log.Fatal(err)
	}

	fields := log.Fields{}
	if c.GlobalBool("json") {
		fields["size"] = builder.VirtualSize
		fields["delta"] = builder.ProducedSize
	}

	size := fmt.Sprintf("final size %s (+%s from the base image)",
		units.HumanSize(float64(builder.VirtualSize)),
		units.HumanSize(float64(builder.ProducedSize)),
	)

	log.WithFields(fields).Infof("Successfully built %.12s | %s", builder.GetImageID(), size)
}
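// Usage sketch (hypothetical invocation, derived only from the flags read in
// buildCommand above; flag spellings on the actual CLI may differ):
//
//	rocker build --file Rockerfile --var Version=1.2.3 --no-cache --push .
//
// The trailing argument, if present, overrides the context directory;
// otherwise the directory of the Rockerfile is used (or the working directory
// when the Rockerfile is read from stdin via --file -).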