Example #1
func (daemon *Daemon) reserveName(id, name string) (string, error) {
	if !validContainerNamePattern.MatchString(name) {
		return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars)
	}

	if name[0] != '/' {
		name = "/" + name
	}

	if _, err := daemon.containerGraph.Set(name, id); err != nil {
		if !graphdb.IsNonUniqueNameError(err) {
			return "", err
		}

		conflictingContainer, err := daemon.GetByName(name)
		if err != nil {
			if strings.Contains(err.Error(), "Could not find entity") {
				return "", err
			}

			// Remove name and continue starting the container
			if err := daemon.containerGraph.Delete(name); err != nil {
				return "", err
			}
		} else {
			nameAsKnownByUser := strings.TrimPrefix(name, "/")
			return "", fmt.Errorf(
				"Conflict. The name %q is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.", nameAsKnownByUser,
				stringid.TruncateID(conflictingContainer.ID))
		}
	}
	return name, nil
}
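Every snippet on this page calls stringid.TruncateID to shorten a full object ID before printing it, as the conflict message above does. A minimal standalone sketch of that helper's behaviour, assuming the github.com/docker/docker/pkg/stringid package these snippets vendor:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/stringid"
)

func main() {
	// A full 64-character hex ID, the form the daemon stores internally.
	full := stringid.GenerateRandomID()

	// TruncateID shortens it to the 12-character form shown in CLI output
	// and in the error messages above.
	short := stringid.TruncateID(full)

	fmt.Println(full)
	fmt.Println(short)
	fmt.Println(stringid.IsShortID(short)) // true for a well-formed short ID
}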
Example #2
// Run the builder with the context. This is the lynchpin of this package. This
// will (barring errors):
//
// * call readContext() which will set up the temporary directory and unpack
//   the context into it.
// * read the dockerfile
// * parse the dockerfile
// * walk the parse tree and execute it by dispatching to handlers. If Remove
//   or ForceRemove is set, additional cleanup around containers happens after
//   processing.
// * Print a happy message and return the image ID.
//
func (b *builder) Run(context io.Reader) (string, error) {
	if err := b.readContext(context); err != nil {
		return "", err
	}

	defer func() {
		if err := os.RemoveAll(b.contextPath); err != nil {
			logrus.Debugf("[BUILDER] failed to remove temporary context: %s", err)
		}
	}()

	if err := b.readDockerfile(); err != nil {
		return "", err
	}

	// some initializations that would not have been supplied by the caller.
	b.Config = &runconfig.Config{}

	b.TmpContainers = map[string]struct{}{}

	for i, n := range b.dockerfile.Children {
		select {
		case <-b.cancelled:
			logrus.Debug("Builder: build cancelled!")
			fmt.Fprintf(b.OutStream, "Build cancelled")
			return "", fmt.Errorf("Build cancelled")
		default:
			// Not cancelled yet, keep going...
		}
		if err := b.dispatch(i, n); err != nil {
			if b.ForceRemove {
				b.clearTmp()
			}
			return "", err
		}
		fmt.Fprintf(b.OutStream, " ---> %s\n", stringid.TruncateID(b.image))
		if b.Remove {
			b.clearTmp()
		}
	}

	// check if there are any leftover build-args that were passed but not
	// consumed during build. Return an error, if there are any.
	leftoverArgs := []string{}
	for arg := range b.buildArgs {
		if !b.isBuildArgAllowed(arg) {
			leftoverArgs = append(leftoverArgs, arg)
		}
	}
	if len(leftoverArgs) > 0 {
		return "", fmt.Errorf("One or more build-args %v were not consumed, failing build.", leftoverArgs)
	}

	if b.image == "" {
		return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?")
	}

	fmt.Fprintf(b.OutStream, "Successfully built %s\n", stringid.TruncateID(b.image))
	return b.image, nil
}
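The select with a default branch in the loop above is Go's non-blocking way to poll the cancellation channel between dockerfile instructions. A small sketch of the pattern in isolation (runSteps is a hypothetical helper, not part of the builder; it only needs fmt):

// runSteps runs each step in order, checking for cancellation in between.
// The default branch makes the select non-blocking, so an uncancelled build
// proceeds immediately to the next step.
func runSteps(cancelled <-chan struct{}, steps []func() error) error {
	for _, step := range steps {
		select {
		case <-cancelled:
			return fmt.Errorf("build cancelled")
		default:
			// not cancelled yet, keep going
		}
		if err := step(); err != nil {
			return err
		}
	}
	return nil
}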
Example #3
func (p *v2Pusher) pushLayerIfNecessary(out io.Writer, l layer.Layer) (digest.Digest, error) {
	logrus.Debugf("Pushing layer: %s", l.DiffID())

	// Do we have any blobsums associated with this layer's DiffID?
	possibleBlobsums, err := p.blobSumService.GetBlobSums(l.DiffID())
	if err == nil {
		dgst, exists, err := p.blobSumAlreadyExists(possibleBlobsums)
		if err != nil {
			out.Write(p.sf.FormatProgress(stringid.TruncateID(string(l.DiffID())), "Image push failed", nil))
			return "", err
		}
		if exists {
			out.Write(p.sf.FormatProgress(stringid.TruncateID(string(l.DiffID())), "Layer already exists", nil))
			return dgst, nil
		}
	}

	// if digest was empty or not saved, or if blob does not exist on the remote repository,
	// then push the blob.
	pushDigest, err := p.pushV2Layer(p.repo.Blobs(context.Background()), l)
	if err != nil {
		return "", err
	}
	// Cache mapping from this layer's DiffID to the blobsum
	if err := p.blobSumService.Add(l.DiffID(), pushDigest); err != nil {
		return "", err
	}

	return pushDigest, nil
}
Example #4
func (p *v1Pusher) pushImage(imgID, ep string) (checksum string, err error) {
	jsonRaw, err := p.getV1Config(imgID)
	if err != nil {
		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
	}
	p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pushing", nil))

	compatibilityID, err := p.getV1ID(imgID)
	if err != nil {
		return "", err
	}

	// General rule is to use ID for graph accesses and compatibilityID for
	// calls to session.registry()
	imgData := &registry.ImgData{
		ID: compatibilityID,
	}

	// Send the json
	if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil {
		if err == registry.ErrAlreadyExists {
			p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Image already pushed, skipping", nil))
			return "", nil
		}
		return "", err
	}

	layerData, err := p.graph.TempLayerArchive(imgID, p.sf, p.out)
	if err != nil {
		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
	}
	defer os.RemoveAll(layerData.Name())

	// Send the layer
	logrus.Debugf("rendered layer for %s of [%d] size", imgID, layerData.Size)

	checksum, checksumPayload, err := p.session.PushImageLayerRegistry(imgData.ID,
		progressreader.New(progressreader.Config{
			In:        layerData,
			Out:       p.out,
			Formatter: p.sf,
			Size:      layerData.Size,
			NewLines:  false,
			ID:        stringid.TruncateID(imgID),
			Action:    "Pushing",
		}), ep, jsonRaw)
	if err != nil {
		return "", err
	}
	imgData.Checksum = checksum
	imgData.ChecksumPayload = checksumPayload
	// Send the checksum
	if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil {
		return "", err
	}

	p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Image successfully pushed", nil))
	return imgData.Checksum, nil
}
Example #5
// Get returns a container using its ID or Name
func (containers Containers) Get(IDOrName string) *Container {
	// Abort immediately if the name is empty.
	if len(IDOrName) == 0 {
		return nil
	}

	// Match exact or short Container ID.
	for _, container := range containers {
		if container.Id == IDOrName || stringid.TruncateID(container.Id) == IDOrName {
			return container
		}
	}

	// Match exact Swarm ID.
	for _, container := range containers {
		if swarmID := container.Config.SwarmID(); swarmID == IDOrName || stringid.TruncateID(swarmID) == IDOrName {
			return container
		}
	}

	candidates := []*Container{}

	// Match name, /name or engine/name.
	for _, container := range containers {
		for _, name := range container.Names {
			if name == IDOrName || name == "/"+IDOrName || container.Engine.ID+name == IDOrName || container.Engine.Name+name == IDOrName {
				return container
			}
		}
	}

	// Match Container ID prefix.
	for _, container := range containers {
		if strings.HasPrefix(container.Id, IDOrName) {
			candidates = append(candidates, container)
		}
	}

	// Match Swarm ID prefix.
	for _, container := range containers {
		if strings.HasPrefix(container.Config.SwarmID(), IDOrName) {
			candidates = append(candidates, container)
		}
	}

	if len(candidates) == 1 {
		return candidates[0]
	}

	return nil
}
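The resolution order above (exact or short container ID, exact Swarm ID, name, then ID prefix) means a prefix only wins when it is unambiguous: zero or several prefix matches yield no result. A hypothetical helper sketching that last rule over a plain slice of IDs:

// resolveByPrefix returns the single ID that prefix uniquely identifies,
// or "" when the prefix matches zero or more than one ID.
func resolveByPrefix(ids []string, prefix string) string {
	match := ""
	for _, id := range ids {
		if strings.HasPrefix(id, prefix) {
			if match != "" {
				return "" // ambiguous prefix
			}
			match = id
		}
	}
	return match
}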
Example #6
// checkImageDeleteConflict determines whether there are any conflicts
// preventing deletion of the given image from this daemon. A hard conflict is
// any image which has the given image as a parent or any running container
// using the image. A soft conflict is any tags/digest referencing the given
// image or any stopped container using the image. The mask parameter
// selects which conflict types to check for.
func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict {
	// Check if the image has any descendant images.
	if mask&conflictDependentChild != 0 && len(daemon.imageStore.Children(imgID)) > 0 {
		return &imageDeleteConflict{
			hard:    true,
			imgID:   imgID,
			message: "image has dependent child images",
		}
	}

	if mask&conflictRunningContainer != 0 {
		// Check if any running container is using the image.
		for _, container := range daemon.List() {
			if !container.IsRunning() {
				// Skip this until we check for soft conflicts later.
				continue
			}

			if container.ImageID == imgID {
				return &imageDeleteConflict{
					imgID:   imgID,
					hard:    true,
					used:    true,
					message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)),
				}
			}
		}
	}

	// Check if any repository tags/digest reference this image.
	if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID)) > 0 {
		return &imageDeleteConflict{
			imgID:   imgID,
			message: "image is referenced in one or more repositories",
		}
	}

	if mask&conflictStoppedContainer != 0 {
		// Check if any stopped containers reference this image.
		for _, container := range daemon.List() {
			if container.IsRunning() {
				// Skip this as it was checked above in hard conflict conditions.
				continue
			}

			if container.ImageID == imgID {
				return &imageDeleteConflict{
					imgID:   imgID,
					used:    true,
					message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)),
				}
			}
		}
	}

	return nil
}
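The mask argument is a bit set of conflict categories, so callers can ask only about the conflicts they care about (for example, skipping soft conflicts on a forced delete). An illustrative declaration of such flags — the names mirror the ones used above, but the values and groupings here are assumptions:

type conflictType int

const (
	conflictDependentChild conflictType = 1 << iota
	conflictRunningContainer
	conflictActiveReference
	conflictStoppedContainer

	// Illustrative groupings: hard conflicts block deletion outright,
	// soft conflicts can be overridden by a forced delete.
	conflictHard = conflictDependentChild | conflictRunningContainer
	conflictSoft = conflictActiveReference | conflictStoppedContainer
)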
Example #7
// CmdHistory shows the history of an image.
//
// Usage: docker history [OPTIONS] IMAGE
func (cli *DockerCli) CmdHistory(args ...string) error {
	cmd := Cli.Subcmd("history", []string{"IMAGE"}, Cli.DockerCommands["history"].Description, true)
	human := cmd.Bool([]string{"H", "-human"}, true, "Print sizes and dates in human readable format")
	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
	cmd.Require(flag.Exact, 1)

	cmd.ParseFlags(args, true)

	serverResp, err := cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, nil)
	if err != nil {
		return err
	}

	defer serverResp.body.Close()

	history := []types.ImageHistory{}
	if err := json.NewDecoder(serverResp.body).Decode(&history); err != nil {
		return err
	}

	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)

	if *quiet {
		for _, entry := range history {
			if *noTrunc {
				fmt.Fprintf(w, "%s\n", entry.ID)
			} else {
				fmt.Fprintf(w, "%s\n", stringid.TruncateID(entry.ID))
			}
		}
		w.Flush()
		return nil
	}

	fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT")
	for _, entry := range history {
		imageID := entry.ID
		createdBy := strings.Replace(entry.CreatedBy, "\t", " ", -1)
		if *noTrunc == false {
			createdBy = stringutils.Truncate(createdBy, 45)
			imageID = stringid.TruncateID(entry.ID)
		}

		created := units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))) + " ago"
		size := units.HumanSize(float64(entry.Size))
		if *human == false {
			created = time.Unix(entry.Created, 0).Format(time.RFC3339)
			size = strconv.FormatInt(entry.Size, 10)
		}

		fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", imageID, created, createdBy, size, entry.Comment)
	}
	w.Flush()
	return nil
}
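The column alignment in this output comes from text/tabwriter with a minimum cell width of 20, tab width 1 and padding 3 — the same settings every history/ls command on this page uses. A standard-library-only illustration with placeholder row values:

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// Same tabwriter settings as the CLI commands above.
	w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
	fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT")
	// Placeholder data, just to show the aligned columns.
	fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", "4e38e38c8ce0", "2 weeks ago", "/bin/sh -c #(nop) CMD", "1.2 MB", "")
	w.Flush()
}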
Example #8
func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (digest.Digest, error) {
	out := p.config.OutStream

	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Preparing", nil))

	image, err := p.graph.Get(img.ID)
	if err != nil {
		return "", err
	}
	arch, err := p.graph.TarLayer(image)
	if err != nil {
		return "", err
	}
	defer arch.Close()

	// Send the layer
	layerUpload, err := bs.Create(context.Background())
	if err != nil {
		return "", err
	}
	defer layerUpload.Close()

	digester := digest.Canonical.New()
	tee := io.TeeReader(arch, digester.Hash())

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(tee), // we'll take care of close here.
		Out:       out,
		Formatter: p.sf,

		// TODO(stevvooe): This may cause a size reporting error. Try to get
		// this from tar-split or elsewhere. The main issue here is that we
		// don't want to buffer to disk *just* to calculate the size.
		Size: img.Size,

		NewLines: false,
		ID:       stringid.TruncateID(img.ID),
		Action:   "Pushing",
	})

	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushing", nil))
	nn, err := io.Copy(layerUpload, reader)
	if err != nil {
		return "", err
	}

	dgst := digester.Digest()
	if _, err := layerUpload.Commit(context.Background(), distribution.Descriptor{Digest: dgst}); err != nil {
		return "", err
	}

	logrus.Debugf("uploaded layer %s (%s), %d bytes", img.ID, dgst, nn)
	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushed", nil))

	return dgst, nil
}
Example #9
func (p *v2Puller) download(di *downloadInfo) {
	logrus.Debugf("pulling blob %q", di.digest)

	blobs := p.repo.Blobs(context.Background())

	desc, err := blobs.Stat(context.Background(), di.digest)
	if err != nil {
		logrus.Debugf("Error statting layer: %v", err)
		di.err <- err
		return
	}
	di.size = desc.Size

	layerDownload, err := blobs.Open(context.Background(), di.digest)
	if err != nil {
		logrus.Debugf("Error fetching layer: %v", err)
		di.err <- err
		return
	}
	defer layerDownload.Close()

	verifier, err := digest.NewDigestVerifier(di.digest)
	if err != nil {
		di.err <- err
		return
	}

	digestStr := di.digest.String()

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(io.TeeReader(layerDownload, verifier)),
		Out:       di.broadcaster,
		Formatter: p.sf,
		Size:      di.size,
		NewLines:  false,
		ID:        stringid.TruncateID(digestStr),
		Action:    "Downloading",
	})
	io.Copy(di.tmpFile, reader)

	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(digestStr), "Verifying Checksum", nil))

	if !verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest)
		logrus.Error(err)
		di.err <- err
		return
	}

	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(digestStr), "Download complete", nil))

	logrus.Debugf("Downloaded %s to tempfile %s", digestStr, di.tmpFile.Name())
	di.layer = layerDownload

	di.err <- nil
}
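The download path above hashes the layer while streaming it (io.TeeReader feeding a digest verifier) instead of re-reading the temp file afterwards. A rough sketch of the same tee-and-verify pattern using only the standard library (crypto/sha256, encoding/hex, fmt, io); copyAndVerify is a hypothetical helper, not part of the puller:

// copyAndVerify streams src to dst while hashing it, then compares the
// result with the expected SHA-256 hex digest.
func copyAndVerify(dst io.Writer, src io.Reader, wantHex string) error {
	h := sha256.New()
	if _, err := io.Copy(dst, io.TeeReader(src, h)); err != nil {
		return err
	}
	if got := hex.EncodeToString(h.Sum(nil)); got != wantHex {
		return fmt.Errorf("checksum mismatch: got %s, want %s", got, wantHex)
	}
	return nil
}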
Example #10
// CmdServiceLs handles service list UI
func (cli *NetworkCli) CmdServiceLs(chain string, args ...string) error {
	cmd := cli.Subcmd(chain, "ls", "SERVICE", "Lists all the services on a network", false)
	flNetwork := cmd.String([]string{"net", "-network"}, "", "Only show the services that are published on the specified network")
	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Do not truncate the output")

	err := cmd.ParseFlags(args, true)
	if err != nil {
		return err
	}

	var obj []byte
	if *flNetwork == "" {
		obj, _, err = readBody(cli.call("GET", "/services", nil, nil))
	} else {
		obj, _, err = readBody(cli.call("GET", "/services?network="+*flNetwork, nil, nil))
	}
	if err != nil {
		return err
	}

	var serviceResources []serviceResource
	err = json.Unmarshal(obj, &serviceResources)
	if err != nil {
		fmt.Println(err)
		return err
	}

	wr := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
	// unless quiet (-q) is specified, print field titles
	if !*quiet {
		fmt.Fprintln(wr, "SERVICE ID\tNAME\tNETWORK\tCONTAINER\tSANDBOX")
	}

	for _, sr := range serviceResources {
		ID := sr.ID
		bkID, sbID, err := getBackendID(cli, ID)
		if err != nil {
			return err
		}
		if !*noTrunc {
			ID = stringid.TruncateID(ID)
			bkID = stringid.TruncateID(bkID)
			sbID = stringid.TruncateID(sbID)
		}
		if !*quiet {
			fmt.Fprintf(wr, "%s\t%s\t%s\t%s\t%s\n", ID, sr.Name, sr.Network, bkID, sbID)
		} else {
			fmt.Fprintln(wr, ID)
		}
	}
	wr.Flush()

	return nil
}
Example #11
// CmdHistory shows the history of an image.
//
// Usage: docker history [OPTIONS] IMAGE
func (cli *DockerCli) CmdHistory(args ...string) error {
	cmd := Cli.Subcmd("history", []string{"IMAGE"}, Cli.DockerCommands["history"].Description, true)
	human := cmd.Bool([]string{"H", "-human"}, true, "Print sizes and dates in human readable format")
	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
	noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output")
	cmd.Require(flag.Exact, 1)

	cmd.ParseFlags(args, true)

	history, err := cli.client.ImageHistory(context.Background(), cmd.Arg(0))
	if err != nil {
		return err
	}

	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)

	if *quiet {
		for _, entry := range history {
			if *noTrunc {
				fmt.Fprintf(w, "%s\n", entry.ID)
			} else {
				fmt.Fprintf(w, "%s\n", stringid.TruncateID(entry.ID))
			}
		}
		w.Flush()
		return nil
	}

	var imageID string
	var createdBy string
	var created string
	var size string

	fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT")
	for _, entry := range history {
		imageID = entry.ID
		createdBy = strings.Replace(entry.CreatedBy, "\t", " ", -1)
		if *noTrunc == false {
			createdBy = stringutils.Truncate(createdBy, 45)
			imageID = stringid.TruncateID(entry.ID)
		}

		if *human {
			created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))) + " ago"
			size = units.HumanSize(float64(entry.Size))
		} else {
			created = time.Unix(entry.Created, 0).Format(time.RFC3339)
			size = strconv.FormatInt(entry.Size, 10)
		}

		fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", imageID, created, createdBy, size, entry.Comment)
	}
	w.Flush()
	return nil
}
Example #12
func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep string, token []string, sf *streamformatter.StreamFormatter) (checksum string, err error) {
	out = ioutils.NewWriteFlusher(out)
	jsonRaw, err := s.graph.RawJSON(imgID)
	if err != nil {
		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
	}
	out.Write(sf.FormatProgress(stringid.TruncateID(imgID), "Pushing", nil))

	imgData := &registry.ImgData{
		ID: imgID,
	}

	// Send the json
	if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil {
		if err == registry.ErrAlreadyExists {
			out.Write(sf.FormatProgress(stringid.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
			return "", nil
		}
		return "", err
	}

	layerData, err := s.graph.TempLayerArchive(imgID, sf, out)
	if err != nil {
		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
	}
	defer os.RemoveAll(layerData.Name())

	// Send the layer
	logrus.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)

	checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID,
		progressreader.New(progressreader.Config{
			In:        layerData,
			Out:       out,
			Formatter: sf,
			Size:      int(layerData.Size),
			NewLines:  false,
			ID:        stringid.TruncateID(imgData.ID),
			Action:    "Pushing",
		}), ep, jsonRaw)
	if err != nil {
		return "", err
	}
	imgData.Checksum = checksum
	imgData.ChecksumPayload = checksumPayload
	// Send the checksum
	if err := r.PushImageChecksumRegistry(imgData, ep); err != nil {
		return "", err
	}

	out.Write(sf.FormatProgress(stringid.TruncateID(imgData.ID), "Image successfully pushed", nil))
	return imgData.Checksum, nil
}
Example #13
func (c *containerContext) Image() string {
	c.addHeader(imageHeader)
	if c.c.Image == "" {
		return "<no image>"
	}
	if c.trunc {
		if trunc := stringid.TruncateID(c.c.ImageID); trunc == stringid.TruncateID(c.c.Image) {
			return trunc
		}
	}
	return c.c.Image
}
Example #14
func (c *containerContext) Image() string {
	c.addHeader(imageHeader)
	if c.c.Image == "" {
		return "<no image>"
	}
	if c.trunc {
		if stringid.TruncateID(c.c.ImageID) == stringid.TruncateID(c.c.Image) {
			return stringutils.Truncate(c.c.Image, 12)
		}
	}
	return c.c.Image
}
Example #15
func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error {
	if img.Tag == "" {
		logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
		return nil
	}

	localNameRef, err := reference.WithTag(p.repoInfo, img.Tag)
	if err != nil {
		retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag)
		logrus.Debug(retErr.Error())
		return retErr
	}

	if err := v1.ValidateID(img.ID); err != nil {
		return err
	}

	progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.FullName())
	success := false
	var lastErr error
	for _, ep := range p.repoInfo.Index.Mirrors {
		ep += "v1/"
		progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.FullName(), ep))
		if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil {
			// Don't report errors when pulling from mirrors.
			logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err)
			continue
		}
		success = true
		break
	}
	if !success {
		for _, ep := range repoData.Endpoints {
			progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.FullName(), ep)
			if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil {
				// It's not ideal that only the last error is returned, it would be better to concatenate the errors.
				// As the error is also given to the output stream the user will see the error.
				lastErr = err
				progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err)
				continue
			}
			success = true
			break
		}
	}
	if !success {
		err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.FullName(), lastErr)
		progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error())
		return err
	}
	return nil
}
Example #16
// CmdHistory shows the history of an image.
//
// Usage: docker history [OPTIONS] IMAGE
func (cli *DockerCli) CmdHistory(args ...string) error {
	cmd := cli.Subcmd("history", "IMAGE", "Show the history of an image", true)
	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
	cmd.Require(flag.Exact, 1)

	utils.ParseFlags(cmd, args, true)

	body, _, err := readBody(cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, nil))
	if err != nil {
		return err
	}

	outs := engine.NewTable("Created", 0)
	if _, err := outs.ReadListFrom(body); err != nil {
		return err
	}

	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
	if !*quiet {
		fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE")
	}

	for _, out := range outs.Data {
		outID := out.Get("Id")
		if !*quiet {
			if *noTrunc {
				fmt.Fprintf(w, "%s\t", outID)
			} else {
				fmt.Fprintf(w, "%s\t", stringid.TruncateID(outID))
			}

			fmt.Fprintf(w, "%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))))

			if *noTrunc {
				fmt.Fprintf(w, "%s\t", out.Get("CreatedBy"))
			} else {
				fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45))
			}
			fmt.Fprintf(w, "%s\n", units.HumanSize(float64(out.GetInt64("Size"))))
		} else {
			if *noTrunc {
				fmt.Fprintln(w, outID)
			} else {
				fmt.Fprintln(w, stringid.TruncateID(outID))
			}
		}
	}
	w.Flush()
	return nil
}
Example #17
func (b *Builder) clearTmp() {
	for c := range b.TmpContainers {
		rmConfig := &daemon.ContainerRmConfig{
			ForceRemove:  true,
			RemoveVolume: true,
		}
		if err := b.Daemon.ContainerRm(c, rmConfig); err != nil {
			fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err)
			return
		}
		delete(b.TmpContainers, c)
		fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c))
	}
}
Example #18
func runHistory(dockerCli *client.DockerCli, opts historyOptions) error {
	ctx := context.Background()

	history, err := dockerCli.Client().ImageHistory(ctx, opts.image)
	if err != nil {
		return err
	}

	w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0)

	if opts.quiet {
		for _, entry := range history {
			if opts.noTrunc {
				fmt.Fprintf(w, "%s\n", entry.ID)
			} else {
				fmt.Fprintf(w, "%s\n", stringid.TruncateID(entry.ID))
			}
		}
		w.Flush()
		return nil
	}

	var imageID string
	var createdBy string
	var created string
	var size string

	fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT")
	for _, entry := range history {
		imageID = entry.ID
		createdBy = strings.Replace(entry.CreatedBy, "\t", " ", -1)
		if !opts.noTrunc {
			createdBy = stringutils.Ellipsis(createdBy, 45)
			imageID = stringid.TruncateID(entry.ID)
		}

		if opts.human {
			created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))) + " ago"
			size = units.HumanSize(float64(entry.Size))
		} else {
			created = time.Unix(entry.Created, 0).Format(time.RFC3339)
			size = strconv.FormatInt(entry.Size, 10)
		}

		fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", imageID, created, createdBy, size, entry.Comment)
	}
	w.Flush()
	return nil
}
Example #19
// checkImageDeleteConflict determines whether there are any conflicts
// preventing deletion of the given image from this daemon. A hard conflict is
// any image which has the given image as a parent or any running container
// using the image. A soft conflict is any tags/digest referencing the given
// image or any stopped container using the image. The mask parameter
// selects which conflict types to check for.
func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict {
	// Check if the image has any descendant images.
	if mask&conflictDependentChild != 0 && len(daemon.imageStore.Children(imgID)) > 0 {
		return &imageDeleteConflict{
			hard:    true,
			imgID:   imgID,
			message: "image has dependent child images",
		}
	}

	if mask&conflictRunningContainer != 0 {
		// Check if any running container is using the image.
		running := func(c *container.Container) bool {
			return c.IsRunning() && c.ImageID == imgID
		}
		if container := daemon.containers.First(running); container != nil {
			return &imageDeleteConflict{
				imgID:   imgID,
				hard:    true,
				used:    true,
				message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)),
			}
		}
	}

	// Check if any repository tags/digest reference this image.
	if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID)) > 0 {
		return &imageDeleteConflict{
			imgID:   imgID,
			message: "image is referenced in multiple repositories",
		}
	}

	if mask&conflictStoppedContainer != 0 {
		// Check if any stopped containers reference this image.
		stopped := func(c *container.Container) bool {
			return !c.IsRunning() && c.ImageID == imgID
		}
		if container := daemon.containers.First(stopped); container != nil {
			return &imageDeleteConflict{
				imgID:   imgID,
				used:    true,
				message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)),
			}
		}
	}

	return nil
}
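Compared with Example #6, this version delegates the loops to daemon.containers.First(predicate). A simplified sketch of what such a helper does over a plain slice (hypothetical; the real container store is indexed rather than a slice):

// first returns the first container for which pred returns true, or nil.
func first(containers []*container.Container, pred func(*container.Container) bool) *container.Container {
	for _, c := range containers {
		if pred(c) {
			return c
		}
	}
	return nil
}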
Example #20
// PushV2Image pushes the image content to the v2 registry, first buffering the contents to disk
func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint *registry.Endpoint, imageName string, sf *streamformatter.StreamFormatter, out io.Writer, auth *registry.RequestAuthorization) (string, error) {
	out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Buffering to Disk", nil))

	image, err := s.graph.Get(img.ID)
	if err != nil {
		return "", err
	}
	arch, err := image.TarLayer()
	if err != nil {
		return "", err
	}
	defer arch.Close()

	tf, err := s.graph.newTempFile()
	if err != nil {
		return "", err
	}
	defer func() {
		tf.Close()
		os.Remove(tf.Name())
	}()

	h := sha256.New()
	size, err := bufferToFile(tf, io.TeeReader(arch, h))
	if err != nil {
		return "", err
	}
	dgst := digest.NewDigest("sha256", h)

	// Send the layer
	logrus.Debugf("rendered layer for %s of [%d] size", img.ID, size)

	if err := r.PutV2ImageBlob(endpoint, imageName, dgst.Algorithm(), dgst.Hex(),
		progressreader.New(progressreader.Config{
			In:        tf,
			Out:       out,
			Formatter: sf,
			Size:      int(size),
			NewLines:  false,
			ID:        stringid.TruncateID(img.ID),
			Action:    "Pushing",
		}), auth); err != nil {
		out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Image push failed", nil))
		return "", err
	}
	out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Image successfully pushed", nil))
	return dgst.String(), nil
}
Example #21
// Run the builder with the context. This is the lynchpin of this package. This
// will (barring errors):
//
// * call readContext() which will set up the temporary directory and unpack
//   the context into it.
// * read the dockerfile
// * parse the dockerfile
// * walk the parse tree and execute it by dispatching to handlers. If Remove
//   or ForceRemove is set, additional cleanup around containers happens after
//   processing.
// * Print a happy message and return the image ID.
//
func (b *builder) Run(context io.Reader) (string, error) {
	if err := b.readContext(context); err != nil {
		return "", err
	}

	defer func() {
		if err := os.RemoveAll(b.contextPath); err != nil {
			logrus.Debugf("[BUILDER] failed to remove temporary context: %s", err)
		}
	}()

	if err := b.readDockerfile(); err != nil {
		return "", err
	}

	// some initializations that would not have been supplied by the caller.
	b.Config = &runconfig.Config{}

	b.TmpContainers = map[string]struct{}{}

	for i, n := range b.dockerfile.Children {
		select {
		case <-b.cancelled:
			logrus.Debug("Builder: build cancelled!")
			fmt.Fprintf(b.OutStream, "Build cancelled")
			return "", fmt.Errorf("Build cancelled")
		default:
			// Not cancelled yet, keep going...
		}
		if err := b.dispatch(i, n); err != nil {
			if b.ForceRemove {
				b.clearTmp()
			}
			return "", err
		}
		fmt.Fprintf(b.OutStream, " ---> %s\n", stringid.TruncateID(b.image))
		if b.Remove {
			b.clearTmp()
		}
	}

	if b.image == "" {
		return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?")
	}

	fmt.Fprintf(b.OutStream, "Successfully built %s\n", stringid.TruncateID(b.image))
	return b.image, nil
}
Example #22
func (c *containerContext) ID() string {
	c.addHeader(idHeader)
	if c.trunc {
		return stringid.TruncateID(c.c.ID)
	}
	return c.c.ID
}
Example #23
func (s *DockerSuite) TestContainerApiPause(c *check.C) {
	testRequires(c, DaemonIsLinux)
	defer unpauseAllContainers()
	out, _ := dockerCmd(c, "run", "-d", "busybox", "sleep", "30")
	ContainerID := strings.TrimSpace(out)

	status, _, err := sockRequest("POST", "/containers/"+ContainerID+"/pause", nil)
	c.Assert(err, check.IsNil)
	c.Assert(status, check.Equals, http.StatusNoContent)

	pausedContainers, err := getSliceOfPausedContainers()

	if err != nil {
		c.Fatalf("error thrown while checking if containers were paused: %v", err)
	}

	if len(pausedContainers) != 1 || stringid.TruncateID(ContainerID) != pausedContainers[0] {
		c.Fatalf("there should be one paused container and not %d", len(pausedContainers))
	}

	status, _, err = sockRequest("POST", "/containers/"+ContainerID+"/unpause", nil)
	c.Assert(err, check.IsNil)
	c.Assert(status, check.Equals, http.StatusNoContent)

	pausedContainers, err = getSliceOfPausedContainers()

	if err != nil {
		c.Fatalf("error thrown while checking if containers were paused: %v", err)
	}

	if pausedContainers != nil {
		c.Fatalf("There should be no paused container.")
	}
}
Example #24
func (daemon *Daemon) checkImageDeleteSoftConflict(imgID image.ID) *imageDeleteConflict {
	// Check if any repository tags/digest reference this image.
	if len(daemon.referenceStore.References(imgID)) > 0 {
		return &imageDeleteConflict{
			imgID:   imgID,
			message: "image is referenced in one or more repositories",
		}
	}

	// Check if any stopped containers reference this image.
	for _, container := range daemon.List() {
		if container.IsRunning() {
			// Skip this as it was checked above in hard conflict conditions.
			continue
		}

		if container.ImageID == imgID {
			return &imageDeleteConflict{
				imgID:   imgID,
				used:    true,
				message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)),
			}
		}
	}

	return nil
}
Example #25
func (daemon *Daemon) checkImageDeleteHardConflict(imgID image.ID) *imageDeleteConflict {
	// Check if the image has any descendant images.
	if len(daemon.imageStore.Children(imgID)) > 0 {
		return &imageDeleteConflict{
			hard:    true,
			imgID:   imgID,
			message: "image has dependent child images",
		}
	}

	// Check if any running container is using the image.
	for _, container := range daemon.List() {
		if !container.IsRunning() {
			// Skip this until we check for soft conflicts later.
			continue
		}

		if container.ImageID == imgID {
			return &imageDeleteConflict{
				imgID:   imgID,
				hard:    true,
				used:    true,
				message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)),
			}
		}
	}

	return nil
}
Example #26
func (s *DockerSuite) TestCreateByImageID(c *check.C) {
	imageName := "testcreatebyimageid"
	imageID, err := buildImage(imageName,
		`FROM busybox
		MAINTAINER dockerio`,
		true)
	if err != nil {
		c.Fatal(err)
	}
	truncatedImageID := stringid.TruncateID(imageID)

	dockerCmd(c, "create", imageID)
	dockerCmd(c, "create", truncatedImageID)
	dockerCmd(c, "create", fmt.Sprintf("%s:%s", imageName, truncatedImageID))

	// Ensure this fails
	out, exit, _ := dockerCmdWithError("create", fmt.Sprintf("%s:%s", imageName, imageID))
	if exit == 0 {
		c.Fatalf("expected non-zero exit code; received %d", exit)
	}

	if expected := "Error parsing reference"; !strings.Contains(out, expected) {
		c.Fatalf(`Expected %q in output; got: %s`, expected, out)
	}

	out, exit, _ = dockerCmdWithError("create", fmt.Sprintf("%s:%s", "wrongimage", truncatedImageID))
	if exit == 0 {
		c.Fatalf("expected non-zero exit code; received %d", exit)
	}

	if expected := "Unable to find image"; !strings.Contains(out, expected) {
		c.Fatalf(`Expected %q in output; got: %s`, expected, out)
	}
}
Example #27
// shouldRestart checks the restart policy and applies the rules to determine if
// the container's process should be restarted
func (m *containerMonitor) shouldRestart(exitCode int) bool {
	m.mux.Lock()
	defer m.mux.Unlock()

	// do not restart if the user or docker has requested that this container be stopped
	if m.shouldStop {
		m.container.HasBeenManuallyStopped = !m.supervisor.IsShuttingDown()
		return false
	}

	switch {
	case m.restartPolicy.IsAlways(), m.restartPolicy.IsUnlessStopped():
		return true
	case m.restartPolicy.IsOnFailure():
		// the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count
		if max := m.restartPolicy.MaximumRetryCount; max != 0 && m.failureCount > max {
			logrus.Debugf("stopping restart of container %s because maximum failure could of %d has been reached",
				stringid.TruncateID(m.container.ID), max)
			return false
		}

		return exitCode != 0
	}

	return false
}
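For the on-failure branch, the decision reduces to two checks: has the retry ceiling been hit, and did the process exit non-zero. A hypothetical helper condensing just that branch (not part of the monitor's API):

// shouldRetryOnFailure mirrors the on-failure case above: retry while the
// retry ceiling (0 means unlimited) has not been exceeded and the last
// exit code was non-zero.
func shouldRetryOnFailure(exitCode, failureCount, maxRetries int) bool {
	if maxRetries != 0 && failureCount > maxRetries {
		return false
	}
	return exitCode != 0
}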
Example #28
// TempLayerArchive creates a temporary archive of the given image's filesystem layer.
//   The archive is stored on disk and will be automatically deleted as soon as it has been read.
//   If output is not nil, a human-readable progress bar will be written to it.
func (graph *Graph) TempLayerArchive(id string, sf *streamformatter.StreamFormatter, output io.Writer) (*archive.TempArchive, error) {
	image, err := graph.Get(id)
	if err != nil {
		return nil, err
	}
	tmp, err := graph.mktemp()
	if err != nil {
		return nil, err
	}
	defer os.RemoveAll(tmp)
	a, err := graph.TarLayer(image)
	if err != nil {
		return nil, err
	}
	progressReader := progressreader.New(progressreader.Config{
		In:        a,
		Out:       output,
		Formatter: sf,
		Size:      0,
		NewLines:  false,
		ID:        stringid.TruncateID(id),
		Action:    "Buffering to disk",
	})
	defer progressReader.Close()
	return archive.NewTempArchive(progressReader, tmp)
}
Example #29
// GetImage returns a pointer to an Image structure describing the image
// referred to by refOrID inside repository repoName.
func (store *TagStore) GetImage(repoName, refOrID string) (*image.Image, error) {
	repo, err := store.Get(repoName)

	if err != nil {
		return nil, err
	}
	if repo == nil {
		return nil, nil
	}

	store.Lock()
	defer store.Unlock()
	if imgID, exists := repo[refOrID]; exists {
		return store.graph.Get(imgID)
	}

	// If no matching tag is found, search through images for a matching image id
	// iff it looks like a short ID or would look like a short ID
	if stringid.IsShortID(stringid.TruncateID(refOrID)) {
		for _, revision := range repo {
			if strings.HasPrefix(revision, refOrID) {
				return store.graph.Get(revision)
			}
		}
	}

	return nil, nil
}
Example #30
func (daemon *Daemon) reserveName(id, name string) (string, error) {
	if !validContainerNamePattern.MatchString(name) {
		return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars)
	}

	if name[0] != '/' {
		name = "/" + name
	}

	if _, err := daemon.containerGraphDB.Set(name, id); err != nil {
		if !graphdb.IsNonUniqueNameError(err) {
			return "", err
		}

		conflictingContainer, err := daemon.GetByName(name)
		if err != nil {
			return "", err
		}
		return "", fmt.Errorf(
			"Conflict. The name %q is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.", strings.TrimPrefix(name, "/"),
			stringid.TruncateID(conflictingContainer.ID))

	}
	return name, nil
}