Example #1
// TempLayerArchive creates a temporary archive of the given image's filesystem layer.
//   The archive is stored on disk and will be automatically deleted as soon as it has been read.
//   If output is not nil, a human-readable progress bar will be written to it.
func (graph *Graph) TempLayerArchive(id string, sf *streamformatter.StreamFormatter, output io.Writer) (*archive.TempArchive, error) {
	image, err := graph.Get(id)
	if err != nil {
		return nil, err
	}
	tmp, err := graph.mktemp()
	if err != nil {
		return nil, err
	}
	defer os.RemoveAll(tmp)
	a, err := graph.TarLayer(image)
	if err != nil {
		return nil, err
	}
	progressReader := progressreader.New(progressreader.Config{
		In:        a,
		Out:       output,
		Formatter: sf,
		Size:      0,
		NewLines:  false,
		ID:        stringid.TruncateID(id),
		Action:    "Buffering to disk",
	})
	defer progressReader.Close()
	return archive.NewTempArchive(progressReader, tmp)
}
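
Every example on this page wraps an io.Reader in progressreader.New so that each Read is mirrored to a stream formatter as a progress event. Below is a minimal, standard-library-only sketch of that pattern; the progressReader type and its field names are illustrative stand-ins, not Docker's progressreader API.

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

type progressReader struct {
	in      io.Reader // underlying stream being consumed
	out     io.Writer // where progress events are written
	total   int64     // expected size; 0 means unknown
	current int64     // bytes read so far
	action  string    // label, e.g. "Buffering to disk"
}

func (p *progressReader) Read(buf []byte) (int, error) {
	n, err := p.in.Read(buf)
	p.current += int64(n)
	if p.total > 0 {
		fmt.Fprintf(p.out, "\r%s %d/%d bytes", p.action, p.current, p.total)
	} else {
		fmt.Fprintf(p.out, "\r%s %d bytes", p.action, p.current)
	}
	return n, err
}

func main() {
	src := strings.NewReader(strings.Repeat("x", 1<<16))
	pr := &progressReader{in: src, out: os.Stderr, total: src.Size(), action: "Buffering to disk"}
	// io.Copy drives the wrapper the way archive.NewTempArchive would.
	if _, err := io.Copy(io.Discard, pr); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	fmt.Fprintln(os.Stderr)
}
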
Example #2
func (p *v1Pusher) pushImage(imgID, ep string) (checksum string, err error) {
	jsonRaw, err := p.getV1Config(imgID)
	if err != nil {
		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
	}
	p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pushing", nil))

	compatibilityID, err := p.getV1ID(imgID)
	if err != nil {
		return "", err
	}

	// General rule is to use ID for graph accesses and compatibilityID for
	// calls to session.registry()
	imgData := &registry.ImgData{
		ID: compatibilityID,
	}

	// Send the json
	if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil {
		if err == registry.ErrAlreadyExists {
			p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Image already pushed, skipping", nil))
			return "", nil
		}
		return "", err
	}

	layerData, err := p.graph.TempLayerArchive(imgID, p.sf, p.out)
	if err != nil {
		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
	}
	defer os.RemoveAll(layerData.Name())

	// Send the layer
	logrus.Debugf("rendered layer for %s of [%d] size", imgID, layerData.Size)

	checksum, checksumPayload, err := p.session.PushImageLayerRegistry(imgData.ID,
		progressreader.New(progressreader.Config{
			In:        layerData,
			Out:       p.out,
			Formatter: p.sf,
			Size:      layerData.Size,
			NewLines:  false,
			ID:        stringid.TruncateID(imgID),
			Action:    "Pushing",
		}), ep, jsonRaw)
	if err != nil {
		return "", err
	}
	imgData.Checksum = checksum
	imgData.ChecksumPayload = checksumPayload
	// Send the checksum
	if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil {
		return "", err
	}

	p.out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Image successfully pushed", nil))
	return imgData.Checksum, nil
}
Example #3
// Import imports an image, getting the archived layer data either from
// inConfig (if src is "-"), or from a URI specified in src. Progress output is
// written to outStream. Repository and tag names can optionally be given in
// the repo and tag arguments, respectively.
func (s *TagStore) Import(src string, repo string, tag string, inConfig io.ReadCloser, outStream io.Writer, containerConfig *runconfig.Config) error {
	var (
		sf      = streamformatter.NewJSONStreamFormatter()
		archive io.ReadCloser
		resp    *http.Response
	)

	if src == "-" {
		archive = inConfig
	} else {
		inConfig.Close()
		u, err := url.Parse(src)
		if err != nil {
			return err
		}
		if u.Scheme == "" {
			u.Scheme = "http"
			u.Host = src
			u.Path = ""
		}
		outStream.Write(sf.FormatStatus("", "Downloading from %s", u))
		resp, err = httputils.Download(u.String())
		if err != nil {
			return err
		}
		progressReader := progressreader.New(progressreader.Config{
			In:        resp.Body,
			Out:       outStream,
			Formatter: sf,
			Size:      resp.ContentLength,
			NewLines:  true,
			ID:        "",
			Action:    "Importing",
		})
		archive = progressReader
	}

	defer archive.Close()
	img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, containerConfig)
	if err != nil {
		return err
	}
	// Optionally register the image at REPO/TAG
	if repo != "" {
		if err := s.Tag(repo, tag, img.ID, true); err != nil {
			return err
		}
	}
	outStream.Write(sf.FormatStatus("", img.ID))
	logID := img.ID
	if tag != "" {
		logID = utils.ImageReference(logID, tag)
	}

	s.eventsService.Log("import", logID, "")
	return nil
}
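
Import sizes its progress bar from resp.ContentLength, which may be -1 when the server sends no length. Here is a rough sketch of the same idea with nothing but net/http; the URL is a placeholder and countingReader stands in for progressreader.

package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

// countingReader counts bytes as they pass through and reports against
// an expected total when one exists.
type countingReader struct {
	r     io.Reader
	total int64 // resp.ContentLength; -1 when the server sent no length
	read  int64
}

func (c *countingReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	c.read += int64(n)
	if c.total > 0 {
		fmt.Fprintf(os.Stderr, "\rImporting %d/%d bytes", c.read, c.total)
	}
	return n, err
}

func main() {
	resp, err := http.Get("http://example.com/layer.tar") // placeholder URL
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer resp.Body.Close()

	cr := &countingReader{r: resp.Body, total: resp.ContentLength}
	n, err := io.Copy(io.Discard, cr) // stands in for s.graph.Create(...)
	fmt.Fprintf(os.Stderr, "\nread %d bytes, err=%v\n", n, err)
}
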
Example #4
func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (digest.Digest, error) {
	out := p.config.OutStream

	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Preparing", nil))

	image, err := p.graph.Get(img.ID)
	if err != nil {
		return "", err
	}
	arch, err := p.graph.TarLayer(image)
	if err != nil {
		return "", err
	}
	defer arch.Close()

	// Send the layer
	layerUpload, err := bs.Create(context.Background())
	if err != nil {
		return "", err
	}
	defer layerUpload.Close()

	digester := digest.Canonical.New()
	tee := io.TeeReader(arch, digester.Hash())

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(tee), // we'll take care of close here.
		Out:       out,
		Formatter: p.sf,

		// TODO(stevvooe): This may cause a size reporting error. Try to get
		// this from tar-split or elsewhere. The main issue here is that we
		// don't want to buffer to disk *just* to calculate the size.
		Size: img.Size,

		NewLines: false,
		ID:       stringid.TruncateID(img.ID),
		Action:   "Pushing",
	})

	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushing", nil))
	nn, err := io.Copy(layerUpload, reader)
	if err != nil {
		return "", err
	}

	dgst := digester.Digest()
	if _, err := layerUpload.Commit(context.Background(), distribution.Descriptor{Digest: dgst}); err != nil {
		return "", err
	}

	logrus.Debugf("uploaded layer %s (%s), %d bytes", img.ID, dgst, nn)
	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushed", nil))

	return dgst, nil
}
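
The v2 push tees the layer stream into a digester, so the content digest is computed during the upload rather than in a second pass over the data. The same shape can be sketched with the standard library, using crypto/sha256 in place of the distribution digest package:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	layer := strings.NewReader("pretend this is a tar layer") // stands in for graph.TarLayer
	h := sha256.New()

	// Every byte read from tee is also written to h, so once the copy
	// finishes the hash has seen the entire stream.
	tee := io.TeeReader(layer, h)

	n, err := io.Copy(io.Discard, tee) // io.Discard stands in for layerUpload
	if err != nil {
		panic(err)
	}
	fmt.Printf("uploaded %d bytes, digest sha256:%s\n", n, hex.EncodeToString(h.Sum(nil)))
}
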
Example #5
func (p *v2Puller) download(di *downloadInfo) {
	logrus.Debugf("pulling blob %q", di.digest)

	blobs := p.repo.Blobs(context.Background())

	desc, err := blobs.Stat(context.Background(), di.digest)
	if err != nil {
		logrus.Debugf("Error statting layer: %v", err)
		di.err <- err
		return
	}
	di.size = desc.Size

	layerDownload, err := blobs.Open(context.Background(), di.digest)
	if err != nil {
		logrus.Debugf("Error fetching layer: %v", err)
		di.err <- err
		return
	}
	defer layerDownload.Close()

	verifier, err := digest.NewDigestVerifier(di.digest)
	if err != nil {
		di.err <- err
		return
	}

	digestStr := di.digest.String()

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(io.TeeReader(layerDownload, verifier)),
		Out:       di.broadcaster,
		Formatter: p.sf,
		Size:      di.size,
		NewLines:  false,
		ID:        stringid.TruncateID(digestStr),
		Action:    "Downloading",
	})
	io.Copy(di.tmpFile, reader)

	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(digestStr), "Verifying Checksum", nil))

	if !verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest)
		logrus.Error(err)
		di.err <- err
		return
	}

	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(digestStr), "Download complete", nil))

	logrus.Debugf("Downloaded %s to tempfile %s", digestStr, di.tmpFile.Name())
	di.layer = layerDownload

	di.err <- nil
}
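
The download path mirrors that: the blob is teed through a verifier while being copied to a temp file, and verifier.Verified() is checked only after the copy completes. A self-contained sketch of the equivalent check follows; the expected digest is computed up front purely so the example runs on its own.

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	payload := "layer bytes"
	expected := sha256.Sum256([]byte(payload)) // the digest named in the manifest

	blob := strings.NewReader(payload) // stands in for blobs.Open(...)
	h := sha256.New()

	tmp, err := os.CreateTemp("", "GetImageBlob")
	if err != nil {
		panic(err)
	}
	defer os.Remove(tmp.Name())
	defer tmp.Close()

	// Copy to disk while hashing, mirroring io.Copy(di.tmpFile, reader).
	if _, err := io.Copy(tmp, io.TeeReader(blob, h)); err != nil {
		panic(err)
	}

	if !bytes.Equal(h.Sum(nil), expected[:]) {
		fmt.Println("filesystem layer verification failed")
		return
	}
	fmt.Println("Download complete")
}
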
Example #6
func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
	out = utils.NewWriteFlusher(out)
	jsonRaw, err := ioutil.ReadFile(path.Join(s.graph.Root, imgID, "json"))
	if err != nil {
		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
	}
	out.Write(sf.FormatProgress(common.TruncateID(imgID), "Pushing", nil))

	imgData := &registry.ImgData{
		ID: imgID,
	}

	// Send the json
	if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
		if err == registry.ErrAlreadyExists {
			out.Write(sf.FormatProgress(common.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
			return "", nil
		}
		return "", err
	}

	layerData, err := s.graph.TempLayerArchive(imgID, sf, out)
	if err != nil {
		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
	}
	defer os.RemoveAll(layerData.Name())

	// Send the layer
	log.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)

	checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID,
		progressreader.New(progressreader.Config{
			In:        layerData,
			Out:       out,
			Formatter: sf,
			Size:      int(layerData.Size),
			NewLines:  false,
			ID:        common.TruncateID(imgData.ID),
			Action:    "Pushing",
		}), ep, token, jsonRaw)
	if err != nil {
		return "", err
	}
	imgData.Checksum = checksum
	imgData.ChecksumPayload = checksumPayload
	// Send the checksum
	if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil {
		return "", err
	}

	out.Write(sf.FormatProgress(common.TruncateID(imgData.ID), "Image successfully pushed", nil))
	return imgData.Checksum, nil
}
Example #7
func (p *v2Pusher) pushV2Layer(bs distribution.BlobService, l layer.Layer) (digest.Digest, error) {
	out := p.config.OutStream
	displayID := stringid.TruncateID(string(l.DiffID()))

	out.Write(p.sf.FormatProgress(displayID, "Preparing", nil))

	arch, err := l.TarStream()
	if err != nil {
		return "", err
	}

	// Send the layer
	layerUpload, err := bs.Create(context.Background())
	if err != nil {
		return "", err
	}
	defer layerUpload.Close()

	// don't care if this fails; best effort
	size, _ := l.DiffSize()

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(arch), // we'll take care of close here.
		Out:       out,
		Formatter: p.sf,
		Size:      size,
		NewLines:  false,
		ID:        displayID,
		Action:    "Pushing",
	})

	compressedReader := compress(reader)

	digester := digest.Canonical.New()
	tee := io.TeeReader(compressedReader, digester.Hash())

	out.Write(p.sf.FormatProgress(displayID, "Pushing", nil))
	nn, err := layerUpload.ReadFrom(tee)
	compressedReader.Close()
	if err != nil {
		return "", err
	}

	dgst := digester.Digest()
	if _, err := layerUpload.Commit(context.Background(), distribution.Descriptor{Digest: dgst}); err != nil {
		return "", err
	}

	logrus.Debugf("uploaded layer %s (%s), %d bytes", l.DiffID(), dgst, nn)
	out.Write(p.sf.FormatProgress(displayID, "Pushed", nil))

	return dgst, nil
}
Example #8
// PushV2Image pushes the image content to the v2 registry, first buffering the contents to disk
func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint *registry.Endpoint, imageName string, sf *utils.StreamFormatter, out io.Writer, auth *registry.RequestAuthorization) (string, error) {
	out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Buffering to Disk", nil))

	image, err := s.graph.Get(img.ID)
	if err != nil {
		return "", err
	}
	arch, err := image.TarLayer()
	if err != nil {
		return "", err
	}
	defer arch.Close()

	tf, err := s.graph.newTempFile()
	if err != nil {
		return "", err
	}
	defer func() {
		tf.Close()
		os.Remove(tf.Name())
	}()

	h := sha256.New()
	size, err := bufferToFile(tf, io.TeeReader(arch, h))
	if err != nil {
		return "", err
	}
	dgst := digest.NewDigest("sha256", h)

	// Send the layer
	log.Debugf("rendered layer for %s of [%d] size", img.ID, size)

	if err := r.PutV2ImageBlob(endpoint, imageName, dgst.Algorithm(), dgst.Hex(),
		progressreader.New(progressreader.Config{
			In:        tf,
			Out:       out,
			Formatter: sf,
			Size:      int(size),
			NewLines:  false,
			ID:        common.TruncateID(img.ID),
			Action:    "Pushing",
		}), auth); err != nil {
		out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image push failed", nil))
		return "", err
	}
	out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image successfully pushed", nil))
	return dgst.String(), nil
}
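
This older push buffers the layer to a temp file first, hashing as it buffers, so the subsequent upload can report an exact size. A standard-library sketch of that buffer-then-rewind shape (bufferToFile is approximated here by io.Copy):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	arch := strings.NewReader("tar layer bytes") // stands in for image.TarLayer()

	tf, err := os.CreateTemp("", "layer")
	if err != nil {
		panic(err)
	}
	defer func() {
		tf.Close()
		os.Remove(tf.Name())
	}()

	h := sha256.New()
	// One pass: bytes land in the temp file and in the hash.
	size, err := io.Copy(tf, io.TeeReader(arch, h))
	if err != nil {
		panic(err)
	}
	fmt.Printf("rendered layer of [%d] size, digest sha256:%s\n", size, hex.EncodeToString(h.Sum(nil)))

	// Rewind before re-reading for the actual upload.
	if _, err := tf.Seek(0, io.SeekStart); err != nil {
		panic(err)
	}
	n, _ := io.Copy(io.Discard, tf) // stands in for PutV2ImageBlob
	fmt.Println("pushed", n, "bytes")
}
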
Example #9
func (p *v2Puller) download(di *downloadInfo) {
	logrus.Debugf("pulling blob %q to %s", di.digest, di.img.ID)

	out := di.out

	broadcaster, found := p.poolAdd("pull", "img:"+di.img.ID)
	if found {
		broadcaster.Add(out)
		broadcaster.Wait()
		out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Download complete", nil))
		di.err <- nil
		return
	}

	broadcaster.Add(out)
	defer p.poolRemove("pull", "img:"+di.img.ID)
	tmpFile, err := ioutil.TempFile("", "GetImageBlob")
	if err != nil {
		di.err <- err
		return
	}
	di.tmpFile = tmpFile

	blobs := p.repo.Blobs(context.Background())

	desc, err := blobs.Stat(context.Background(), di.digest)
	if err != nil {
		logrus.Debugf("Error statting layer: %v", err)
		di.err <- err
		return
	}
	di.size = desc.Size

	layerDownload, err := blobs.Open(context.Background(), di.digest)
	if err != nil {
		logrus.Debugf("Error fetching layer: %v", err)
		di.err <- err
		return
	}
	defer layerDownload.Close()

	verifier, err := digest.NewDigestVerifier(di.digest)
	if err != nil {
		di.err <- err
		return
	}

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(io.TeeReader(layerDownload, verifier)),
		Out:       broadcaster,
		Formatter: p.sf,
		Size:      di.size,
		NewLines:  false,
		ID:        stringid.TruncateID(di.img.ID),
		Action:    "Downloading",
	})
	io.Copy(tmpFile, reader)

	broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Verifying Checksum", nil))

	if !verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest)
		logrus.Error(err)
		di.err <- err
		return
	}

	broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Download complete", nil))

	logrus.Debugf("Downloaded %s to tempfile %s", di.img.ID, tmpFile.Name())
	di.layer = layerDownload

	di.err <- nil
}
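
poolAdd is what deduplicates concurrent pulls: the first caller for a key performs the download while later callers attach to the same broadcaster and wait. A compact sketch of such a pool, using a mutex and one done-channel per key (the broadcaster's output fan-out and the poolRemove cleanup are omitted):

package main

import (
	"fmt"
	"sync"
)

type pool struct {
	mu   sync.Mutex
	jobs map[string]chan struct{}
}

// add returns the key's done channel plus a "found" flag, like poolAdd:
// found means another goroutine is already downloading this layer.
func (p *pool) add(key string) (chan struct{}, bool) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if ch, ok := p.jobs[key]; ok {
		return ch, true
	}
	ch := make(chan struct{})
	p.jobs[key] = ch
	return ch, false
}

func main() {
	p := &pool{jobs: make(map[string]chan struct{})}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			ch, found := p.add("img:abc123")
			if found {
				<-ch // wait for the active download to finish
				fmt.Printf("client %d: Download complete (shared)\n", i)
				return
			}
			fmt.Printf("client %d: pulling fs layer\n", i)
			close(ch) // signal all waiters
		}(i)
	}
	wg.Wait()
}
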
Example #10
// Build is the main interface of the package, it gathers the Builder
// struct and calls builder.Run() to do all the real build job.
func Build(d *daemon.Daemon, buildConfig *Config) error {
	var (
		repoName string
		tag      string
		context  io.ReadCloser
	)
	sf := streamformatter.NewJSONStreamFormatter()

	repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName)
	if repoName != "" {
		if err := registry.ValidateRepositoryName(repoName); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}

	if buildConfig.RemoteURL == "" {
		context = ioutil.NopCloser(buildConfig.Context)
	} else if urlutil.IsGitURL(buildConfig.RemoteURL) {
		root, err := utils.GitClone(buildConfig.RemoteURL)
		if err != nil {
			return err
		}
		defer os.RemoveAll(root)

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return err
		}
		context = c
	} else if urlutil.IsURL(buildConfig.RemoteURL) {
		f, err := httputils.Download(buildConfig.RemoteURL)
		if err != nil {
			return fmt.Errorf("Error downloading remote context %s: %v", buildConfig.RemoteURL, err)
		}
		defer f.Body.Close()
		ct := f.Header.Get("Content-Type")
		clen := int(f.ContentLength)
		contentType, bodyReader, err := inspectResponse(ct, f.Body, clen)

		defer bodyReader.Close()

		if err != nil {
			return fmt.Errorf("Error detecting content type for remote %s: %v", buildConfig.RemoteURL, err)
		}
		if contentType == httputils.MimeTypes.TextPlain {
			dockerFile, err := ioutil.ReadAll(bodyReader)
			if err != nil {
				return err
			}

			// When we're downloading just a Dockerfile put it in
			// the default name - don't allow the client to move/specify it
			buildConfig.DockerfileName = api.DefaultDockerfileName

			c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile))
			if err != nil {
				return err
			}
			context = c
		} else {
			// Pass through - this is a pre-packaged context, presumably
			// with a Dockerfile with the right name inside it.
			prCfg := progressreader.Config{
				In:        bodyReader,
				Out:       buildConfig.Stdout,
				Formatter: sf,
				Size:      clen,
				NewLines:  true,
				ID:        "Downloading context",
				Action:    buildConfig.RemoteURL,
			}
			context = progressreader.New(prCfg)
		}
	}

	defer context.Close()

	builder := &builder{
		Daemon: d,
		OutStream: &streamformatter.StdoutFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &streamformatter.StderrFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !buildConfig.SuppressOutput,
		UtilizeCache:    !buildConfig.NoCache,
		Remove:          buildConfig.Remove,
		ForceRemove:     buildConfig.ForceRemove,
		Pull:            buildConfig.Pull,
		OutOld:          buildConfig.Stdout,
		StreamFormatter: sf,
		AuthConfigs:     buildConfig.AuthConfigs,
		dockerfileName:  buildConfig.DockerfileName,
		cpuShares:       buildConfig.CPUShares,
		cpuPeriod:       buildConfig.CPUPeriod,
		cpuQuota:        buildConfig.CPUQuota,
		cpuSetCpus:      buildConfig.CPUSetCpus,
		cpuSetMems:      buildConfig.CPUSetMems,
		cgroupParent:    buildConfig.CgroupParent,
		memory:          buildConfig.Memory,
		memorySwap:      buildConfig.MemorySwap,
		cancelled:       buildConfig.WaitCancelled(),
		id:              stringid.GenerateRandomID(),
	}

	defer func() {
		builder.Daemon.Graph().Release(builder.id, builder.activeImages...)
	}()

	id, err := builder.Run(context)
	if err != nil {
		return err
	}
	if repoName != "" {
		return d.Repositories().Tag(repoName, tag, id, true)
	}
	return nil
}
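
When the remote context turns out to be plain text, Build treats it as a bare Dockerfile; otherwise it streams it through the progress reader as a packaged archive. inspectResponse itself is not shown above, but the core decision can be sketched with http.DetectContentType alone (the original also honors the Content-Type header and body length):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	dockerfile := []byte("FROM busybox\nRUN echo hello\n")
	gzipHeader := []byte{0x1f, 0x8b, 0x08, 0, 0, 0, 0, 0, 0, 0}

	// Sniffs the leading bytes, like the branch on MimeTypes.TextPlain.
	fmt.Println(http.DetectContentType(dockerfile)) // text/plain; charset=utf-8
	fmt.Println(http.DetectContentType(gzipHeader)) // application/x-gzip
}
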
Example #11
func (p *v2Puller) pullV2Tag(out io.Writer, tag, taggedName string) (tagUpdated bool, err error) {
	logrus.Debugf("Pulling tag from V2 registry: %q", tag)

	manSvc, err := p.repo.Manifests(context.Background())
	if err != nil {
		return false, err
	}

	unverifiedManifest, err := manSvc.GetByTag(tag)
	if err != nil {
		return false, err
	}
	if unverifiedManifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag %q", tag)
	}
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifyManifest(unverifiedManifest, tag)
	if err != nil {
		return false, err
	}

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return false, err
	}

	imgs, err := p.getImageInfos(verifiedManifest)
	if err != nil {
		return false, err
	}

	out.Write(p.sf.FormatStatus(tag, "Pulling from %s", p.repo.Name()))

	var downloads []*downloadInfo

	var layerIDs []string
	defer func() {
		p.graph.Release(p.sessionID, layerIDs...)

		for _, d := range downloads {
			p.poolRemoveWithError("pull", d.poolKey, err)
			if d.tmpFile != nil {
				d.tmpFile.Close()
				if err := os.RemoveAll(d.tmpFile.Name()); err != nil {
					logrus.Errorf("Failed to remove temp file: %s", d.tmpFile.Name())
				}
			}
		}
	}()

	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		img := imgs[i]

		p.graph.Retain(p.sessionID, img.id)
		layerIDs = append(layerIDs, img.id)

		p.graph.imageMutex.Lock(img.id)

		// Check if exists
		if p.graph.Exists(img.id) {
			if err := p.validateImageInGraph(img.id, imgs, i); err != nil {
				p.graph.imageMutex.Unlock(img.id)
				return false, fmt.Errorf("image validation failed: %v", err)
			}
			logrus.Debugf("Image already exists: %s", img.id)
			p.graph.imageMutex.Unlock(img.id)
			continue
		}
		p.graph.imageMutex.Unlock(img.id)

		out.Write(p.sf.FormatProgress(stringid.TruncateID(img.id), "Pulling fs layer", nil))

		d := &downloadInfo{
			img:      img,
			imgIndex: i,
			poolKey:  "v2layer:" + img.id,
			digest:   verifiedManifest.FSLayers[i].BlobSum,
			// TODO: this channel buffer seems to have solved a hanging problem
			// in go1.5, which may indicate a deeper problem: somehow we never
			// take the error from the channel in the loop below
			err: make(chan error, 1),
		}

		tmpFile, err := ioutil.TempFile("", "GetImageBlob")
		if err != nil {
			return false, err
		}
		d.tmpFile = tmpFile

		downloads = append(downloads, d)

		broadcaster, found := p.poolAdd("pull", d.poolKey)
		broadcaster.Add(out)
		d.broadcaster = broadcaster
		if found {
			d.err <- nil
		} else {
			go p.download(d)
		}
	}

	for _, d := range downloads {
		if err := <-d.err; err != nil {
			return false, err
		}

		if d.layer == nil {
			// Wait for a different pull to download and extract
			// this layer.
			err = d.broadcaster.Wait()
			if err != nil {
				return false, err
			}
			continue
		}

		d.tmpFile.Seek(0, 0)
		err := func() error {
			reader := progressreader.New(progressreader.Config{
				In:        d.tmpFile,
				Out:       d.broadcaster,
				Formatter: p.sf,
				Size:      d.size,
				NewLines:  false,
				ID:        stringid.TruncateID(d.img.id),
				Action:    "Extracting",
			})

			p.graph.imagesMutex.Lock()
			defer p.graph.imagesMutex.Unlock()

			p.graph.imageMutex.Lock(d.img.id)
			defer p.graph.imageMutex.Unlock(d.img.id)

			// Must recheck the data on disk if any exists.
			// This protects against races where something
			// else is written to the graph under this ID
			// after attemptIDReuse.
			if p.graph.Exists(d.img.id) {
				if err := p.validateImageInGraph(d.img.id, imgs, d.imgIndex); err != nil {
					return fmt.Errorf("image validation failed: %v", err)
				}
			}

			if err := p.graph.register(d.img, reader); err != nil {
				return err
			}

			if err := p.graph.setLayerDigest(d.img.id, d.digest); err != nil {
				return err
			}

			if err := p.graph.setV1CompatibilityConfig(d.img.id, d.img.v1Compatibility); err != nil {
				return err
			}

			return nil
		}()
		if err != nil {
			return false, err
		}

		d.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.id), "Pull complete", nil))
		d.broadcaster.Close()
		tagUpdated = true
	}

	manifestDigest, _, err := digestFromManifest(unverifiedManifest, p.repoInfo.LocalName)
	if err != nil {
		return false, err
	}

	// Check for new tag if no layers downloaded
	if !tagUpdated {
		repo, err := p.Get(p.repoInfo.LocalName)
		if err != nil {
			return false, err
		}
		if repo != nil {
			if _, exists := repo[tag]; !exists {
				tagUpdated = true
			}
		} else {
			tagUpdated = true
		}
	}

	firstID := layerIDs[len(layerIDs)-1]
	if utils.DigestReference(tag) {
		// TODO(stevvooe): Ideally, we should always set the digest so we can
		// use the digest whether we pull by it or not. Unfortunately, the tag
		// store treats the digest as a separate tag, meaning there may be an
		// untagged digest image that would seem to be dangling by a user.
		if err = p.SetDigest(p.repoInfo.LocalName, tag, firstID); err != nil {
			return false, err
		}
	} else {
		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
		if err = p.Tag(p.repoInfo.LocalName, tag, firstID, true); err != nil {
			return false, err
		}
	}

	if manifestDigest != "" {
		out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest))
	}

	return tagUpdated, nil
}
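
Note the buffered error channel (make(chan error, 1)) flagged by the TODO above: the buffer lets a download goroutine deliver its result and exit even if the collecting loop has already returned on an earlier error. A small sketch of why that buffer matters:

package main

import (
	"errors"
	"fmt"
)

func main() {
	results := make([]chan error, 3)
	for i := range results {
		// Buffer of 1: the send below can never block, so a download
		// goroutine cannot leak even if nobody reads its channel.
		results[i] = make(chan error, 1)
		go func(i int, ch chan error) {
			if i == 0 {
				ch <- errors.New("download failed") // hypothetical failure
				return
			}
			ch <- nil
		}(i, results[i])
	}

	for i, ch := range results {
		if err := <-ch; err != nil {
			// With unbuffered channels, returning here would strand any
			// goroutine still blocked on its send.
			fmt.Printf("download %d: %v\n", i, err)
			return
		}
		fmt.Printf("download %d: ok\n", i)
	}
}
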
Example #12
func (p *v2Puller) pullV2Tag(tag, taggedName string) (verified bool, err error) {
	logrus.Debugf("Pulling tag from V2 registry: %q", tag)
	out := p.config.OutStream

	manSvc, err := p.repo.Manifests(context.Background())
	if err != nil {
		return false, err
	}

	manifest, err := manSvc.GetByTag(tag)
	if err != nil {
		return false, err
	}
	verified, err = p.validateManifest(manifest, tag)
	if err != nil {
		return false, err
	}
	if verified {
		logrus.Printf("Image manifest for %s has been verified", taggedName)
	}

	// By using a pipeWriter for each of the downloads to write their progress
	// to, we can avoid an issue where this function returns an error but
	// leaves behind running download goroutines. By splitting the writer
	// with a pipe, we can close the pipe if there is any error, consequently
	// causing each download to cancel due to an error writing to this pipe.
	pipeReader, pipeWriter := io.Pipe()
	go func() {
		if _, err := io.Copy(out, pipeReader); err != nil {
			logrus.Errorf("error copying from layer download progress reader: %s", err)
		}
	}()
	defer func() {
		if err != nil {
			// All operations on the pipe are synchronous. This call will wait
			// until all current readers/writers are done using the pipe then
			// set the error. All successive reads/writes will return with this
			// error.
			pipeWriter.CloseWithError(errors.New("download canceled"))
		}
	}()

	out.Write(p.sf.FormatStatus(tag, "Pulling from %s", p.repo.Name()))

	var downloads []*downloadInfo

	var layerIDs []string
	defer func() {
		p.graph.Release(p.sessionID, layerIDs...)
	}()

	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
		img, err := image.NewImgJSON([]byte(manifest.History[i].V1Compatibility))
		if err != nil {
			logrus.Debugf("error getting image v1 json: %v", err)
			return false, err
		}
		p.graph.Retain(p.sessionID, img.ID)
		layerIDs = append(layerIDs, img.ID)

		// Check if exists
		if p.graph.Exists(img.ID) {
			logrus.Debugf("Image already exists: %s", img.ID)
			out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Already exists", nil))
			continue
		}
		out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))

		d := &downloadInfo{
			img:    img,
			digest: manifest.FSLayers[i].BlobSum,
			// TODO: this channel buffer seems to have solved a hanging problem
			// in go1.5, which may indicate a deeper problem: somehow we never
			// take the error from the channel in the loop below
			err: make(chan error, 1),
			out: pipeWriter,
		}
		downloads = append(downloads, d)

		go p.download(d)
	}

	// run clean for all downloads to prevent leftovers
	for _, d := range downloads {
		defer func(d *downloadInfo) {
			if d.tmpFile != nil {
				d.tmpFile.Close()
				if err := os.RemoveAll(d.tmpFile.Name()); err != nil {
					logrus.Errorf("Failed to remove temp file: %s", d.tmpFile.Name())
				}
			}
		}(d)
	}

	var tagUpdated bool
	for _, d := range downloads {
		if err := <-d.err; err != nil {
			return false, err
		}
		if d.layer == nil {
			continue
		}
		// if tmpFile is empty, assume the layer was downloaded and extracted elsewhere
		d.tmpFile.Seek(0, 0)
		reader := progressreader.New(progressreader.Config{
			In:        d.tmpFile,
			Out:       out,
			Formatter: p.sf,
			Size:      d.size,
			NewLines:  false,
			ID:        stringid.TruncateID(d.img.ID),
			Action:    "Extracting",
		})

		err = p.graph.Register(d.img, reader)
		if err != nil {
			return false, err
		}

		if err := p.graph.SetDigest(d.img.ID, d.digest); err != nil {
			return false, err
		}

		// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
		out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
		tagUpdated = true
	}

	manifestDigest, _, err := digestFromManifest(manifest, p.repoInfo.LocalName)
	if err != nil {
		return false, err
	}

	// Check for new tag if no layers downloaded
	if !tagUpdated {
		repo, err := p.Get(p.repoInfo.LocalName)
		if err != nil {
			return false, err
		}
		if repo != nil {
			if _, exists := repo[tag]; !exists {
				tagUpdated = true
			}
		} else {
			tagUpdated = true
		}
	}

	if verified && tagUpdated {
		out.Write(p.sf.FormatStatus(p.repo.Name()+":"+tag, "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
	}

	firstID := layerIDs[len(layerIDs)-1]
	if utils.DigestReference(tag) {
		// TODO(stevvooe): Ideally, we should always set the digest so we can
		// use the digest whether we pull by it or not. Unfortunately, the tag
		// store treats the digest as a separate tag, meaning there may be an
		// untagged digest image that would seem to be dangling by a user.
		if err = p.SetDigest(p.repoInfo.LocalName, tag, firstID); err != nil {
			return false, err
		}
	} else {
		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
		if err = p.Tag(p.repoInfo.LocalName, tag, firstID, true); err != nil {
			return false, err
		}
	}

	if manifestDigest != "" {
		out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest))
	}

	return tagUpdated, nil
}
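
The pipeWriter arrangement at the top of this function is worth isolating: progress output is routed through an io.Pipe so that, on error, CloseWithError makes every in-flight download's next Write fail and the goroutines unwind instead of leaking. A minimal sketch of cancelling writers through a pipe:

package main

import (
	"errors"
	"fmt"
	"io"
	"sync"
	"time"
)

func main() {
	pr, pw := io.Pipe()

	// Consumer: drains progress output until the pipe is closed,
	// standing in for io.Copy(out, pipeReader) above.
	go io.Copy(io.Discard, pr)

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for {
				// Each write succeeds until the pipe is closed with an
				// error; then it fails and the goroutine unwinds.
				if _, err := fmt.Fprintf(pw, "worker %d: progress\n", id); err != nil {
					fmt.Printf("worker %d stopped: %v\n", id, err)
					return
				}
				time.Sleep(10 * time.Millisecond)
			}
		}(i)
	}

	time.Sleep(50 * time.Millisecond)
	pw.CloseWithError(errors.New("download canceled"))
	wg.Wait()
}
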
Example #13
func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (digest.Digest, error) {
	out := p.config.OutStream

	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Preparing", nil))

	image, err := p.graph.Get(img.ID)
	if err != nil {
		return "", err
	}
	arch, err := p.graph.TarLayer(image)
	if err != nil {
		return "", err
	}
	defer arch.Close()

	// Send the layer
	layerUpload, err := bs.Create(context.Background())
	if err != nil {
		return "", err
	}
	defer layerUpload.Close()

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(arch), // we'll take care of close here.
		Out:       out,
		Formatter: p.sf,

		// TODO(stevvooe): This may cause a size reporting error. Try to get
		// this from tar-split or elsewhere. The main issue here is that we
		// don't want to buffer to disk *just* to calculate the size.
		Size: img.Size,

		NewLines: false,
		ID:       stringid.TruncateID(img.ID),
		Action:   "Pushing",
	})

	digester := digest.Canonical.New()
	// HACK: The MultiWriter doesn't write directly to layerUpload because
	// we must make sure the ReadFrom is used, not Write. Using Write would
	// send a PATCH request for every Write call.
	pipeReader, pipeWriter := io.Pipe()
	// Use a bufio.Writer to avoid excessive chunking in HTTP request.
	bufWriter := bufio.NewWriterSize(io.MultiWriter(pipeWriter, digester.Hash()), compressionBufSize)
	compressor := gzip.NewWriter(bufWriter)

	go func() {
		_, err := io.Copy(compressor, reader)
		if err == nil {
			err = compressor.Close()
		}
		if err == nil {
			err = bufWriter.Flush()
		}
		if err != nil {
			pipeWriter.CloseWithError(err)
		} else {
			pipeWriter.Close()
		}
	}()

	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushing", nil))
	nn, err := layerUpload.ReadFrom(pipeReader)
	pipeReader.Close()
	if err != nil {
		return "", err
	}

	dgst := digester.Digest()
	if _, err := layerUpload.Commit(context.Background(), distribution.Descriptor{Digest: dgst}); err != nil {
		return "", err
	}

	logrus.Debugf("uploaded layer %s (%s), %d bytes", img.ID, dgst, nn)
	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushed", nil))

	return dgst, nil
}
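
The "HACK" in this variant pushes compressed bytes through an io.Pipe so the upload side consumes a single reader via ReadFrom instead of issuing a PATCH per Write. A trimmed, standard-library sketch of that producer/consumer shape follows; 32 KiB is assumed for compressionBufSize, and the digest covers the compressed stream, as in the original.

package main

import (
	"bufio"
	"compress/gzip"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	layer := strings.NewReader(strings.Repeat("layer data ", 1000))

	h := sha256.New()
	pr, pw := io.Pipe()
	// Buffer writes so the reader side sees fewer, larger chunks.
	bw := bufio.NewWriterSize(io.MultiWriter(pw, h), 32*1024)
	gz := gzip.NewWriter(bw)

	go func() {
		// Compress, flush, then close the pipe; any error is propagated
		// to the reader via CloseWithError.
		_, err := io.Copy(gz, layer)
		if err == nil {
			err = gz.Close()
		}
		if err == nil {
			err = bw.Flush()
		}
		if err != nil {
			pw.CloseWithError(err)
		} else {
			pw.Close()
		}
	}()

	n, err := io.Copy(io.Discard, pr) // the consumer's ReadFrom side
	if err != nil {
		panic(err)
	}
	fmt.Printf("uploaded %d compressed bytes, digest sha256:%s\n", n, hex.EncodeToString(h.Sum(nil)))
}
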
Example #14
// CmdBuild builds a new image from the source code at a given path.
//
// If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN.
//
// Usage: docker build [OPTIONS] PATH | URL | -
func (cli *DockerCli) CmdBuild(args ...string) error {
	cmd := cli.Subcmd("build", "PATH | URL | -", "Build a new image from the source code at PATH", true)
	tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) for the image")
	suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers")
	noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
	rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build")
	forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers")
	pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
	dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
	flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit")
	flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap")
	flCPUShares := cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
	flCpuQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
	flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
	flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
	flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")

	cmd.Require(flag.Exact, 1)
	cmd.ParseFlags(args, true)

	var (
		context  archive.Archive
		isRemote bool
		err      error
	)

	_, err = exec.LookPath("git")
	hasGit := err == nil
	if cmd.Arg(0) == "-" {
		// As a special case, 'docker build -' will build from either an empty context with the
		// contents of stdin as a Dockerfile, or a tar-ed context from stdin.
		buf := bufio.NewReader(cli.in)
		magic, err := buf.Peek(tarHeaderSize)
		if err != nil && err != io.EOF {
			return fmt.Errorf("failed to peek context header from STDIN: %v", err)
		}
		if !archive.IsArchive(magic) {
			dockerfile, err := ioutil.ReadAll(buf)
			if err != nil {
				return fmt.Errorf("failed to read Dockerfile from STDIN: %v", err)
			}

			// -f option has no meaning when we're reading it from stdin,
			// so just use our default Dockerfile name
			*dockerfileName = api.DefaultDockerfileName
			context, err = archive.Generate(*dockerfileName, string(dockerfile))
		} else {
			context = ioutil.NopCloser(buf)
		}
	} else if urlutil.IsURL(cmd.Arg(0)) && (!urlutil.IsGitURL(cmd.Arg(0)) || !hasGit) {
		isRemote = true
	} else {
		root := cmd.Arg(0)
		if urlutil.IsGitURL(root) {
			root, err = utils.GitClone(root)
			if err != nil {
				return err
			}
			defer os.RemoveAll(root)
		}
		if _, err := os.Stat(root); err != nil {
			return err
		}

		absRoot, err := filepath.Abs(root)
		if err != nil {
			return err
		}

		filename := *dockerfileName // path to Dockerfile

		if *dockerfileName == "" {
			// No -f/--file was specified so use the default
			*dockerfileName = api.DefaultDockerfileName
			filename = filepath.Join(absRoot, *dockerfileName)

			// Just to be nice ;-) look for 'dockerfile' too but only
			// use it if we found it, otherwise ignore this check
			if _, err = os.Lstat(filename); os.IsNotExist(err) {
				tmpFN := path.Join(absRoot, strings.ToLower(*dockerfileName))
				if _, err = os.Lstat(tmpFN); err == nil {
					*dockerfileName = strings.ToLower(*dockerfileName)
					filename = tmpFN
				}
			}
		}

		origDockerfile := *dockerfileName // used for error msg
		if filename, err = filepath.Abs(filename); err != nil {
			return err
		}

		// Verify that 'filename' is within the build context
		filename, err = symlink.FollowSymlinkInScope(filename, absRoot)
		if err != nil {
			return fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", origDockerfile, root)
		}

		// Now reset the dockerfileName to be relative to the build context
		*dockerfileName, err = filepath.Rel(absRoot, filename)
		if err != nil {
			return err
		}
		// And canonicalize dockerfile name to a platform-independent one
		*dockerfileName, err = archive.CanonicalTarNameForPath(*dockerfileName)
		if err != nil {
			return fmt.Errorf("Cannot canonicalize dockerfile path %s: %v", *dockerfileName, err)
		}

		if _, err = os.Lstat(filename); os.IsNotExist(err) {
			return fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile)
		}
		var includes = []string{"."}

		excludes, err := utils.ReadDockerIgnore(path.Join(root, ".dockerignore"))
		if err != nil {
			return err
		}

		// If .dockerignore mentions .dockerignore or the Dockerfile
		// then make sure we send both files over to the daemon
		// because Dockerfile is, obviously, needed no matter what, and
		// .dockerignore is needed to know if either one needs to be
		// removed.  The daemon will remove them for us, if needed, after it
		// parses the Dockerfile.
		keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
		keepThem2, _ := fileutils.Matches(*dockerfileName, excludes)
		if keepThem1 || keepThem2 {
			includes = append(includes, ".dockerignore", *dockerfileName)
		}

		if err := utils.ValidateContextDirectory(root, excludes); err != nil {
			return fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err)
		}
		options := &archive.TarOptions{
			Compression:     archive.Uncompressed,
			ExcludePatterns: excludes,
			IncludeFiles:    includes,
		}
		context, err = archive.TarWithOptions(root, options)
		if err != nil {
			return err
		}
	}

	// windows: show error message about modified file permissions
	// FIXME: this is not a valid warning when the daemon is running windows. Should be removed once docker engine for windows can build.
	if runtime.GOOS == "windows" {
		logrus.Warn(`SECURITY WARNING: You are building a Docker image from Windows against a Linux Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
	}

	var body io.Reader
	// Setup an upload progress bar
	// FIXME: ProgressReader shouldn't be this annoying to use
	if context != nil {
		sf := streamformatter.NewStreamFormatter(false)
		body = progressreader.New(progressreader.Config{
			In:        context,
			Out:       cli.out,
			Formatter: sf,
			NewLines:  true,
			ID:        "",
			Action:    "Sending build context to Docker daemon",
		})
	}

	var memory int64
	if *flMemoryString != "" {
		parsedMemory, err := units.RAMInBytes(*flMemoryString)
		if err != nil {
			return err
		}
		memory = parsedMemory
	}

	var memorySwap int64
	if *flMemorySwap != "" {
		if *flMemorySwap == "-1" {
			memorySwap = -1
		} else {
			parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap)
			if err != nil {
				return err
			}
			memorySwap = parsedMemorySwap
		}
	}
	// Send the build context
	v := &url.Values{}

	// Check if the given image name can be resolved
	if *tag != "" {
		repository, tag := parsers.ParseRepositoryTag(*tag)
		if err := registry.ValidateRepositoryName(repository); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := graph.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}

	v.Set("t", *tag)

	if *suppressOutput {
		v.Set("q", "1")
	}
	if isRemote {
		v.Set("remote", cmd.Arg(0))
	}
	if *noCache {
		v.Set("nocache", "1")
	}
	if *rm {
		v.Set("rm", "1")
	} else {
		v.Set("rm", "0")
	}

	if *forceRm {
		v.Set("forcerm", "1")
	}

	if *pull {
		v.Set("pull", "1")
	}

	v.Set("cpusetcpus", *flCPUSetCpus)
	v.Set("cpusetmems", *flCPUSetMems)
	v.Set("cpushares", strconv.FormatInt(*flCPUShares, 10))
	v.Set("cpuquota", strconv.FormatInt(*flCpuQuota, 10))
	v.Set("memory", strconv.FormatInt(memory, 10))
	v.Set("memswap", strconv.FormatInt(memorySwap, 10))
	v.Set("cgroupparent", *flCgroupParent)

	v.Set("dockerfile", *dockerfileName)

	headers := http.Header(make(map[string][]string))
	buf, err := json.Marshal(cli.configFile.AuthConfigs)
	if err != nil {
		return err
	}
	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))

	if context != nil {
		headers.Set("Content-Type", "application/tar")
	}
	sopts := &streamOpts{
		rawTerminal: true,
		in:          body,
		out:         cli.out,
		headers:     headers,
	}
	err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), sopts)
	if jerr, ok := err.(*jsonmessage.JSONError); ok {
		// If no error code is set, default to 1
		if jerr.Code == 0 {
			jerr.Code = 1
		}
		return StatusError{Status: jerr.Message, StatusCode: jerr.Code}
	}
	return err
}
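
The 'docker build -' branch decides whether STDIN is a tar context or a bare Dockerfile by peeking at the first bytes without consuming them. A sketch of that sniffing step with bufio.Reader.Peek, assuming 512 bytes for tarHeaderSize (one tar header block) and a simplified stand-in for archive.IsArchive:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strings"
)

// looksLikeTar is a simplified stand-in for archive.IsArchive: the
// ustar magic sits at offset 257 of the first 512-byte header block.
func looksLikeTar(magic []byte) bool {
	return len(magic) > 262 && bytes.Equal(magic[257:262], []byte("ustar"))
}

func main() {
	stdin := strings.NewReader("FROM busybox\nRUN true\n")
	buf := bufio.NewReader(stdin)

	// Peek does not consume, so whichever branch we take still sees
	// the stream from the beginning.
	magic, err := buf.Peek(512)
	if err != nil && err != io.EOF {
		fmt.Println("failed to peek context header from STDIN:", err)
		return
	}
	if looksLikeTar(magic) {
		fmt.Println("treating STDIN as a tar-ed context")
		return
	}
	dockerfile, _ := io.ReadAll(buf)
	fmt.Printf("treating STDIN as a Dockerfile (%d bytes)\n", len(dockerfile))
}
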
Example #15
func (p *v2Puller) download(di *downloadInfo) {
	logrus.Debugf("pulling blob %q to %s", di.digest, di.img.ID)

	out := p.config.OutStream

	if c, err := p.poolAdd("pull", "img:"+di.img.ID); err != nil {
		if c != nil {
			out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Layer already being pulled by another client. Waiting.", nil))
			<-c
			out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Download complete", nil))
		} else {
			logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", di.img.ID, err)
		}
		di.err <- nil
		return
	}

	defer p.poolRemove("pull", "img:"+di.img.ID)
	tmpFile, err := ioutil.TempFile("", "GetImageBlob")
	if err != nil {
		di.err <- err
		return
	}

	blobs := p.repo.Blobs(nil)

	desc, err := blobs.Stat(nil, di.digest)
	if err != nil {
		logrus.Debugf("Error statting layer: %v", err)
		di.err <- err
		return
	}
	di.size = desc.Size

	layerDownload, err := blobs.Open(nil, di.digest)
	if err != nil {
		logrus.Debugf("Error fetching layer: %v", err)
		di.err <- err
		return
	}
	defer layerDownload.Close()

	verifier, err := digest.NewDigestVerifier(di.digest)
	if err != nil {
		di.err <- err
		return
	}

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(io.TeeReader(layerDownload, verifier)),
		Out:       out,
		Formatter: p.sf,
		Size:      int(di.size),
		NewLines:  false,
		ID:        stringid.TruncateID(di.img.ID),
		Action:    "Downloading",
	})
	io.Copy(tmpFile, reader)

	out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Verifying Checksum", nil))

	if !verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest)
		logrus.Error(err)
		di.err <- err
		return
	}

	out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Download complete", nil))

	logrus.Debugf("Downloaded %s to tempfile %s", di.img.ID, tmpFile.Name())
	di.tmpFile = tmpFile
	di.layer = layerDownload

	di.err <- nil
}
Example #16
func (p *v2Puller) download(di *downloadInfo) {
	logrus.Debugf("pulling blob %q", di.digest)

	blobs := p.repo.Blobs(context.Background())

	layerDownload, err := blobs.Open(context.Background(), di.digest)
	if err != nil {
		logrus.Debugf("Error fetching layer: %v", err)
		di.err <- err
		return
	}
	defer layerDownload.Close()

	di.size, err = layerDownload.Seek(0, os.SEEK_END)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		di.size = 0
	} else {
		// Restore the seek offset at the beginning of the stream.
		_, err = layerDownload.Seek(0, os.SEEK_SET)
		if err != nil {
			di.err <- err
			return
		}
	}

	verifier, err := digest.NewDigestVerifier(di.digest)
	if err != nil {
		di.err <- err
		return
	}

	digestStr := di.digest.String()

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(io.TeeReader(layerDownload, verifier)),
		Out:       di.broadcaster,
		Formatter: p.sf,
		Size:      di.size,
		NewLines:  false,
		ID:        stringid.TruncateID(digestStr),
		Action:    "Downloading",
	})
	io.Copy(di.tmpFile, reader)

	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(digestStr), "Verifying Checksum", nil))

	if !verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest)
		logrus.Error(err)
		di.err <- err
		return
	}

	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(digestStr), "Download complete", nil))

	logrus.Debugf("Downloaded %s to tempfile %s", digestStr, di.tmpFile.Name())
	di.layer = layerDownload

	di.err <- nil
}
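
This puller discovers the blob size by seeking to the end and back, and tolerates failure when no Content-Length is available; the progress bar simply runs without a total. The same probe works against any io.ReadSeeker (using the io.SeekEnd/io.SeekStart constants that superseded os.SEEK_END):

package main

import (
	"fmt"
	"io"
	"strings"
)

// sizeOf returns the stream length when the seeker supports it (0
// otherwise) and restores the offset to the beginning.
func sizeOf(rs io.ReadSeeker) int64 {
	size, err := rs.Seek(0, io.SeekEnd)
	if err != nil {
		// No size available; the caller continues without a total,
		// just as the puller does.
		return 0
	}
	if _, err := rs.Seek(0, io.SeekStart); err != nil {
		return 0
	}
	return size
}

func main() {
	blob := strings.NewReader("layer contents")
	fmt.Println("blob size:", sizeOf(blob))

	data, _ := io.ReadAll(blob)
	fmt.Println("still readable from the start:", len(data), "bytes")
}
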
Example #17
// CmdBuild builds a new image from the source code at a given path.
//
// If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN.
//
// Usage: docker build [OPTIONS] PATH | URL | -
func (cli *DockerCli) CmdBuild(args ...string) error {
	cmd := Cli.Subcmd("build", []string{"PATH | URL | -"}, Cli.DockerCommands["build"].Description, true)
	flTags := opts.NewListOpts(validateTag)
	cmd.Var(&flTags, []string{"t", "-tag"}, "Name and optionally a tag in the 'name:tag' format")
	suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers")
	noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
	rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build")
	forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers")
	pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
	dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
	flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit")
	flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap")
	flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
	flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
	flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
	flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
	flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
	flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")
	flBuildArg := opts.NewListOpts(opts.ValidateEnv)
	cmd.Var(&flBuildArg, []string{"-build-arg"}, "Set build-time variables")

	ulimits := make(map[string]*ulimit.Ulimit)
	flUlimits := opts.NewUlimitOpt(&ulimits)
	cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options")

	cmd.Require(flag.Exact, 1)

	// For trusted pull on "FROM <image>" instruction.
	addTrustedFlags(cmd, true)

	cmd.ParseFlags(args, true)

	var (
		context  io.ReadCloser
		isRemote bool
		err      error
	)

	_, err = exec.LookPath("git")
	hasGit := err == nil

	specifiedContext := cmd.Arg(0)

	var (
		contextDir    string
		tempDir       string
		relDockerfile string
	)

	switch {
	case specifiedContext == "-":
		tempDir, relDockerfile, err = getContextFromReader(cli.in, *dockerfileName)
	case urlutil.IsGitURL(specifiedContext) && hasGit:
		tempDir, relDockerfile, err = getContextFromGitURL(specifiedContext, *dockerfileName)
	case urlutil.IsURL(specifiedContext):
		tempDir, relDockerfile, err = getContextFromURL(cli.out, specifiedContext, *dockerfileName)
	default:
		contextDir, relDockerfile, err = getContextFromLocalDir(specifiedContext, *dockerfileName)
	}

	if err != nil {
		return fmt.Errorf("unable to prepare context: %s", err)
	}

	if tempDir != "" {
		defer os.RemoveAll(tempDir)
		contextDir = tempDir
	}

	// Resolve the FROM lines in the Dockerfile to trusted digest references
	// using Notary. On a successful build, we must tag the resolved digests
	// to the original name specified in the Dockerfile.
	newDockerfile, resolvedTags, err := rewriteDockerfileFrom(filepath.Join(contextDir, relDockerfile), cli.trustedReference)
	if err != nil {
		return fmt.Errorf("unable to process Dockerfile: %v", err)
	}
	defer newDockerfile.Close()

	// And canonicalize dockerfile name to a platform-independent one
	relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
	if err != nil {
		return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
	}

	f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
	if err != nil && !os.IsNotExist(err) {
		return err
	}

	var excludes []string
	if err == nil {
		excludes, err = utils.ReadDockerIgnore(f)
		if err != nil {
			return err
		}
	}

	if err := utils.ValidateContextDirectory(contextDir, excludes); err != nil {
		return fmt.Errorf("Error checking context: '%s'.", err)
	}

	// If .dockerignore mentions .dockerignore or the Dockerfile
	// then make sure we send both files over to the daemon
	// because Dockerfile is, obviously, needed no matter what, and
	// .dockerignore is needed to know if either one needs to be
	// removed.  The daemon will remove them for us, if needed, after it
	// parses the Dockerfile. Ignore errors here, as they will have been
	// caught by ValidateContextDirectory above.
	var includes = []string{"."}
	keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
	keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
	if keepThem1 || keepThem2 {
		includes = append(includes, ".dockerignore", relDockerfile)
	}

	context, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: excludes,
		IncludeFiles:    includes,
	})
	if err != nil {
		return err
	}

	// Wrap the tar archive to replace the Dockerfile entry with the rewritten
	// Dockerfile which uses trusted pulls.
	context = replaceDockerfileTarWrapper(context, newDockerfile, relDockerfile)

	// Setup an upload progress bar
	// FIXME: ProgressReader shouldn't be this annoying to use
	sf := streamformatter.NewStreamFormatter()
	var body io.Reader = progressreader.New(progressreader.Config{
		In:        context,
		Out:       cli.out,
		Formatter: sf,
		NewLines:  true,
		ID:        "",
		Action:    "Sending build context to Docker daemon",
	})

	var memory int64
	if *flMemoryString != "" {
		parsedMemory, err := units.RAMInBytes(*flMemoryString)
		if err != nil {
			return err
		}
		memory = parsedMemory
	}

	var memorySwap int64
	if *flMemorySwap != "" {
		if *flMemorySwap == "-1" {
			memorySwap = -1
		} else {
			parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap)
			if err != nil {
				return err
			}
			memorySwap = parsedMemorySwap
		}
	}

	// Send the build context
	v := url.Values{
		"t": flTags.GetAll(),
	}
	if *suppressOutput {
		v.Set("q", "1")
	}
	if isRemote {
		v.Set("remote", cmd.Arg(0))
	}
	if *noCache {
		v.Set("nocache", "1")
	}
	if *rm {
		v.Set("rm", "1")
	} else {
		v.Set("rm", "0")
	}

	if *forceRm {
		v.Set("forcerm", "1")
	}

	if *pull {
		v.Set("pull", "1")
	}

	v.Set("cpusetcpus", *flCPUSetCpus)
	v.Set("cpusetmems", *flCPUSetMems)
	v.Set("cpushares", strconv.FormatInt(*flCPUShares, 10))
	v.Set("cpuquota", strconv.FormatInt(*flCPUQuota, 10))
	v.Set("cpuperiod", strconv.FormatInt(*flCPUPeriod, 10))
	v.Set("memory", strconv.FormatInt(memory, 10))
	v.Set("memswap", strconv.FormatInt(memorySwap, 10))
	v.Set("cgroupparent", *flCgroupParent)

	v.Set("dockerfile", relDockerfile)

	ulimitsVar := flUlimits.GetList()
	ulimitsJSON, err := json.Marshal(ulimitsVar)
	if err != nil {
		return err
	}
	v.Set("ulimits", string(ulimitsJSON))

	// collect all the build-time environment variables for the container
	buildArgs := runconfig.ConvertKVStringsToMap(flBuildArg.GetAll())
	buildArgsJSON, err := json.Marshal(buildArgs)
	if err != nil {
		return err
	}
	v.Set("buildargs", string(buildArgsJSON))

	headers := http.Header(make(map[string][]string))
	buf, err := json.Marshal(cli.configFile.AuthConfigs)
	if err != nil {
		return err
	}
	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
	headers.Set("Content-Type", "application/tar")

	sopts := &streamOpts{
		rawTerminal: true,
		in:          body,
		out:         cli.out,
		headers:     headers,
	}

	serverResp, err := cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), sopts)

	// Windows: show error message about modified file permissions.
	if runtime.GOOS == "windows" {
		h, err := httputils.ParseServerHeader(serverResp.header.Get("Server"))
		if err == nil {
			if h.OS != "windows" {
				fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
			}
		}
	}

	if jerr, ok := err.(*jsonmessage.JSONError); ok {
		// If no error code is set, default to 1
		if jerr.Code == 0 {
			jerr.Code = 1
		}
		return Cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
	}

	if err != nil {
		return err
	}

	// Since the build was successful, now we must tag any of the resolved
	// images from the above Dockerfile rewrite.
	for _, resolved := range resolvedTags {
		if err := cli.tagTrusted(resolved.repoInfo, resolved.digestRef, resolved.tagRef); err != nil {
			return err
		}
	}

	return nil
}
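
replaceDockerfileTarWrapper (not shown) swaps a single entry, the Dockerfile, while the context archive streams through. A self-contained sketch of rewriting one tar entry in flight with archive/tar:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
)

// replaceEntry copies a tar stream from in to out, substituting the
// contents of the entry named name.
func replaceEntry(in io.Reader, out io.Writer, name string, content []byte) error {
	tr := tar.NewReader(in)
	tw := tar.NewWriter(out)

	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return tw.Close()
		}
		if err != nil {
			return err
		}
		if hdr.Name == name {
			hdr.Size = int64(len(content))
			if err := tw.WriteHeader(hdr); err != nil {
				return err
			}
			if _, err := tw.Write(content); err != nil {
				return err
			}
			continue
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if _, err := io.Copy(tw, tr); err != nil {
			return err
		}
	}
}

func main() {
	// Build a one-entry context, then rewrite its Dockerfile.
	orig := []byte("FROM busybox\n")
	var src bytes.Buffer
	tw := tar.NewWriter(&src)
	tw.WriteHeader(&tar.Header{Name: "Dockerfile", Mode: 0644, Size: int64(len(orig))})
	tw.Write(orig)
	tw.Close()

	var dst bytes.Buffer
	if err := replaceEntry(&src, &dst, "Dockerfile", []byte("# rewritten in flight\nFROM busybox\n")); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("rewrote context,", dst.Len(), "bytes")
}
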
Example #18
func (s *TagStore) pullV2Tag(r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter, auth *registry.RequestAuthorization) (bool, error) {
	logrus.Debugf("Pulling tag from V2 registry: %q", tag)

	remoteDigest, manifestBytes, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
	if err != nil {
		return false, err
	}

	// loadManifest ensures that the manifest payload has the expected digest
	// if the tag is a digest reference.
	localDigest, manifest, verified, err := s.loadManifest(manifestBytes, tag, remoteDigest)
	if err != nil {
		return false, fmt.Errorf("error verifying manifest: %s", err)
	}

	if verified {
		logrus.Printf("Image manifest for %s has been verified", utils.ImageReference(repoInfo.CanonicalName, tag))
	}
	out.Write(sf.FormatStatus(tag, "Pulling from %s", repoInfo.CanonicalName))

	// downloadInfo is used to pass information from download to extractor
	type downloadInfo struct {
		imgJSON    []byte
		img        *image.Image
		digest     digest.Digest
		tmpFile    *os.File
		length     int64
		downloaded bool
		err        chan error
	}

	downloads := make([]downloadInfo, len(manifest.FSLayers))

	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
		var (
			sumStr  = manifest.FSLayers[i].BlobSum
			imgJSON = []byte(manifest.History[i].V1Compatibility)
		)

		img, err := image.NewImgJSON(imgJSON)
		if err != nil {
			return false, fmt.Errorf("failed to parse json: %s", err)
		}
		downloads[i].img = img

		// Check if exists
		if s.graph.Exists(img.ID) {
			logrus.Debugf("Image already exists: %s", img.ID)
			continue
		}

		dgst, err := digest.ParseDigest(sumStr)
		if err != nil {
			return false, err
		}
		downloads[i].digest = dgst

		out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))

		downloadFunc := func(di *downloadInfo) error {
			logrus.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID)

			if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
				if c != nil {
					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
					<-c
					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
				} else {
					logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
				}
			} else {
				defer s.poolRemove("pull", "img:"+img.ID)
				tmpFile, err := ioutil.TempFile("", "GetV2ImageBlob")
				if err != nil {
					return err
				}

				r, l, err := r.GetV2ImageBlobReader(endpoint, repoInfo.RemoteName, di.digest, auth)
				if err != nil {
					return err
				}
				defer r.Close()

				verifier, err := digest.NewDigestVerifier(di.digest)
				if err != nil {
					return err
				}

				if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
					In:        ioutil.NopCloser(io.TeeReader(r, verifier)),
					Out:       out,
					Formatter: sf,
					Size:      int(l),
					NewLines:  false,
					ID:        stringid.TruncateID(img.ID),
					Action:    "Downloading",
				})); err != nil {
					return fmt.Errorf("unable to copy v2 image blob data: %s", err)
				}

				out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Verifying Checksum", nil))

				if !verifier.Verified() {
					return fmt.Errorf("image layer digest verification failed for %q", di.digest)
				}

				out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))

				logrus.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())
				di.tmpFile = tmpFile
				di.length = l
				di.downloaded = true
			}
			di.imgJSON = imgJSON

			return nil
		}

		downloads[i].err = make(chan error)
		go func(di *downloadInfo) {
			di.err <- downloadFunc(di)
		}(&downloads[i])
	}

	var tagUpdated bool
	for i := len(downloads) - 1; i >= 0; i-- {
		d := &downloads[i]
		if d.err != nil {
			if err := <-d.err; err != nil {
				return false, err
			}
		}
		if d.downloaded {
			// if tmpFile is empty, assume the layer was downloaded and extracted elsewhere
			defer os.Remove(d.tmpFile.Name())
			defer d.tmpFile.Close()
			d.tmpFile.Seek(0, 0)
			if d.tmpFile != nil {
				err = s.graph.Register(d.img,
					progressreader.New(progressreader.Config{
						In:        d.tmpFile,
						Out:       out,
						Formatter: sf,
						Size:      int(d.length),
						ID:        stringid.TruncateID(d.img.ID),
						Action:    "Extracting",
					}))
				if err != nil {
					return false, err
				}

				if err := s.graph.SetDigest(d.img.ID, d.digest); err != nil {
					return false, err
				}

				// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
			}
			out.Write(sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
			tagUpdated = true
		} else {
			out.Write(sf.FormatProgress(stringid.TruncateID(d.img.ID), "Already exists", nil))
		}

	}

	// Check for new tag if no layers downloaded
	if !tagUpdated {
		repo, err := s.Get(repoInfo.LocalName)
		if err != nil {
			return false, err
		}
		if repo != nil {
			if _, exists := repo[tag]; !exists {
				tagUpdated = true
			}
		} else {
			tagUpdated = true
		}
	}

	if verified && tagUpdated {
		out.Write(sf.FormatStatus(utils.ImageReference(repoInfo.CanonicalName, tag), "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
	}

	if localDigest != remoteDigest { // this is not a verification check.
		// NOTE(stevvooe): This is a very defensive branch and should never
		// happen, since all manifest digest implementations use the same
		// algorithm.
		logrus.WithFields(
			logrus.Fields{
				"local":  localDigest,
				"remote": remoteDigest,
			}).Debugf("local digest does not match remote")

		out.Write(sf.FormatStatus("", "Remote Digest: %s", remoteDigest))
	}

	out.Write(sf.FormatStatus("", "Digest: %s", localDigest))

	if tag == localDigest.String() {
		// TODO(stevvooe): Ideally, we should always set the digest so we can
		// use the digest whether we pull by it or not. Unfortunately, the tag
		// store treats the digest as a separate tag, meaning there may be an
		// untagged digest image that would seem to be dangling by a user.

		if err = s.SetDigest(repoInfo.LocalName, localDigest.String(), downloads[0].img.ID); err != nil {
			return false, err
		}
	}

	if !utils.DigestReference(tag) {
		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
		if err = s.Tag(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
			return false, err
		}
	}

	return tagUpdated, nil
}
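
The download path above tees every blob byte through a digest verifier while spooling it to a temp file, and only trusts the layer once Verified() returns true. Here is a minimal standard-library sketch of that pattern; the sha256 sum stands in for the manifest blobsum and a bytes.Buffer for the temp file.

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

func main() {
	content := "layer data"
	expected := sha256.Sum256([]byte(content)) // stands in for the manifest blobsum

	var buf bytes.Buffer // stands in for the temp file
	h := sha256.New()

	// Every byte copied into buf is also fed to the hash, exactly like the
	// io.TeeReader(r, verifier) call in the pull path above.
	if _, err := io.Copy(&buf, io.TeeReader(strings.NewReader(content), h)); err != nil {
		panic(err)
	}

	if !bytes.Equal(h.Sum(nil), expected[:]) {
		fmt.Println("digest verification failed")
		return
	}
	fmt.Println("download verified,", buf.Len(), "bytes buffered")
}
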
Example #19
func (p *v1Puller) downloadLayer(out io.Writer, v1LayerID, endpoint string, parentID layer.ChainID, layerSize int64, layersDownloaded *bool) (l layer.Layer, err error) {
	// ensure no two downloads of the same layer happen at the same time
	poolKey := "layer:" + v1LayerID
	broadcaster, found := p.config.Pool.add(poolKey)
	broadcaster.Add(out)
	if found {
		logrus.Debugf("Image (id: %s) pull is already running, skipping", v1LayerID)
		if err = broadcaster.Wait(); err != nil {
			return nil, err
		}
		layerID, err := p.v1IDService.Get(v1LayerID, p.repoInfo.Index.Name)
		if err != nil {
			return nil, err
		}
		// Does the layer actually exist locally?
		l, err := p.config.LayerStore.Get(layerID)
		if err != nil {
			return nil, err
		}
		return l, nil
	}

	// This must use a closure so it captures the value of err when
	// the function returns, not when the 'defer' is evaluated.
	defer func() {
		p.config.Pool.removeWithError(poolKey, err)
	}()

	retries := 5
	for j := 1; j <= retries; j++ {
		// Get the layer
		status := "Pulling fs layer"
		if j > 1 {
			status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
		}
		broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), status, nil))
		layerReader, err := p.session.GetRemoteImageLayer(v1LayerID, endpoint, layerSize)
		if uerr, ok := err.(*url.Error); ok {
			err = uerr.Err
		}
		if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
			time.Sleep(time.Duration(j) * 500 * time.Millisecond)
			continue
		} else if err != nil {
			broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), "Error pulling dependent layers", nil))
			return nil, err
		}
		*layersDownloaded = true
		defer layerReader.Close()

		reader := progressreader.New(progressreader.Config{
			In:        layerReader,
			Out:       broadcaster,
			Formatter: p.sf,
			Size:      layerSize,
			NewLines:  false,
			ID:        stringid.TruncateID(v1LayerID),
			Action:    "Downloading",
		})

		inflatedLayerData, err := archive.DecompressStream(reader)
		if err != nil {
			return nil, fmt.Errorf("could not get decompression stream: %v", err)
		}

		l, err := p.config.LayerStore.Register(inflatedLayerData, parentID)
		if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
			time.Sleep(time.Duration(j) * 500 * time.Millisecond)
			continue
		} else if err != nil {
			broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), "Error downloading dependent layers", nil))
			return nil, fmt.Errorf("failed to register layer: %v", err)
		}
		logrus.Debugf("layer %s registered successfully", l.DiffID())

		// Cache mapping from this v1 ID to content-addressable layer ID
		if err := p.v1IDService.Set(v1LayerID, p.repoInfo.Index.Name, l.ChainID()); err != nil {
			return nil, err
		}

		broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), "Download complete", nil))
		broadcaster.Close()
		return l, nil
	}

	// not reached
	return nil, nil
}
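
The retry loop above only retries timeout-flavored network errors, with a linear backoff of j*500ms before attempt j+1. A reduced sketch of that shape, with a hypothetical fetch callback standing in for session.GetRemoteImageLayer:

package main

import (
	"errors"
	"fmt"
	"net"
	"time"
)

func fetchWithRetries(fetch func() error) error {
	const retries = 5
	for j := 1; j <= retries; j++ {
		err := fetch()
		var nerr net.Error
		if errors.As(err, &nerr) && nerr.Timeout() && j < retries {
			// Linear backoff, matching time.Duration(j) * 500 * time.Millisecond above.
			time.Sleep(time.Duration(j) * 500 * time.Millisecond)
			continue
		}
		return err // nil on success, or a non-retryable/final error
	}
	return nil // unreachable, mirrors the "not reached" return above
}

func main() {
	attempts := 0
	err := fetchWithRetries(func() error {
		attempts++
		if attempts < 3 {
			return &net.DNSError{IsTimeout: true} // a timeout-flavored net.Error
		}
		return nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}
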
Example #20
func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint string, token []string, sf *streamformatter.StreamFormatter) (bool, error) {
	history, err := r.GetRemoteHistory(imgID, endpoint)
	if err != nil {
		return false, err
	}
	out.Write(sf.FormatProgress(stringid.TruncateID(imgID), "Pulling dependent layers", nil))
	// FIXME: Try to stream the images?
	// FIXME: Launch the getRemoteImage() in goroutines

	layersDownloaded := false
	for i := len(history) - 1; i >= 0; i-- {
		id := history[i]

		// ensure no two downloads of the same layer happen at the same time
		if c, err := s.poolAdd("pull", "layer:"+id); err != nil {
			logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
			if c != nil {
				<-c
			}
		}
		defer s.poolRemove("pull", "layer:"+id)

		if !s.graph.Exists(id) {
			out.Write(sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil))
			var (
				imgJSON []byte
				imgSize int
				err     error
				img     *image.Image
			)
			retries := 5
			for j := 1; j <= retries; j++ {
				imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint)
				if err != nil && j == retries {
					out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, err
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				}
				img, err = image.NewImgJSON(imgJSON)
				layersDownloaded = true
				if err != nil && j == retries {
					out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, fmt.Errorf("Failed to parse json: %s", err)
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else {
					break
				}
			}

			for j := 1; j <= retries; j++ {
				// Get the layer
				status := "Pulling fs layer"
				if j > 1 {
					status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
				}
				out.Write(sf.FormatProgress(stringid.TruncateID(id), status, nil))
				layer, err := r.GetRemoteImageLayer(img.ID, endpoint, int64(imgSize))
				if uerr, ok := err.(*url.Error); ok {
					err = uerr.Err
				}
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, err
				}
				layersDownloaded = true
				defer layer.Close()

				err = s.graph.Register(img,
					progressreader.New(progressreader.Config{
						In:        layer,
						Out:       out,
						Formatter: sf,
						Size:      imgSize,
						NewLines:  false,
						ID:        stringid.TruncateID(id),
						Action:    "Downloading",
					}))
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil))
					return layersDownloaded, err
				} else {
					break
				}
			}
		}
		out.Write(sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil))
	}
	return layersDownloaded, nil
}
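
poolAdd/poolRemove above give layer downloads single-flight semantics: the first caller for a key does the work, later callers receive a channel to wait on. A toy version of that contract, assuming the behavior implied by the calling code (the real implementation lives elsewhere in the graph package):

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// pool sketches the poolAdd/poolRemove pair: the first caller for a key
// gets to do the work, later callers get a channel that closes when the
// work is done.
type pool struct {
	mu   sync.Mutex
	runs map[string]chan struct{}
}

func (p *pool) add(key string) (chan struct{}, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c, ok := p.runs[key]; ok {
		return c, errors.New("already pulling " + key)
	}
	c := make(chan struct{})
	p.runs[key] = c
	return c, nil
}

func (p *pool) remove(key string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c, ok := p.runs[key]; ok {
		close(c) // wake every waiter
		delete(p.runs, key)
	}
}

func main() {
	p := &pool{runs: make(map[string]chan struct{})}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if c, err := p.add("layer:abc"); err != nil {
				<-c // someone else is downloading; wait for them
				fmt.Println(i, "waited")
				return
			}
			defer p.remove("layer:abc")
			time.Sleep(10 * time.Millisecond) // pretend to download
			fmt.Println(i, "downloaded")
		}(i)
	}
	wg.Wait()
}
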
Example #21
func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter, parallel bool, auth *registry.RequestAuthorization) (bool, error) {
	log.Debugf("Pulling tag from V2 registry: %q", tag)

	manifestBytes, manifestDigest, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
	if err != nil {
		return false, err
	}

	// loadManifest ensures that the manifest payload has the expected digest
	// if the tag is a digest reference.
	manifest, verified, err := s.loadManifest(eng, manifestBytes, manifestDigest, tag)
	if err != nil {
		return false, fmt.Errorf("error verifying manifest: %s", err)
	}

	if err := checkValidManifest(manifest); err != nil {
		return false, err
	}

	if verified {
		log.Printf("Image manifest for %s has been verified", utils.ImageReference(repoInfo.CanonicalName, tag))
	}
	out.Write(sf.FormatStatus(tag, "Pulling from %s", repoInfo.CanonicalName))

	downloads := make([]downloadInfo, len(manifest.FSLayers))

	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
		var (
			sumStr  = manifest.FSLayers[i].BlobSum
			imgJSON = []byte(manifest.History[i].V1Compatibility)
		)

		img, err := image.NewImgJSON(imgJSON)
		if err != nil {
			return false, fmt.Errorf("failed to parse json: %s", err)
		}
		downloads[i].img = img

		// Check if exists
		if s.graph.Exists(img.ID) {
			log.Debugf("Image already exists: %s", img.ID)
			continue
		}

		dgst, err := digest.ParseDigest(sumStr)
		if err != nil {
			return false, err
		}
		downloads[i].digest = dgst

		out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Pulling fs layer", nil))

		downloadFunc := func(di *downloadInfo) error {
			log.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID)

			if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
				if c != nil {
					out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
					<-c
					out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil))
				} else {
					log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
				}
			} else {
				defer s.poolRemove("pull", "img:"+img.ID)
				tmpFile, err := ioutil.TempFile("", "GetV2ImageBlob")
				if err != nil {
					return err
				}

				r, l, err := r.GetV2ImageBlobReader(endpoint, repoInfo.RemoteName, di.digest.Algorithm(), di.digest.Hex(), auth)
				if err != nil {
					return err
				}
				defer r.Close()

				verifier, err := digest.NewDigestVerifier(di.digest)
				if err != nil {
					return err
				}

				if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
					In:        ioutil.NopCloser(io.TeeReader(r, verifier)),
					Out:       out,
					Formatter: sf,
					Size:      int(l),
					NewLines:  false,
					ID:        common.TruncateID(img.ID),
					Action:    "Downloading",
				})); err != nil {
					return fmt.Errorf("unable to copy v2 image blob data: %s", err)
				}

				out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Verifying Checksum", nil))

				if !verifier.Verified() {
					log.Infof("Image verification failed: checksum mismatch for %q", di.digest.String())
					verified = false
				}

				out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil))

				log.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())
				di.tmpFile = tmpFile
				di.length = l
				di.downloaded = true
			}
			di.imgJSON = imgJSON

			return nil
		}

		if parallel {
			downloads[i].err = make(chan error)
			go func(di *downloadInfo) {
				di.err <- downloadFunc(di)
			}(&downloads[i])
		} else {
			err := downloadFunc(&downloads[i])
			if err != nil {
				return false, err
			}
		}
	}

	var tagUpdated bool
	for i := len(downloads) - 1; i >= 0; i-- {
		d := &downloads[i]
		if d.err != nil {
			err := <-d.err
			if err != nil {
				return false, err
			}
		}
		if d.downloaded {
			// if tmpFile is nil, assume the layer was downloaded and extracted elsewhere
			if d.tmpFile != nil {
				defer os.Remove(d.tmpFile.Name())
				defer d.tmpFile.Close()
				d.tmpFile.Seek(0, 0)
				err = s.graph.Register(d.img,
					progressreader.New(progressreader.Config{
						In:        d.tmpFile,
						Out:       out,
						Formatter: sf,
						Size:      int(d.length),
						ID:        common.TruncateID(d.img.ID),
						Action:    "Extracting",
					}))
				if err != nil {
					return false, err
				}

				// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
			}
			out.Write(sf.FormatProgress(common.TruncateID(d.img.ID), "Pull complete", nil))
			tagUpdated = true
		} else {
			out.Write(sf.FormatProgress(common.TruncateID(d.img.ID), "Already exists", nil))
		}

	}

	// Check for new tag if no layers downloaded
	if !tagUpdated {
		repo, err := s.Get(repoInfo.LocalName)
		if err != nil {
			return false, err
		}
		if repo != nil {
			if _, exists := repo[tag]; !exists {
				tagUpdated = true
			}
		}
	}

	if verified && tagUpdated {
		out.Write(sf.FormatStatus(utils.ImageReference(repoInfo.CanonicalName, tag), "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
	}

	if manifestDigest != "" {
		out.Write(sf.FormatStatus("", "Digest: %s", manifestDigest))
	}

	if utils.DigestReference(tag) {
		if err = s.SetDigest(repoInfo.LocalName, tag, downloads[0].img.ID); err != nil {
			return false, err
		}
	} else {
		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
		if err = s.Set(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
			return false, err
		}
	}

	return tagUpdated, nil
}
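
Both pullers above spool each blob to a temp file and then Seek back to offset zero before registering it, so the extract step re-reads the same bytes. A standalone sketch of that buffer-then-replay dance:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// bufferThenReplay sketches the tmpFile handling above: spool a remote
// stream to disk first, then Seek back to the start so registration can
// re-read it from offset zero.
func bufferThenReplay(src io.Reader) (string, error) {
	tmp, err := os.CreateTemp("", "GetV2ImageBlob")
	if err != nil {
		return "", err
	}
	defer os.Remove(tmp.Name())
	defer tmp.Close()

	if _, err := io.Copy(tmp, src); err != nil {
		return "", err
	}
	// Equivalent of d.tmpFile.Seek(0, 0) before graph.Register.
	if _, err := tmp.Seek(0, io.SeekStart); err != nil {
		return "", err
	}
	data, err := io.ReadAll(tmp) // stands in for the extract step
	return string(data), err
}

func main() {
	out, err := bufferThenReplay(strings.NewReader("blob bytes"))
	fmt.Println(out, err)
}
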
Example #22
// hyper build [OPTIONS] PATH
func (cli *HyperClient) HyperCmdBuild(args ...string) error {
	var opts struct {
		ImageName      string `long:"tag" short:"t" default:"" value-name:"\"\"" default-mask:"-" description:"Repository name (and optionally a tag) to be applied to the resulting image in case of success"`
		DockerfileName string `long:"file" short:"f" default:"" value-name:"\"\"" default-mask:"-" description:"Customized docker file"`
	}

	var parser = gflag.NewParser(&opts, gflag.Default)
	parser.Usage = "build [OPTIONS] PATH\n\nBuild a new image from the source code at PATH"
	args, err := parser.ParseArgs(args)
	if err != nil {
		if !strings.Contains(err.Error(), "Usage") {
			return err
		} else {
			return nil
		}
	}

	if len(args) == 0 {
		return fmt.Errorf("%s: \"build\" requires a minimum of 1 argument. See 'hyper build --help'.", os.Args[0])
	}
	var (
		filename = ""
		context  archive.Archive
		name     = ""
	)
	root := args[0]
	if _, err := os.Stat(root); err != nil {
		return err
	}

	absRoot, err := filepath.Abs(root)
	if err != nil {
		return err
	}

	filename = opts.DockerfileName // path to Dockerfile

	if opts.DockerfileName == "" {
		// No -f/--file was specified so use the default
		opts.DockerfileName = api.DefaultDockerfileName
		filename = filepath.Join(absRoot, opts.DockerfileName)

		// Just to be nice ;-) look for 'dockerfile' too but only
		// use it if we found it, otherwise ignore this check
		if _, err = os.Lstat(filename); os.IsNotExist(err) {
			tmpFN := path.Join(absRoot, strings.ToLower(opts.DockerfileName))
			if _, err = os.Lstat(tmpFN); err == nil {
				opts.DockerfileName = strings.ToLower(opts.DockerfileName)
				filename = tmpFN
			}
		}
	}

	origDockerfile := opts.DockerfileName // used for error msg
	if filename, err = filepath.Abs(filename); err != nil {
		return err
	}

	// Verify that 'filename' is within the build context
	filename, err = symlink.FollowSymlinkInScope(filename, absRoot)
	if err != nil {
		return fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", origDockerfile, root)
	}

	// Now reset the dockerfileName to be relative to the build context
	opts.DockerfileName, err = filepath.Rel(absRoot, filename)
	if err != nil {
		return err
	}
	// And canonicalize dockerfile name to a platform-independent one
	opts.DockerfileName, err = archive.CanonicalTarNameForPath(opts.DockerfileName)
	if err != nil {
		return fmt.Errorf("Cannot canonicalize dockerfile path %s: %v", opts.DockerfileName, err)
	}

	if _, err = os.Lstat(filename); os.IsNotExist(err) {
		return fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile)
	}
	var includes = []string{"."}

	f, err := os.Open(filepath.Join(root, ".dockerignore"))
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	defer f.Close()

	var excludes []string
	if err == nil {
		excludes, err = utils.ReadDockerIgnore(f)
		if err != nil {
			return err
		}
	}

	// If .dockerignore mentions .dockerignore or the Dockerfile
	// then make sure we send both files over to the daemon
	// because Dockerfile is, obviously, needed no matter what, and
	// .dockerignore is needed to know if either one needs to be
	// removed. The daemon will remove them for us, if needed, after it
	// parses the Dockerfile.
	keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
	keepThem2, _ := fileutils.Matches(opts.DockerfileName, excludes)
	if keepThem1 || keepThem2 {
		includes = append(includes, ".dockerignore", opts.DockerfileName)
	}

	if err := utils.ValidateContextDirectory(root, excludes); err != nil {
		return fmt.Errorf("Error checking context: '%s'.", err)
	}
	options := &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: excludes,
		IncludeFiles:    includes,
	}
	context, err = archive.TarWithOptions(root, options)
	if err != nil {
		return err
	}
	var body io.Reader
	// Setup an upload progress bar
	// FIXME: ProgressReader shouldn't be this annoying to use
	if context != nil {
		sf := streamformatter.NewStreamFormatter()
		body = progressreader.New(progressreader.Config{
			In:        context,
			Out:       os.Stdout,
			Formatter: sf,
			NewLines:  true,
			ID:        "",
			Action:    "Sending build context to Docker daemon",
		})
	}

	if opts.ImageName == "" {
		// set an image name
		name = rand.RandStr(10, "alphanum")
	} else {
		name = opts.ImageName
		repository, tag := parsers.ParseRepositoryTag(name)
		if err := registry.ValidateRepositoryName(repository); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}
	v := url.Values{}
	v.Set("name", name)
	headers := http.Header(make(map[string][]string))
	if context != nil {
		headers.Set("Content-Type", "application/tar")
	}
	err = cli.stream("POST", "/image/build?"+v.Encode(), body, cli.out, headers)
	if err != nil {
		return err
	}
	return nil
}
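
The .dockerignore handling above reads exclusion patterns and later matches candidate paths against them via fileutils.Matches. A simplified stdlib-only sketch of the shape of that check; the real code also supports negated patterns and richer syntax:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// excluded reports whether path matches any ignore pattern. This is a
// deliberately reduced stand-in for fileutils.Matches.
func excluded(path string, patterns []string) bool {
	for _, pat := range patterns {
		if ok, err := filepath.Match(pat, path); err == nil && ok {
			return true
		}
	}
	return false
}

func main() {
	ignore := "*.log\ntmp\n" // contents of a hypothetical .dockerignore
	patterns := strings.Fields(ignore)

	for _, p := range []string{"main.go", "build.log", "tmp"} {
		fmt.Println(p, "excluded:", excluded(p, patterns))
	}
}
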
Example #23
func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) {
	// get filename from URL
	u, err := url.Parse(srcURL)
	if err != nil {
		return
	}
	path := filepath.FromSlash(u.Path) // Ensure in platform semantics
	if strings.HasSuffix(path, string(os.PathSeparator)) {
		path = path[:len(path)-1]
	}
	parts := strings.Split(path, string(os.PathSeparator))
	filename := parts[len(parts)-1]
	if filename == "" {
		err = fmt.Errorf("cannot determine filename from url: %s", u)
		return
	}

	// Initiate the download
	resp, err := httputils.Download(srcURL)
	if err != nil {
		return
	}

	// Prepare file in a tmp dir
	tmpDir, err := ioutils.TempDir("", "docker-remote")
	if err != nil {
		return
	}
	defer func() {
		if err != nil {
			os.RemoveAll(tmpDir)
		}
	}()
	tmpFileName := filepath.Join(tmpDir, filename)
	tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
	if err != nil {
		return
	}

	// Download and dump result to tmp file
	if _, err = io.Copy(tmpFile, progressreader.New(progressreader.Config{
		In: resp.Body,
		// TODO: make progressreader streamformatter agnostic
		Out:       b.Stdout.(*streamformatter.StdoutFormatter).Writer,
		Formatter: b.Stdout.(*streamformatter.StdoutFormatter).StreamFormatter,
		Size:      resp.ContentLength,
		NewLines:  true,
		ID:        "",
		Action:    "Downloading",
	})); err != nil {
		tmpFile.Close()
		return
	}
	fmt.Fprintln(b.Stdout)
	// Stat the temp file now to capture its metadata for the returned FileInfo
	tmpFileSt, err := tmpFile.Stat()
	if err != nil {
		return
	}
	tmpFile.Close()

	// Set the mtime to the Last-Modified header value if present
	// Otherwise just remove atime and mtime
	mTime := time.Time{}

	lastMod := resp.Header.Get("Last-Modified")
	if lastMod != "" {
		// If we can't parse it then just let it default to 'zero'
		// otherwise use the parsed time value
		if parsedMTime, err := http.ParseTime(lastMod); err == nil {
			mTime = parsedMTime
		}
	}

	if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
		return
	}

	// Calc the checksum, even if we're using the cache
	r, err := archive.Tar(tmpFileName, archive.Uncompressed)
	if err != nil {
		return
	}
	tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
	if err != nil {
		return
	}
	if _, err = io.Copy(ioutil.Discard, tarSum); err != nil {
		return
	}
	hash := tarSum.Sum(nil)
	r.Close()
	return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil
}
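
The mtime logic above is worth isolating: parse Last-Modified when present, otherwise fall back to the zero time, and stamp the file either way. A sketch using os.Chtimes as the standard-library analogue of system.Chtimes:

package main

import (
	"fmt"
	"net/http"
	"os"
	"time"
)

// stampFromHeader mirrors the download path above: parse Last-Modified if
// present, otherwise fall back to the zero time, then stamp the file.
func stampFromHeader(name string, hdr http.Header) error {
	mTime := time.Time{}
	if lastMod := hdr.Get("Last-Modified"); lastMod != "" {
		if parsed, err := http.ParseTime(lastMod); err == nil {
			mTime = parsed // unparsable values silently default to zero
		}
	}
	return os.Chtimes(name, mTime, mTime)
}

func main() {
	f, err := os.CreateTemp("", "download")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	f.Close()

	hdr := http.Header{}
	hdr.Set("Last-Modified", "Tue, 15 Nov 1994 12:45:26 GMT")
	fmt.Println(stampFromHeader(f.Name(), hdr))
}
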
Example #24
func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (digest.Digest, error) {
	out := p.config.OutStream

	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Buffering to Disk", nil))

	image, err := p.graph.Get(img.ID)
	if err != nil {
		return "", err
	}
	arch, err := p.graph.TarLayer(image)
	if err != nil {
		return "", err
	}

	tf, err := p.graph.newTempFile()
	if err != nil {
		return "", err
	}
	defer func() {
		tf.Close()
		os.Remove(tf.Name())
	}()

	size, dgst, err := bufferToFile(tf, arch)
	if err != nil {
		return "", err
	}

	// Send the layer
	logrus.Debugf("rendered layer for %s of [%d] size", img.ID, size)
	layerUpload, err := bs.Create(nil)
	if err != nil {
		return "", err
	}
	defer layerUpload.Close()

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(tf),
		Out:       out,
		Formatter: p.sf,
		Size:      int(size),
		NewLines:  false,
		ID:        stringid.TruncateID(img.ID),
		Action:    "Pushing",
	})
	n, err := layerUpload.ReadFrom(reader)
	if err != nil {
		return "", err
	}
	if n != size {
		return "", fmt.Errorf("short upload: only wrote %d of %d", n, size)
	}

	desc := distribution.Descriptor{Digest: dgst}
	if _, err := layerUpload.Commit(nil, desc); err != nil {
		return "", err
	}

	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Image successfully pushed", nil))

	return dgst, nil
}
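
bufferToFile is not shown above, but from its call site it plausibly spools the layer tar to the temp file while hashing it, returning the byte count and digest. A hypothetical stdlib-only sketch consistent with that call site (the real helper returns a digest.Digest):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

// bufferToFile spools src to f while hashing it, returning the byte count
// and a sha256 digest string. Hex output keeps this standard-library only.
func bufferToFile(f *os.File, src io.Reader) (int64, string, error) {
	h := sha256.New()
	n, err := io.Copy(f, io.TeeReader(src, h))
	if err != nil {
		return 0, "", err
	}
	// Rewind so the upload step can read the buffered bytes back.
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		return 0, "", err
	}
	return n, "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	tf, err := os.CreateTemp("", "layer")
	if err != nil {
		panic(err)
	}
	defer os.Remove(tf.Name())
	defer tf.Close()

	size, dgst, err := bufferToFile(tf, strings.NewReader("layer tar bytes"))
	fmt.Println(size, dgst, err)
}
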
Example #25
func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error {

	if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "./")

	// Twiddle the destPath when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	if !filepath.IsAbs(destPath) {
		hasSlash := strings.HasSuffix(destPath, "/")
		destPath = filepath.Join("/", b.Config.WorkingDir, destPath)

		// Make sure we preserve any trailing slash
		if hasSlash {
			destPath += "/"
		}
	}

	// In the remote/URL case, download it and gen its hashcode
	if urlutil.IsURL(origPath) {
		if !allowRemote {
			return fmt.Errorf("Source can't be a URL for %s", cmdName)
		}

		ci := copyInfo{}
		ci.origPath = origPath
		ci.hash = origPath // default to this but can change
		ci.destPath = destPath
		ci.decompress = false
		*cInfos = append(*cInfos, &ci)

		// Initiate the download
		resp, err := httputils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := filepath.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
			In:        resp.Body,
			Out:       b.OutOld,
			Formatter: b.StreamFormatter,
			Size:      int(resp.ContentLength),
			NewLines:  true,
			ID:        "",
			Action:    "Downloading",
		})); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Set the mtime to the Last-Modified header value if present
		// Otherwise just remove atime and mtime
		times := make([]syscall.Timespec, 2)

		lastMod := resp.Header.Get("Last-Modified")
		if lastMod != "" {
			mTime, err := http.ParseTime(lastMod)
			// If we can't parse it then just let it default to 'zero'
			// otherwise use the parsed time value
			if err == nil {
				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
			}
		}

		if err := system.UtimesNano(tmpFileName, times); err != nil {
			return err
		}

		ci.origPath = filepath.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, "/") {
			u, err := url.Parse(origPath)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}

		// Calc the checksum, even if we're using the cache
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		ci.hash = tarSum.Sum(nil)
		r.Close()

		return nil
	}

	// Deal with wildcards
	if allowWildcards && ContainsWildcards(origPath) {
		for _, fileInfo := range b.context.GetSums() {
			if fileInfo.Name() == "" {
				continue
			}
			match, _ := filepath.Match(origPath, fileInfo.Name())
			if !match {
				continue
			}

			// Note we set allowWildcards to false in case the name has
			// a * in it
			if err := calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false); err != nil {
				return err
			}
		}
		return nil
	}

	// Must be a dir or a file

	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}
	// The error can safely be ignored here: checkPathForAddition has already
	// verified that origPath exists within the context.
	fi, _ := os.Stat(filepath.Join(b.contextPath, origPath))

	ci := copyInfo{}
	ci.origPath = origPath
	ci.hash = origPath
	ci.destPath = destPath
	ci.decompress = allowDecompression
	*cInfos = append(*cInfos, &ci)

	// Deal with the single file case
	if !fi.IsDir() {
		// This will match first file in sums of the archive
		fis := b.context.GetSums().GetFile(ci.origPath)
		if fis != nil {
			ci.hash = "file:" + fis.Sum()
		}
		return nil
	}

	// Must be a dir
	var subfiles []string
	absOrigPath := filepath.Join(b.contextPath, ci.origPath)

	// Add a trailing / to make sure we only pick up nested files under
	// the dir and not sibling files of the dir that just happen to
	// start with the same chars
	if !strings.HasSuffix(absOrigPath, "/") {
		absOrigPath += "/"
	}

	// Need path w/o / too to find matching dir w/o trailing /
	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]

	for _, fileInfo := range b.context.GetSums() {
		absFile := filepath.Join(b.contextPath, fileInfo.Name())
		// Any file in the context that starts with the given path will be
		// picked up and its hashcode used.  However, we'll exclude the
		// root dir itself.  We do this for a couple of reasons:
		// 1 - ADD/COPY will not copy the dir itself, just its children
		//     so there's no reason to include it in the hash calc
		// 2 - the metadata on the dir will change when any child file
		//     changes.  This will lead to a miss in the cache check if that
		//     child file is in the .dockerignore list.
		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
			subfiles = append(subfiles, fileInfo.Sum())
		}
	}
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	return nil
}
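
The directory branch above derives a cache key by sorting the per-file tarsum entries, joining them, and hashing the result, so the hash is independent of discovery order. A standalone sketch of that step, with made-up sums in place of real tarsum output:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"sort"
	"strings"
)

// dirHash sorts the per-file sums for determinism, joins them with commas,
// and hashes the result, matching the shape of the "dir:" hash above.
func dirHash(sums []string) string {
	sort.Strings(sums)
	h := sha256.New()
	h.Write([]byte(strings.Join(sums, ",")))
	return "dir:" + hex.EncodeToString(h.Sum(nil))
}

func main() {
	fmt.Println(dirHash([]string{"sum-b", "sum-a", "sum-c"}))
	// Same files, different discovery order, same hash:
	fmt.Println(dirHash([]string{"sum-c", "sum-a", "sum-b"}))
}
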
Example #26
func (p *v1Pusher) pushImage(v1Image v1Image, ep string) (checksum string, err error) {
	v1ID := v1Image.V1ID()

	jsonRaw := v1Image.Config()
	p.out.Write(p.sf.FormatProgress(stringid.TruncateID(v1ID), "Pushing", nil))

	// General rule is to use ID for graph accesses and compatibilityID for
	// calls to session.registry()
	imgData := &registry.ImgData{
		ID: v1ID,
	}

	// Send the json
	if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil {
		if err == registry.ErrAlreadyExists {
			p.out.Write(p.sf.FormatProgress(stringid.TruncateID(v1ID), "Image already pushed, skipping", nil))
			return "", nil
		}
		return "", err
	}

	l := v1Image.Layer()

	arch, err := l.TarStream()
	if err != nil {
		return "", err
	}
	defer arch.Close()

	// don't care if this fails; best effort
	size, _ := l.Size()

	// Send the layer
	logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size)

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(arch),
		Out:       p.out,
		Formatter: p.sf,
		Size:      size,
		NewLines:  false,
		ID:        stringid.TruncateID(v1ID),
		Action:    "Pushing",
	})

	checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw)
	if err != nil {
		return "", err
	}
	imgData.Checksum = checksum
	imgData.ChecksumPayload = checksumPayload
	// Send the checksum
	if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil {
		return "", err
	}

	if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.ChainID()); err != nil {
		logrus.Warnf("Could not set v1 ID mapping: %v", err)
	}

	p.out.Write(p.sf.FormatProgress(stringid.TruncateID(v1ID), "Image successfully pushed", nil))
	return imgData.Checksum, nil
}
Example #27
func (p *v2Puller) pullV2Tag(out io.Writer, ref reference.Named) (tagUpdated bool, err error) {
	tagOrDigest := ""
	if tagged, isTagged := ref.(reference.Tagged); isTagged {
		tagOrDigest = tagged.Tag()
	} else if digested, isDigested := ref.(reference.Digested); isDigested {
		tagOrDigest = digested.Digest().String()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
	}

	logrus.Debugf("Pulling ref from V2 registry: %q", tagOrDigest)

	manSvc, err := p.repo.Manifests(context.Background())
	if err != nil {
		return false, err
	}

	unverifiedManifest, err := manSvc.GetByTag(tagOrDigest)
	if err != nil {
		return false, err
	}
	if unverifiedManifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifyManifest(unverifiedManifest, ref)
	if err != nil {
		return false, err
	}

	rootFS := image.NewRootFS()

	if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil {
		return false, err
	}

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return false, err
	}

	out.Write(p.sf.FormatStatus(tagOrDigest, "Pulling from %s", p.repo.Name()))

	var downloads []*downloadInfo

	defer func() {
		for _, d := range downloads {
			p.config.Pool.removeWithError(d.poolKey, err)
			if d.tmpFile != nil {
				d.tmpFile.Close()
				if err := os.RemoveAll(d.tmpFile.Name()); err != nil {
					logrus.Errorf("Failed to remove temp file: %s", d.tmpFile.Name())
				}
			}
		}
	}()

	// Image history converted to the new format
	var history []image.History

	poolKey := "v2layer:"
	notFoundLocally := false

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum
		poolKey += blobSum.String()

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return false, err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return false, err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		// Do we have a layer on disk corresponding to the set of
		// blobsums up to this point?
		if !notFoundLocally {
			notFoundLocally = true
			diffID, err := p.blobSumService.GetDiffID(blobSum)
			if err == nil {
				rootFS.Append(diffID)
				if l, err := p.config.LayerStore.Get(rootFS.ChainID()); err == nil {
					notFoundLocally = false
					logrus.Debugf("Layer already exists: %s", blobSum.String())
					out.Write(p.sf.FormatProgress(stringid.TruncateID(blobSum.String()), "Already exists", nil))
					defer layer.ReleaseAndLog(p.config.LayerStore, l)
					continue
				} else {
					rootFS.DiffIDs = rootFS.DiffIDs[:len(rootFS.DiffIDs)-1]
				}
			}
		}

		out.Write(p.sf.FormatProgress(stringid.TruncateID(blobSum.String()), "Pulling fs layer", nil))

		tmpFile, err := ioutil.TempFile("", "GetImageBlob")
		if err != nil {
			return false, err
		}

		d := &downloadInfo{
			poolKey: poolKey,
			digest:  blobSum,
			tmpFile: tmpFile,
			// TODO: this chan buffer seems to have fixed a hang seen with go1.5;
			// that may point to a deeper problem where the error is somehow
			// never taken from the channel in the loop below
			err: make(chan error, 1),
		}

		downloads = append(downloads, d)

		broadcaster, found := p.config.Pool.add(d.poolKey)
		broadcaster.Add(out)
		d.broadcaster = broadcaster
		if found {
			d.err <- nil
		} else {
			go p.download(d)
		}
	}

	for _, d := range downloads {
		if err := <-d.err; err != nil {
			return false, err
		}

		if d.layer == nil {
			// Wait for a different pull to download and extract
			// this layer.
			err = d.broadcaster.Wait()
			if err != nil {
				return false, err
			}

			diffID, err := p.blobSumService.GetDiffID(d.digest)
			if err != nil {
				return false, err
			}
			rootFS.Append(diffID)

			l, err := p.config.LayerStore.Get(rootFS.ChainID())
			if err != nil {
				return false, err
			}

			defer layer.ReleaseAndLog(p.config.LayerStore, l)

			continue
		}

		d.tmpFile.Seek(0, 0)
		reader := progressreader.New(progressreader.Config{
			In:        d.tmpFile,
			Out:       d.broadcaster,
			Formatter: p.sf,
			Size:      d.size,
			NewLines:  false,
			ID:        stringid.TruncateID(d.digest.String()),
			Action:    "Extracting",
		})

		inflatedLayerData, err := archive.DecompressStream(reader)
		if err != nil {
			return false, fmt.Errorf("could not get decompression stream: %v", err)
		}

		l, err := p.config.LayerStore.Register(inflatedLayerData, rootFS.ChainID())
		if err != nil {
			return false, fmt.Errorf("failed to register layer: %v", err)
		}
		logrus.Debugf("layer %s registered successfully", l.DiffID())
		rootFS.Append(l.DiffID())

		// Cache mapping from this layer's DiffID to the blobsum
		if err := p.blobSumService.Add(l.DiffID(), d.digest); err != nil {
			return false, err
		}

		defer layer.ReleaseAndLog(p.config.LayerStore, l)

		d.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(d.digest.String()), "Pull complete", nil))
		d.broadcaster.Close()
		tagUpdated = true
	}

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), rootFS, history)
	if err != nil {
		return false, err
	}

	imageID, err := p.config.ImageStore.Create(config)
	if err != nil {
		return false, err
	}

	manifestDigest, _, err := digestFromManifest(unverifiedManifest, p.repoInfo.LocalName.Name())
	if err != nil {
		return false, err
	}

	// Check for new tag if no layers downloaded
	var oldTagImageID image.ID
	if !tagUpdated {
		oldTagImageID, err = p.config.TagStore.Get(ref)
		if err != nil || oldTagImageID != imageID {
			tagUpdated = true
		}
	}

	if tagUpdated {
		if canonical, ok := ref.(reference.Canonical); ok {
			if err = p.config.TagStore.AddDigest(canonical, imageID, true); err != nil {
				return false, err
			}
		} else if err = p.config.TagStore.AddTag(ref, imageID, true); err != nil {
			return false, err
		}
	}

	if manifestDigest != "" {
		out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest))
	}

	return tagUpdated, nil
}
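
rootFS.Append/ChainID above build a content-addressed chain: each chain ID folds the parent chain ID together with the next layer's DiffID, which is why a failed local lookup pops the just-appended DiffID back off. A sketch of the documented formula sha256(parent + " " + diffID); the canonical encoding lives in the layer package:

package main

import (
	"crypto/sha256"
	"fmt"
)

// chainID mirrors the documented layer chain formula: the first layer's
// chain ID is its DiffID, and each further chain ID hashes the parent
// chain ID together with the next DiffID.
func chainID(parent, diffID string) string {
	if parent == "" {
		return diffID
	}
	sum := sha256.Sum256([]byte(parent + " " + diffID))
	return fmt.Sprintf("sha256:%x", sum)
}

func main() {
	chain := ""
	// Dummy DiffIDs, labeled hypothetical; real values are sha256 digests.
	for _, diff := range []string{"sha256:aaaa", "sha256:bbbb"} {
		chain = chainID(chain, diff)
		fmt.Println(chain)
	}
}
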
Example #28
func (p *v2Puller) pullV2Tag(out io.Writer, tag, taggedName string) (verified bool, err error) {
	logrus.Debugf("Pulling tag from V2 registry: %q", tag)

	manSvc, err := p.repo.Manifests(context.Background())
	if err != nil {
		return false, err
	}

	manifest, err := manSvc.GetByTag(tag)
	if err != nil {
		return false, err
	}
	verified, err = p.validateManifest(manifest, tag)
	if err != nil {
		return false, err
	}
	if verified {
		logrus.Printf("Image manifest for %s has been verified", taggedName)
	}

	out.Write(p.sf.FormatStatus(tag, "Pulling from %s", p.repo.Name()))

	var downloads []*downloadInfo

	var layerIDs []string
	defer func() {
		p.graph.Release(p.sessionID, layerIDs...)

		for _, d := range downloads {
			p.poolRemoveWithError("pull", d.poolKey, err)
			if d.tmpFile != nil {
				d.tmpFile.Close()
				if err := os.RemoveAll(d.tmpFile.Name()); err != nil {
					logrus.Errorf("Failed to remove temp file: %s", d.tmpFile.Name())
				}
			}
		}
	}()

	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
		img, err := image.NewImgJSON([]byte(manifest.History[i].V1Compatibility))
		if err != nil {
			logrus.Debugf("error getting image v1 json: %v", err)
			return false, err
		}
		p.graph.Retain(p.sessionID, img.ID)
		layerIDs = append(layerIDs, img.ID)

		// Check if exists
		if p.graph.Exists(img.ID) {
			logrus.Debugf("Image already exists: %s", img.ID)
			out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Already exists", nil))
			continue
		}
		out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))

		d := &downloadInfo{
			img:     img,
			poolKey: "layer:" + img.ID,
			digest:  manifest.FSLayers[i].BlobSum,
			// TODO: this chan buffer seems to have fixed a hang seen with go1.5;
			// that may point to a deeper problem where the error is somehow
			// never taken from the channel in the loop below
			err: make(chan error, 1),
		}

		tmpFile, err := ioutil.TempFile("", "GetImageBlob")
		if err != nil {
			return false, err
		}
		d.tmpFile = tmpFile

		downloads = append(downloads, d)

		broadcaster, found := p.poolAdd("pull", d.poolKey)
		broadcaster.Add(out)
		d.broadcaster = broadcaster
		if found {
			d.err <- nil
		} else {
			go p.download(d)
		}
	}

	var tagUpdated bool
	for _, d := range downloads {
		if err := <-d.err; err != nil {
			return false, err
		}

		if d.layer == nil {
			// Wait for a different pull to download and extract
			// this layer.
			err = d.broadcaster.Wait()
			if err != nil {
				return false, err
			}
			continue
		}

		d.tmpFile.Seek(0, 0)
		reader := progressreader.New(progressreader.Config{
			In:        d.tmpFile,
			Out:       d.broadcaster,
			Formatter: p.sf,
			Size:      d.size,
			NewLines:  false,
			ID:        stringid.TruncateID(d.img.ID),
			Action:    "Extracting",
		})

		err = p.graph.Register(d.img, reader)
		if err != nil {
			return false, err
		}

		if err := p.graph.SetDigest(d.img.ID, d.digest); err != nil {
			return false, err
		}

		d.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
		d.broadcaster.Close()
		tagUpdated = true
	}

	manifestDigest, _, err := digestFromManifest(manifest, p.repoInfo.LocalName)
	if err != nil {
		return false, err
	}

	// Check for new tag if no layers downloaded
	if !tagUpdated {
		repo, err := p.Get(p.repoInfo.LocalName)
		if err != nil {
			return false, err
		}
		if repo != nil {
			if _, exists := repo[tag]; !exists {
				tagUpdated = true
			}
		} else {
			tagUpdated = true
		}
	}

	if verified && tagUpdated {
		out.Write(p.sf.FormatStatus(p.repo.Name()+":"+tag, "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
	}

	// layerIDs was filled bottom-up, so the last entry is the top-most image
	// (manifest.History[0]), which is what the tag should point at.
	firstID := layerIDs[len(layerIDs)-1]
	if utils.DigestReference(tag) {
		// TODO(stevvooe): Ideally, we should always set the digest so we can
		// use the digest whether we pull by it or not. Unfortunately, the tag
		// store treats the digest as a separate tag, meaning there may be an
		// untagged digest image that would seem to be dangling by a user.
		if err = p.SetDigest(p.repoInfo.LocalName, tag, firstID); err != nil {
			return false, err
		}
	} else {
		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
		if err = p.Tag(p.repoInfo.LocalName, tag, firstID, true); err != nil {
			return false, err
		}
	}

	if manifestDigest != "" {
		out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest))
	}

	return tagUpdated, nil
}
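
The broadcaster above fans progress output out to every client attached with Add, and Wait blocks until the downloading client calls Close. A toy version showing just that contract (the real type also replays buffered history to late joiners):

package main

import (
	"fmt"
	"io"
	"os"
	"sync"
)

// broadcaster fans every Write out to all attached writers; Wait blocks
// until the owner calls Close.
type broadcaster struct {
	mu      sync.Mutex
	writers []io.Writer
	done    chan struct{}
}

func newBroadcaster() *broadcaster {
	return &broadcaster{done: make(chan struct{})}
}

func (b *broadcaster) Add(w io.Writer) {
	b.mu.Lock()
	b.writers = append(b.writers, w)
	b.mu.Unlock()
}

func (b *broadcaster) Write(p []byte) (int, error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	for _, w := range b.writers {
		w.Write(p) // per-writer errors ignored in this sketch
	}
	return len(p), nil
}

func (b *broadcaster) Close()      { close(b.done) }
func (b *broadcaster) Wait() error { <-b.done; return nil }

func main() {
	b := newBroadcaster()
	b.Add(os.Stdout)

	go func() {
		fmt.Fprintln(b, "Pull complete")
		b.Close()
	}()
	b.Wait() // a second client would block here until Close
}
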
Example #29
func (p *v2Puller) pullV2Tag(tag, taggedName string) (bool, error) {
	logrus.Debugf("Pulling tag from V2 registry: %q", tag)
	out := p.config.OutStream

	manSvc, err := p.repo.Manifests(context.Background())
	if err != nil {
		return false, err
	}

	manifest, err := manSvc.GetByTag(tag)
	if err != nil {
		return false, err
	}
	verified, err := p.validateManifest(manifest, tag)
	if err != nil {
		return false, err
	}
	if verified {
		logrus.Printf("Image manifest for %s has been verified", taggedName)
	}

	out.Write(p.sf.FormatStatus(tag, "Pulling from %s", p.repo.Name()))

	downloads := make([]downloadInfo, len(manifest.FSLayers))

	layerIDs := []string{}
	defer func() {
		p.graph.Release(p.sessionID, layerIDs...)
	}()

	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
		img, err := image.NewImgJSON([]byte(manifest.History[i].V1Compatibility))
		if err != nil {
			logrus.Debugf("error getting image v1 json: %v", err)
			return false, err
		}
		downloads[i].img = img
		downloads[i].digest = manifest.FSLayers[i].BlobSum

		p.graph.Retain(p.sessionID, img.ID)
		layerIDs = append(layerIDs, img.ID)

		// Check if exists
		if p.graph.Exists(img.ID) {
			logrus.Debugf("Image already exists: %s", img.ID)
			continue
		}

		out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))

		downloads[i].err = make(chan error)
		go p.download(&downloads[i])
	}

	var tagUpdated bool
	for i := len(downloads) - 1; i >= 0; i-- {
		d := &downloads[i]
		if d.err != nil {
			if err := <-d.err; err != nil {
				return false, err
			}
		}
		if d.layer != nil {
			// if tmpFile is nil, assume the layer was downloaded and extracted elsewhere
			if d.tmpFile != nil {
				defer os.Remove(d.tmpFile.Name())
				defer d.tmpFile.Close()
				d.tmpFile.Seek(0, 0)

				reader := progressreader.New(progressreader.Config{
					In:        d.tmpFile,
					Out:       out,
					Formatter: p.sf,
					Size:      int(d.size),
					NewLines:  false,
					ID:        stringid.TruncateID(d.img.ID),
					Action:    "Extracting",
				})

				err = p.graph.Register(d.img, reader)
				if err != nil {
					return false, err
				}

				if err := p.graph.SetDigest(d.img.ID, d.digest); err != nil {
					return false, err
				}

				// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
			}
			out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
			tagUpdated = true
		} else {
			out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Already exists", nil))
		}
	}

	manifestDigest, _, err := digestFromManifest(manifest, p.repoInfo.LocalName)
	if err != nil {
		return false, err
	}

	// Check for new tag if no layers downloaded
	if !tagUpdated {
		repo, err := p.Get(p.repoInfo.LocalName)
		if err != nil {
			return false, err
		}
		if repo != nil {
			if _, exists := repo[tag]; !exists {
				tagUpdated = true
			}
		} else {
			tagUpdated = true
		}
	}

	if verified && tagUpdated {
		out.Write(p.sf.FormatStatus(p.repo.Name()+":"+tag, "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
	}

	if utils.DigestReference(tag) {
		// TODO(stevvooe): Ideally, we should always set the digest so we can
		// use the digest whether we pull by it or not. Unfortunately, the tag
		// store treats the digest as a separate tag, meaning there may be an
		// untagged digest image that would seem to be dangling by a user.
		if err = p.SetDigest(p.repoInfo.LocalName, tag, downloads[0].img.ID); err != nil {
			return false, err
		}
	} else {
		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
		if err = p.Tag(p.repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
			return false, err
		}
	}

	if manifestDigest != "" {
		out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest))
	}

	return tagUpdated, nil
}
Example #30
func (p *v1Puller) pullImage(out io.Writer, imgID, endpoint string) (layersDownloaded bool, err error) {
	var history []string
	history, err = p.session.GetRemoteHistory(imgID, endpoint)
	if err != nil {
		return false, err
	}
	out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pulling dependent layers", nil))
	// FIXME: Try to stream the images?
	// FIXME: Launch the getRemoteImage() in goroutines

	sessionID := p.session.ID()
	// As imgID has been retained in pullRepository, no need to retain again
	p.graph.Retain(sessionID, history[1:]...)
	defer p.graph.Release(sessionID, history[1:]...)

	layersDownloaded = false
	for i := len(history) - 1; i >= 0; i-- {
		id := history[i]

		// ensure no two downloads of the same layer happen at the same time
		poolKey := "layer:" + id
		broadcaster, found := p.poolAdd("pull", poolKey)
		broadcaster.Add(out)
		if found {
			logrus.Debugf("Image (id: %s) pull is already running, skipping", id)
			err = broadcaster.Wait()
			if err != nil {
				return layersDownloaded, err
			}
			continue
		}

		// This must use a closure so it captures the value of err when
		// the function returns, not when the 'defer' is evaluated.
		defer func() {
			p.poolRemoveWithError("pull", poolKey, err)
		}()

		if !p.graph.Exists(id) {
			broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil))
			var (
				imgJSON []byte
				imgSize int64
				err     error
				img     *image.Image
			)
			retries := 5
			for j := 1; j <= retries; j++ {
				imgJSON, imgSize, err = p.session.GetRemoteImageJSON(id, endpoint)
				if err != nil && j == retries {
					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, err
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				}
				img, err = image.NewImgJSON(imgJSON)
				layersDownloaded = true
				if err != nil && j == retries {
					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, fmt.Errorf("Failed to parse json: %s", err)
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else {
					break
				}
			}

			for j := 1; j <= retries; j++ {
				// Get the layer
				status := "Pulling fs layer"
				if j > 1 {
					status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
				}
				broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), status, nil))
				layer, err := p.session.GetRemoteImageLayer(img.ID, endpoint, imgSize)
				if uerr, ok := err.(*url.Error); ok {
					err = uerr.Err
				}
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, err
				}
				layersDownloaded = true
				defer layer.Close()

				err = p.graph.Register(img,
					progressreader.New(progressreader.Config{
						In:        layer,
						Out:       broadcaster,
						Formatter: p.sf,
						Size:      imgSize,
						NewLines:  false,
						ID:        stringid.TruncateID(id),
						Action:    "Downloading",
					}))
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil))
					return layersDownloaded, err
				} else {
					break
				}
			}
		}
		broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil))
		broadcaster.Close()
	}
	return layersDownloaded, nil
}
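
Retain/Release above pin the image IDs a pull session depends on so a concurrent delete cannot remove half-pulled ancestors. A toy reference counter illustrating the idea; the graph's real bookkeeping is keyed by session ID:

package main

import (
	"fmt"
	"sync"
)

// retainer counts how many sessions are holding each ID; anything with a
// positive count must not be deleted. This is a sketch, not the graph's
// actual implementation.
type retainer struct {
	mu   sync.Mutex
	refs map[string]int
}

func (r *retainer) Retain(ids ...string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	for _, id := range ids {
		r.refs[id]++
	}
}

func (r *retainer) Release(ids ...string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	for _, id := range ids {
		if r.refs[id]--; r.refs[id] <= 0 {
			delete(r.refs, id)
		}
	}
}

func (r *retainer) Retained(id string) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.refs[id] > 0
}

func main() {
	r := &retainer{refs: make(map[string]int)}
	r.Retain("img1", "img2")
	fmt.Println(r.Retained("img1")) // true: deletion must wait
	r.Release("img1", "img2")
	fmt.Println(r.Retained("img1")) // false: safe to delete
}
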