Example #1
func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
	for _, l := range layers {
		b, err := dm.blobStore.New()
		if err != nil {
			return initialRootFS, nil, err
		}
		defer b.Close()
		rc, _, err := l.Download(ctx, progressOutput)
		if err != nil {
			return initialRootFS, nil, errors.Wrap(err, "failed to download")
		}
		defer rc.Close()
		// Tee the compressed layer into the blob store while it is decompressed
		// for extraction.
		r := io.TeeReader(rc, b)
		inflatedLayerData, err := archive.DecompressStream(r)
		if err != nil {
			return initialRootFS, nil, err
		}
		// Hash the uncompressed stream while applying it, so the digest below is
		// the layer's DiffID.
		digester := digest.Canonical.New()
		if _, err := archive.ApplyLayer(dm.tmpDir, io.TeeReader(inflatedLayerData, digester.Hash())); err != nil {
			return initialRootFS, nil, err
		}
		initialRootFS.Append(layer.DiffID(digester.Digest()))
		d, err := b.Commit()
		if err != nil {
			return initialRootFS, nil, err
		}
		dm.blobs = append(dm.blobs, d)
	}
	return initialRootFS, nil, nil
}
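The pattern worth noting here is the double use of io.TeeReader: the compressed stream is teed into the blob store while the decompressed stream is teed into a digester, so the resulting digest is the layer's DiffID (a hash of the uncompressed bytes). Below is a minimal stand-alone sketch of that flow using only the standard library; gzip stands in for archive.DecompressStream, a plain sha256 hash stands in for the digest package, and the writeAndDigest name is purely illustrative.

package sketch

import (
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
	"os"
)

// writeAndDigest tees the compressed layer stream into a blob file while
// hashing the decompressed bytes, mirroring the Download loop above.
// gzip is assumed as the compression format; the real code autodetects it
// via archive.DecompressStream.
func writeAndDigest(compressed io.Reader, blobPath string) (string, error) {
	blob, err := os.Create(blobPath)
	if err != nil {
		return "", err
	}
	defer blob.Close()

	// Every compressed byte read below is also written to the blob file.
	tee := io.TeeReader(compressed, blob)

	gz, err := gzip.NewReader(tee)
	if err != nil {
		return "", err
	}
	defer gz.Close()

	// Hash the uncompressed bytes: this is what a layer DiffID is computed over.
	h := sha256.New()
	if _, err := io.Copy(h, gz); err != nil {
		return "", err
	}
	return fmt.Sprintf("sha256:%x", h.Sum(nil)), nil
}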
Example #2
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
func (gdw *naiveDiffDriver) ApplyDiff(id, parent string, diff archive.ArchiveReader) (bytes int64, err error) {
	driver := gdw.ProtoDriver

	// Mount the root filesystem so we can apply the diff/layer.
	layerFs, err := driver.Get(id, "")
	if err != nil {
		return
	}
	defer driver.Put(id)

	start := time.Now().UTC()
	log.Debugf("Start untar layer")
	if err = archive.ApplyLayer(layerFs, diff); err != nil {
		return
	}
	log.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())

	if parent == "" {
		return utils.TreeSize(layerFs)
	}

	parentFs, err := driver.Get(parent, "")
	if err != nil {
		err = fmt.Errorf("Driver %s failed to get image parent %s: %s", driver, parent, err)
		return
	}
	defer driver.Put(parent)

	changes, err := archive.ChangesDirs(layerFs, parentFs)
	if err != nil {
		return
	}

	return archive.ChangesSize(layerFs, changes), nil
}
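When the layer has no parent, the size reported is simply utils.TreeSize(layerFs), i.e. the total on-disk size of the mounted layer. A rough stdlib-only sketch of such a helper follows; unlike the real helper it does not deduplicate hardlinked files, and the treeSize name is only illustrative.

package sketch

import (
	"io/fs"
	"path/filepath"
)

// treeSize walks root and sums the size of every regular file. Hardlinked
// files are counted once per link here, whereas a real TreeSize helper
// deduplicates them by inode.
func treeSize(root string) (int64, error) {
	var size int64
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.Type().IsRegular() {
			info, err := d.Info()
			if err != nil {
				return err
			}
			size += info.Size()
		}
		return nil
	})
	return size, err
}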
Example #3
func (d *Driver) ApplyDiff(id string, parent string, diff archive.ArchiveReader) (bytes int64, err error) {
	dir := d.dir(id)

	if parent == "" {
		return 0, ErrApplyDiffFallback
	}

	parentRootDir := path.Join(d.dir(parent), "root")
	if _, err := os.Stat(parentRootDir); err != nil {
		return 0, ErrApplyDiffFallback
	}

	// We now know there is a parent, and it has a "root" directory containing
	// the full root filesystem. We can just hardlink it and apply the
	// layer. This relies on two things:
	// 1) ApplyDiff is only run once on a clean (no writes to upper layer) container
	// 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks)
	// These are all currently true and are not expected to break

	tmpRootDir, err := ioutil.TempDir(dir, "tmproot")
	if err != nil {
		return 0, err
	}
	defer func() {
		if err != nil {
			os.RemoveAll(tmpRootDir)
		} else {
			os.RemoveAll(path.Join(dir, "upper"))
			os.RemoveAll(path.Join(dir, "work"))
			os.RemoveAll(path.Join(dir, "merged"))
			os.RemoveAll(path.Join(dir, "lower-id"))
		}
	}()

	if err = copyDir(parentRootDir, tmpRootDir, CopyHardlink); err != nil {
		return 0, err
	}

	if err := archive.ApplyLayer(tmpRootDir, diff); err != nil {
		return 0, err
	}

	rootDir := path.Join(dir, "root")
	if err := os.Rename(tmpRootDir, rootDir); err != nil {
		return 0, err
	}

	changes, err := archive.ChangesDirs(rootDir, parentRootDir)
	if err != nil {
		return 0, err
	}

	return archive.ChangesSize(rootDir, changes), nil
}
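The core of this fast path is copyDir(parentRootDir, tmpRootDir, CopyHardlink): the parent's root filesystem is "copied" by creating hardlinks, so the new layer starts as a cheap snapshot and ApplyLayer then replaces only the files the diff touches. A minimal sketch of such a hardlink copy is below, assuming both trees live on the same filesystem; ownership, xattrs, and device nodes, which the real copyDir handles, are omitted, and hardlinkCopy is an illustrative name.

package sketch

import (
	"io/fs"
	"os"
	"path/filepath"
)

// hardlinkCopy recreates src under dst, hardlinking regular files instead of
// copying their contents. This only works within a single filesystem, and it
// relies on ApplyLayer replacing (rather than rewriting in place) any file it
// changes, so the parent's data is never modified through the links.
func hardlinkCopy(src, dst string) error {
	return filepath.WalkDir(src, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		rel, err := filepath.Rel(src, path)
		if err != nil {
			return err
		}
		target := filepath.Join(dst, rel)
		switch {
		case d.IsDir():
			info, err := d.Info()
			if err != nil {
				return err
			}
			return os.MkdirAll(target, info.Mode().Perm())
		case d.Type()&fs.ModeSymlink != 0:
			link, err := os.Readlink(path)
			if err != nil {
				return err
			}
			return os.Symlink(link, target)
		default:
			return os.Link(path, target)
		}
	})
}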
Example #4
func applyLayer(containerId string) error {
	fs := os.Stdin
	dest := path.Join(root, "devicemapper", "mnt", containerId, "rootfs")
	// The destination rootfs must already exist and be a directory.
	fi, err := os.Stat(dest)
	if err != nil {
		return err
	}

	if !fi.IsDir() {
		return fmt.Errorf("dest %s is not a directory", dest)
	}

	err = archive.ApplyLayer(dest, fs)
	return err
}
Example #5
func (s *TagStore) CmdDiffAndApply(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 3 {
		return job.Errorf("Usage : %s CONTAINERID SRCIMAGEID TAGIMAGEID", job.Name)
	}

	var (
		containerID   = job.Args[0]
		localName     = job.Args[1]
		parentImageID = job.Args[2]
		sf            = utils.NewStreamFormatter(job.GetenvBool("json"))
		rate          = 0 // rate, per second, at which image layer data is written to the container
	)
	if job.EnvExists("rate") {
		rate = job.GetenvInt("rate")
	}

	img, err := s.LookupImage(localName)
	if err != nil {
		return job.Error(err)
	}

	dest := s.graph.Driver().MountPath(containerID)
	fi, err := os.Stat(dest)
	if err != nil {
		return job.Error(err)
	}
	if !fi.IsDir() {
		return job.Errorf("dest %s is not a directory", dest)
	}

	job.Stdout.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Diff two mirrors(%s - %s)", img.ID, parentImageID), nil))
	fs, err := s.graph.Driver().Diff(img.ID, parentImageID, nil)
	if err != nil {
		return job.Error(err)
	}
	defer fs.Close()
	job.Stdout.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Complete", nil))

	job.Stdout.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Merge layer to container rootfs %s", dest), nil))
	err = archive.ApplyLayer(dest, fs, int64(rate))
	if err != nil {
		return job.Error(err)
	}

	job.Stdout.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Complete", nil))
	return engine.StatusOK
}
Example #6
File: diff.go Project: NERSC/docker
func applyLayer() {
	runtime.LockOSThread()
	flag.Parse()

	// Chroot into the destination root so a malicious layer cannot escape it
	// via symlinks or ".." path components.
	if err := syscall.Chroot(flag.Arg(0)); err != nil {
		fatal(err)
	}
	if err := syscall.Chdir("/"); err != nil {
		fatal(err)
	}
	// Keep temporary extraction files inside the chroot.
	tmpDir, err := ioutil.TempDir("/", "temp-docker-extract")
	if err != nil {
		fatal(err)
	}
	os.Setenv("TMPDIR", tmpDir)
	// Apply the layer read from stdin onto the (now chroot'd) root.
	if err := archive.ApplyLayer("/", os.Stdin); err != nil {
		os.RemoveAll(tmpDir)
		fatal(err)
	}
	os.RemoveAll(tmpDir)
	os.Exit(0)
}
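This function is the child half of a chroot'd extraction: it chroots into the target rootfs so a hostile tar cannot break out through symlinks or ".." components, keeps TMPDIR inside the chroot, then applies the layer read from stdin. The real Docker code reaches this path by re-executing itself via its reexec package; purely as a sketch of the parent side, the snippet below streams a layer into a hypothetical helper command over stdin.

package sketch

import (
	"fmt"
	"io"
	"os/exec"
)

// applyLayerInChroot streams a layer tar into a helper process that chroots
// into dest before extracting (the helper's logic is the applyLayer function
// above). The "docker-applyLayer" command name is hypothetical; the real
// implementation re-executes the docker binary via its reexec package.
func applyLayerInChroot(dest string, layer io.Reader) error {
	cmd := exec.Command("docker-applyLayer", dest)
	cmd.Stdin = layer // the child reads the tar stream from stdin
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("applyLayer helper failed: %v: %s", err, out)
	}
	return nil
}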
Example #7
func diffAndApply(id, parent, container string) error {
	g, err := initGraph()
	if err != nil {
		return err
	}

	b, err := checkIsParent(id, parent, g)
	if err != nil {
		return err
	}
	if !b {
		return fmt.Errorf("%s is not parent of %s", parent, id)
	}

	dest := path.Join(root, "devicemapper", "mnt", container, "rootfs")
	fi, err := os.Stat(dest)
	if err != nil {
		return err
	}

	if !fi.IsDir() {
		return fmt.Errorf("dest %s is not a directory", dest)
	}

	driver := g.Driver()
	fs, err := driver.Diff(id, parent)
	if err != nil {
		return err
	}
	defer fs.Close()

	return archive.ApplyLayer(dest, fs)
}
Example #8
func (s *TagStore) pullAndMergeImage(r *registry.Session, out io.Writer, containerID, containerImage, imgID, endpoint string, token []string, sf *utils.StreamFormatter) (bool, error) {
	newHistory, err := r.GetRemoteHistory(imgID, endpoint, token)
	if err != nil {
		return false, err
	}
	oldHistory, err := r.GetRemoteHistory(containerImage, endpoint, token)
	if err != nil {
		return false, err
	}
	// Compare the differences between the two images
	compareHistory := make(map[string]string, len(oldHistory))
	for _, id := range oldHistory {
		compareHistory[id] = id
	}
	var history []string
	for _, id := range newHistory {
		if _, ok := compareHistory[id]; !ok {
			history = append(history, id)
		}
	}

	layers_downloaded := false
	for i := len(history) - 1; i >= 0; i-- {
		id := history[i]

		// ensure no two downloads of the same layer happen at the same time
		if c, err := s.poolAdd("pull", "layer:"+id); err != nil {
			log.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
			<-c
		}
		defer s.poolRemove("pull", "layer:"+id)

		out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
		var (
			imgJSON []byte
			imgSize int
			err     error
			img     *image.Image
		)
		retries := 5
		for j := 1; j <= retries; j++ {
			imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
			if err != nil && j == retries {
				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
				return layers_downloaded, err
			} else if err != nil {
				time.Sleep(time.Duration(j) * 500 * time.Millisecond)
				continue
			}
			img, err = image.NewImgJSON(imgJSON)
			layers_downloaded = true
			if err != nil && j == retries {
				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
				return layers_downloaded, fmt.Errorf("Failed to parse json: %s", err)
			} else if err != nil {
				time.Sleep(time.Duration(j) * 500 * time.Millisecond)
				continue
			} else {
				break
			}
		}

		for j := 1; j <= retries; j++ {
			// Get the layer
			status := "Pulling fs layer"
			if j > 1 {
				status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
			}
			out.Write(sf.FormatProgress(utils.TruncateID(id), status, nil))
			layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize))
			if uerr, ok := err.(*url.Error); ok {
				err = uerr.Err
			}
			if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
				time.Sleep(time.Duration(j) * 500 * time.Millisecond)
				continue
			} else if err != nil {
				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
				return layers_downloaded, err
			}
			layers_downloaded = true
			defer layer.Close()
			if !s.graph.Exists(id) {
				// register the layer on its first pull
				err = s.graph.Register(img, imgJSON,
					utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"))
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
					return layers_downloaded, err
				}
			}
			// add layer to container
			dest := path.Join(s.graph.Driver().MountPath(), containerID, "rootfs")
			out.Write(sf.FormatProgress(utils.TruncateID(id), fmt.Sprintf("Merge layer to container rootfs %s", dest), nil))
			err = archive.ApplyLayer(dest, layer)
			if err != nil && j == retries {
				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error merging layers", nil))
				return layers_downloaded, err
			} else if err != nil {
				time.Sleep(time.Duration(j) * 500 * time.Millisecond)
				continue
			} else {
				break
			}
		}
	}
	return layers_downloaded, nil
}
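Every network step in this example repeats the same manual retry loop with a linearly growing sleep (j * 500ms). That loop can be factored into a small helper; the sketch below is illustrative only (withRetries is not part of the original code). Note that the original loops also inspect the error type (net timeouts versus hard failures) before retrying, which such a helper would need a predicate for.

package sketch

import "time"

// withRetries runs fn up to attempts times, sleeping attempt*step between
// tries, matching the linear backoff used above (j * 500ms).
func withRetries(attempts int, step time.Duration, fn func() error) error {
	var err error
	for i := 1; i <= attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		if i < attempts {
			time.Sleep(time.Duration(i) * step)
		}
	}
	return err
}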
Example #9
// pulling using the V1 registry
func (s *registrySession) downloadImage(imageName, imageTag, rootfsDest string, gitLayering bool) error {
	repoData, err := s.GetRepositoryData(imageName)
	if err != nil {
		return err
	}
	fmt.Printf("Registry endpoint: %v\n", repoData.Endpoints)

	tagsList, err := s.GetRemoteTags(repoData.Endpoints, imageName, repoData.Tokens)
	if err != nil {
		return err
	}

	imageId := tagsList[imageTag]
	fmt.Printf("Image ID: %v\n", imageId)

	// Download the image history
	var imageHistory []string
	for _, ep := range repoData.Endpoints {
		imageHistory, err = s.GetRemoteHistory(imageId, ep, repoData.Tokens)
		if err == nil {
			break
		}
	}
	if err != nil {
		return err
	}

	err = os.MkdirAll(rootfsDest, 0700)
	if err != nil {
		return err
	}

	var gitRepo *gitRepo
	if gitLayering {
		if gitRepo, err = newGitRepo(rootfsDest); err != nil {
			return err
		}
	}

	queue := NewQueue(MAX_DL_CONCURRENCY)
	fmt.Printf("Pulling %d layers:\n", len(imageHistory))

	for i := len(imageHistory) - 1; i >= 0; i-- {
		layerId := imageHistory[i]
		job := NewPullingJob(s, repoData, layerId)
		queue.Enqueue(job)
	}
	<-queue.DoneChan

	fmt.Printf("Downloading layers:\n")

	cpt := 0

	for i := len(imageHistory) - 1; i >= 0; i-- {

		// for each layer
		layerID := imageHistory[i]

		if gitLayering {
			//create a git branch
			if _, err = gitRepo.checkoutB(newBranch(cpt, layerID)); err != nil {
				return err
			}
		}

		//download and untar the layer
		job := queue.CompletedJobWithID(layerID).(*PullingJob)
		fmt.Printf("\t%s (%.2f MB) ... ", layerID, float64(job.LayerSize)/ONE_MB)
		_, err = archive.ApplyLayer(rootfsDest, job.LayerData)
		job.LayerData.Close()
		if err != nil {
			return err
		}

		ioutil.WriteFile(path.Join(rootfsDest, "json"), job.LayerInfo, 0644)
		if gitLayering {
			ioutil.WriteFile(path.Join(rootfsDest, "layersize"), []byte(strconv.Itoa(job.LayerSize)), 0644)
		}

		if gitLayering {
			if _, err = gitRepo.addAllAndCommit("adding layer " + layerID); err != nil {
				return err
			}
		}

		cpt++

		fmt.Printf("done\n")
	}
	return nil
}
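With gitLayering enabled, each layer gets its own branch before extraction and a commit afterwards, so the git history of rootfsDest mirrors the image's layer history. The gitRepo helpers are specific to this project; as a rough sketch, they boil down to ordinary git commands run inside the rootfs (runGit, checkoutLayerBranch, and commitLayer below are illustrative names, not the project's API).

package sketch

import (
	"fmt"
	"os/exec"
)

// runGit runs a git command inside the rootfs directory.
func runGit(rootfs string, args ...string) error {
	cmd := exec.Command("git", args...)
	cmd.Dir = rootfs
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("git %v: %v: %s", args, err, out)
	}
	return nil
}

// checkoutLayerBranch roughly corresponds to gitRepo.checkoutB above: create
// and switch to a branch before the layer is extracted into the rootfs.
func checkoutLayerBranch(rootfs, branch string) error {
	return runGit(rootfs, "checkout", "-b", branch)
}

// commitLayer roughly corresponds to gitRepo.addAllAndCommit: once the layer
// has been untarred, stage everything and record it as one commit.
func commitLayer(rootfs, layerID string) error {
	if err := runGit(rootfs, "add", "-A"); err != nil {
		return err
	}
	return runGit(rootfs, "commit", "-m", "adding layer "+layerID)
}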
Example #10
File: image.go Project: vmware/vic
// Create the image directory, create a temp vmdk in this directory,
// attach/mount the disk, unpack the tar, and check the checksum. If the data
// doesn't match the expected checksum, abort by nuking the image directory.
// If everything matches, move the tmp vmdk to ID.vmdk. The unwind path is a
// bit convoluted here; we need to clean up on the way out in the error case.
func (v *ImageStore) writeImage(op trace.Operation, storeName, parentID, ID string, meta map[string][]byte,
	sum string, r io.Reader) error {

	// Create a temp image directory in the store.
	imageDir := v.imageDirPath(storeName, ID)
	_, err := v.ds.Mkdir(op, true, imageDir)
	if err != nil {
		return err
	}

	// Write the metadata to the datastore
	metaDataDir := v.imageMetadataDirPath(storeName, ID)
	err = writeMetadata(op, v.ds, metaDataDir, meta)
	if err != nil {
		return err
	}

	// datastore path to the parent
	parentDiskDsURI := v.imageDiskDSPath(storeName, parentID)

	// datastore path to the disk we're creating
	diskDsURI := v.imageDiskDSPath(storeName, ID)
	op.Infof("Creating image %s (%s)", ID, diskDsURI)

	var vmdisk *disk.VirtualDisk

	// On error, unmount if mounted, detach if attached, and nuke the image directory
	defer func() {
		if err != nil {
			op.Errorf("Cleaning up failed WriteImage directory %s", imageDir)

			if vmdisk != nil {
				if vmdisk.Mounted() {
					op.Debugf("Unmounting abandoned disk")
					vmdisk.Unmount()
				}

				if vmdisk.Attached() {
					op.Debugf("Detaching abandoned disk")
					v.dm.Detach(op, vmdisk)
				}
			}

			v.ds.Rm(op, imageDir)
		}
	}()

	// Create the disk
	vmdisk, err = v.dm.CreateAndAttach(op, diskDsURI, parentDiskDsURI, 0, os.O_RDWR)
	if err != nil {
		return err
	}
	// tmp dir to mount the disk
	dir, err := ioutil.TempDir("", "mnt-"+ID)
	if err != nil {
		return err
	}
	defer os.RemoveAll(dir)

	if err := vmdisk.Mount(dir, nil); err != nil {
		return err
	}

	h := sha256.New()
	t := io.TeeReader(r, h)

	// Untar the archive
	var n int64
	if n, err = archive.ApplyLayer(dir, t); err != nil {
		return err
	}

	op.Debugf("%s wrote %d bytes", ID, n)

	actualSum := fmt.Sprintf("sha256:%x", h.Sum(nil))
	if actualSum != sum {
		err = fmt.Errorf("Failed to validate image checksum. Expected %s, got %s", sum, actualSum)
		return err
	}

	if err = vmdisk.Unmount(); err != nil {
		return err
	}

	if err = v.dm.Detach(op, vmdisk); err != nil {
		return err
	}

	// Write our own bookkeeping manifest file to the image's directory.  We
	// treat the manifest file like a done file.  Its existence means this vmdk
	// is consistent.  Previously we were writing the vmdk to a tmp vmdk file
	// then moving it (using the MoveDatastoreFile or MoveVirtualDisk calls).
	// However(!!) this flattens the vmdk.  Also mkdir foo && ls -l foo fails
	// on VSAN (see
	// https://github.com/vmware/vic/pull/1764#issuecomment-237093424 for
	// detail).  We basically can't trust any of the datastore calls to help us
	// with atomic operations.  Touching an empty file seems to work well
	// enough.
	if err = v.writeManifest(op, storeName, ID, nil); err != nil {
		return err
	}

	return nil
}
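The closing comment is the key design note: because datastore move/rename operations can't be relied on for atomicity (and flatten the vmdk), the manifest is used purely as a "done file" whose existence marks the image as consistent. A minimal local-filesystem sketch of that convention follows; the real code writes through the datastore API rather than os, and the marker file name here is hypothetical.

package sketch

import (
	"os"
	"path/filepath"
)

const manifestName = "manifest" // hypothetical marker file name

// markDone touches an empty marker file after all image data has been
// written; readers treat the image as consistent only if the marker exists.
func markDone(imageDir string) error {
	f, err := os.OpenFile(filepath.Join(imageDir, manifestName), os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	return f.Close()
}

// isDone reports whether the marker exists, i.e. whether the image write
// completed successfully.
func isDone(imageDir string) (bool, error) {
	_, err := os.Stat(filepath.Join(imageDir, manifestName))
	if os.IsNotExist(err) {
		return false, nil
	}
	return err == nil, err
}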