Example #1
// gh#10426: Verify the fix for having a huge excludes list (like on `docker load` with large # of
// local images)
func TestChrootUntarWithHugeExcludesList(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarHugeExcludes")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpdir)
	src := filepath.Join(tmpdir, "src")
	if err := system.MkdirAll(src, 0700); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil {
		t.Fatal(err)
	}
	stream, err := archive.Tar(src, archive.Uncompressed)
	if err != nil {
		t.Fatal(err)
	}
	dest := filepath.Join(tmpdir, "dest")
	if err := system.MkdirAll(dest, 0700); err != nil {
		t.Fatal(err)
	}
	options := &archive.TarOptions{}
	// 65534 entries of 64-byte strings ~= 4MB of environment space, which should overflow
	// on most systems when passed via environment or command line arguments
	excludes := make([]string, 65534)
	for i := 0; i < 65534; i++ {
		excludes[i] = strings.Repeat(string(rune(i)), 64)
	}
	}
	options.ExcludePatterns = excludes
	if err := Untar(stream, dest, options); err != nil {
		t.Fatal(err)
	}
}
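
Both this test and TestChrootTarUntar below drive archive.TarOptions.ExcludePatterns. As a rough illustration of how shell-style exclude patterns can be matched against relative paths while walking a source tree, here is a standalone sketch using only the standard library; it is not the chrootarchive implementation, and the directory and patterns are made up.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// excluded reports whether rel matches any of the shell-style patterns.
func excluded(rel string, patterns []string) bool {
	for _, p := range patterns {
		if ok, err := filepath.Match(p, rel); err == nil && ok {
			return true
		}
	}
	return false
}

func main() {
	src := "/tmp/src"                     // hypothetical source directory
	patterns := []string{"lolo", "*.tmp"} // hypothetical exclude patterns
	_ = filepath.Walk(src, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		rel, rerr := filepath.Rel(src, path)
		if rerr != nil {
			return rerr
		}
		if excluded(rel, patterns) {
			if info.IsDir() {
				return filepath.SkipDir // don't descend into an excluded directory
			}
			return nil // skip this file
		}
		fmt.Println("would include:", rel)
		return nil
	})
}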
Example #2
func TestChrootApplyDotDotFile(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyDotDotFile")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpdir)
	src := filepath.Join(tmpdir, "src")
	if err := system.MkdirAll(src, 0700); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile(filepath.Join(src, "..gitme"), []byte(""), 0644); err != nil {
		t.Fatal(err)
	}
	stream, err := archive.Tar(src, archive.Uncompressed)
	if err != nil {
		t.Fatal(err)
	}
	dest := filepath.Join(tmpdir, "dest")
	if err := system.MkdirAll(dest, 0700); err != nil {
		t.Fatal(err)
	}
	if _, err := ApplyLayer(dest, stream); err != nil {
		t.Fatal(err)
	}
}
Example #3
func TestChrootTarUntar(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpdir)
	src := filepath.Join(tmpdir, "src")
	if err := system.MkdirAll(src, 0700); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile(filepath.Join(src, "lolo"), []byte("hello lolo"), 0644); err != nil {
		t.Fatal(err)
	}
	stream, err := archive.Tar(src, archive.Uncompressed)
	if err != nil {
		t.Fatal(err)
	}
	dest := filepath.Join(tmpdir, "dest")
	if err := system.MkdirAll(dest, 0700); err != nil {
		t.Fatal(err)
	}
	if err := Untar(stream, dest, &archive.TarOptions{ExcludePatterns: []string{"lolo"}}); err != nil {
		t.Fatal(err)
	}
}
Example #4
func diff(id, parent string) (diff archive.Archive, err error) {

	glog.Infof("Diff between %s and %s", id, parent)

	// Hard-coded test paths stand in for the layer and its parent filesystems.
	layerFs := "/tmp/test1"
	if parent == "" {
		archive, err := archive.Tar(layerFs, archive.Uncompressed)
		if err != nil {
			return nil, err
		}
		return ioutils.NewReadCloserWrapper(archive, func() error {
			err := archive.Close()
			return err
		}), nil
	}

	parentFs := "/tmp/test2"

	changes, err := archive.ChangesDirs(layerFs, parentFs)
	if err != nil {
		return nil, err
	}

	archive, err := archive.ExportChanges(layerFs, changes)
	if err != nil {
		return nil, err
	}

	return ioutils.NewReadCloserWrapper(archive, func() error {
		err := archive.Close()
		return err
	}), nil
}
Example #5
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (gdw *naiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) {
	glog.Error("Run the graphdriver Diff function")
	driver := gdw.ProtoDriver

	layerFs, err := driver.Get(id, "")
	if err != nil {
		return nil, err
	}

	defer func() {
		if err != nil {
			driver.Put(id)
		}
	}()

	if parent == "" {
		archive, err := archive.Tar(layerFs, archive.Uncompressed)
		if err != nil {
			return nil, err
		}
		return ioutils.NewReadCloserWrapper(archive, func() error {
			err := archive.Close()
			driver.Put(id)
			return err
		}), nil
	}

	parentFs, err := driver.Get(parent, "")
	if err != nil {
		return nil, err
	}
	defer driver.Put(parent)

	changes, err := archive.ChangesDirs(layerFs, parentFs)
	if err != nil {
		return nil, err
	}

	archive, err := archive.ExportChanges(layerFs, changes)
	if err != nil {
		return nil, err
	}

	return ioutils.NewReadCloserWrapper(archive, func() error {
		err := archive.Close()
		driver.Put(id)
		return err
	}), nil
}
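
Both Diff variants above hand the tar stream back through ioutils.NewReadCloserWrapper so that cleanup (archive.Close and driver.Put) is deferred until the caller closes the stream. Below is a minimal sketch of that wrapper idea using only the standard io package; the names are illustrative and this is not docker's pkg/ioutils source.

package main

import (
	"fmt"
	"io"
	"strings"
)

// readCloserWrapper forwards Read to the wrapped reader and runs a
// user-supplied callback on Close, so cleanup only happens once the
// consumer has finished with the stream.
type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (w *readCloserWrapper) Close() error { return w.closer() }

func newReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
	return &readCloserWrapper{Reader: r, closer: closer}
}

func main() {
	rc := newReadCloserWrapper(strings.NewReader("layer data"), func() error {
		fmt.Println("cleanup runs here, e.g. driver.Put(id)")
		return nil
	})
	io.Copy(io.Discard, rc) // read the stream to the end
	rc.Close()              // now the cleanup callback fires
}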
Example #6
func TestChrootUntarPath(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarPath")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpdir)
	src := filepath.Join(tmpdir, "src")
	if err := system.MkdirAll(src, 0700); err != nil {
		t.Fatal(err)
	}
	if _, err := prepareSourceDirectory(10, src, true); err != nil {
		t.Fatal(err)
	}
	dest := filepath.Join(tmpdir, "dest")
	// Untar a directory
	if err := UntarPath(src, dest); err == nil {
		t.Fatal("Expected error on untaring a directory")
	}

	// Untar a tar file
	stream, err := archive.Tar(src, archive.Uncompressed)
	if err != nil {
		t.Fatal(err)
	}
	buf := new(bytes.Buffer)
	if _, err := buf.ReadFrom(stream); err != nil {
		t.Fatal(err)
	}
	tarfile := filepath.Join(tmpdir, "src.tar")
	if err := ioutil.WriteFile(tarfile, buf.Bytes(), 0644); err != nil {
		t.Fatal(err)
	}
	if err := UntarPath(tarfile, dest); err != nil {
		t.Fatal(err)
	}
	if err := compareDirectories(src, dest); err != nil {
		t.Fatal(err)
	}
}
Example #7
File: fsdiff.go Project: sulochan/hyper
func (d *Driver) Diff(id, parent string) (diff archive.Archive, err error) {
	if d.daemon == nil {
		if err := d.Setup(); err != nil {
			return nil, err
		}
	}
	var (
		podData string
		tgtDisk string = ""
		code    int
		cause   string
	)
	srcDisk := fmt.Sprintf("%s/images/%s.vdi", d.RootPath(), id)
	if parent != "" {
		tgtDisk = fmt.Sprintf("%s/images/%s.vdi", d.RootPath(), parent)
	}
	outDir := path.Join(utils.HYPER_ROOT, "tar")
	if err := os.MkdirAll(outDir, 0755); err != nil {
		return nil, err
	}
	uuid, err := virtualbox.GetMediumUUID(srcDisk)
	if err == nil {
		srcDisk = uuid
	}
	// create pod
	podId := "diff-" + id[:10]
	podData, err = MakeDiffPod(podId, "puller:latest", id, srcDisk, tgtDisk, outDir)
	if err != nil {
		return nil, err
	}

	// start or replace pod
	vm, ok := d.daemon.VmList[d.pullVm]
	if !ok {
		return nil, fmt.Errorf("can not find VM(%s)", d.pullVm)
	}
	if vm.Status == types.S_VM_IDLE {
		code, cause, err = d.daemon.StartPod(podId, podData, d.pullVm, nil, false, true, types.VM_KEEP_AFTER_SHUTDOWN)
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			d.daemon.KillVm(d.pullVm)
			return nil, err
		}
		vm := d.daemon.VmList[d.pullVm]
		// wait for cmd finish
		_, _, ret3, err := vm.GetVmChan()
		if err != nil {
			glog.Error(err.Error())
			return nil, err
		}
		subVmStatus := ret3.(chan *types.VmResponse)
		var vmResponse *types.VmResponse
		for {
			vmResponse = <-subVmStatus
			if vmResponse.VmId == d.pullVm {
				if vmResponse.Code == types.E_POD_FINISHED {
					glog.Infof("Got E_POD_FINISHED code response")
					break
				}
			}
		}

		d.daemon.PodList[podId].Vm = d.pullVm
		// release pod from VM
		code, cause, err = d.daemon.StopPod(podId, "no")
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			d.daemon.KillVm(d.pullVm)
			return nil, err
		}
		d.daemon.CleanPod(podId)
	} else {
		glog.Errorf("pull vm should not be associated")
		return nil, fmt.Errorf("pull vm is not idle")
	}

	tarFile := outDir + "/" + id + ".tar"
	if _, err := os.Stat(tarFile); err != nil {
		// If parent is empty this is the base layer, so the pod may not have
		// produced a tar file; fall back to tarring the diff directory.
		if parent == "" {
			layerFs := fmt.Sprintf("%s/diff/%s", d.RootPath(), id)
			archive, err := archive.Tar(layerFs, archive.Uncompressed)
			if err != nil {
				return nil, err
			}
			return ioutils.NewReadCloserWrapper(archive, func() error {
				err := archive.Close()
				return err
			}), nil
		} else {
			return nil, fmt.Errorf("the out tar file is not exist")
		}
	}
	f, err := os.Open(tarFile)
	if err != nil {
		return nil, err
	}
	var archive io.ReadCloser = ioutil.NopCloser(f)
	glog.Infof("Diff between %s and %s", id, parent)
	return ioutils.NewReadCloserWrapper(archive, func() error {
		err := archive.Close()
		return err
	}), nil
}
Example #8
File: internals.go Project: neujie/hyper
func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error {

	if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "./")

	// Twiddle the destPath when its a relative path - meaning, make it
	// relative to the WORKINGDIR
	if !filepath.IsAbs(destPath) {
		hasSlash := strings.HasSuffix(destPath, "/")
		destPath = filepath.Join("/", b.Config.WorkingDir, destPath)

		// Make sure we preserve any trailing slash
		if hasSlash {
			destPath += "/"
		}
	}

	// In the remote/URL case, download it and gen its hashcode
	if urlutil.IsURL(origPath) {
		if !allowRemote {
			return fmt.Errorf("Source can't be a URL for %s", cmdName)
		}

		ci := copyInfo{}
		ci.origPath = origPath
		ci.hash = origPath // default to this but can change
		ci.destPath = destPath
		ci.decompress = false
		*cInfos = append(*cInfos, &ci)

		// Initiate the download
		resp, err := httputils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := filepath.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
			In:        resp.Body,
			Out:       b.OutOld,
			Formatter: b.StreamFormatter,
			Size:      int(resp.ContentLength),
			NewLines:  true,
			ID:        "",
			Action:    "Downloading",
		})); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Set the mtime to the Last-Modified header value if present
		// Otherwise just remove atime and mtime
		times := make([]syscall.Timespec, 2)

		lastMod := resp.Header.Get("Last-Modified")
		if lastMod != "" {
			mTime, err := http.ParseTime(lastMod)
			// If we can't parse it then just let it default to 'zero'
			// otherwise use the parsed time value
			if err == nil {
				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
			}
		}

		if err := system.UtimesNano(tmpFileName, times); err != nil {
			return err
		}

		ci.origPath = filepath.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, "/") {
			u, err := url.Parse(origPath)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}

		// Calc the checksum, even if we're using the cache
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		ci.hash = tarSum.Sum(nil)
		r.Close()

		return nil
	}

	// Deal with wildcards
	if allowWildcards && ContainsWildcards(origPath) {
		for _, fileInfo := range b.context.GetSums() {
			if fileInfo.Name() == "" {
				continue
			}
			match, _ := filepath.Match(origPath, fileInfo.Name())
			if !match {
				continue
			}

			// Note we set allowWildcards to false in case the name has
			// a * in it
			if err := calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false); err != nil {
				return err
			}
		}
		return nil
	}

	// Must be a dir or a file

	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}
	fi, err := os.Stat(filepath.Join(b.contextPath, origPath))
	if err != nil {
		return err
	}

	ci := copyInfo{}
	ci.origPath = origPath
	ci.hash = origPath
	ci.destPath = destPath
	ci.decompress = allowDecompression
	*cInfos = append(*cInfos, &ci)

	// Deal with the single file case
	if !fi.IsDir() {
		// This will match first file in sums of the archive
		fis := b.context.GetSums().GetFile(ci.origPath)
		if fis != nil {
			ci.hash = "file:" + fis.Sum()
		}
		return nil
	}

	// Must be a dir
	var subfiles []string
	absOrigPath := filepath.Join(b.contextPath, ci.origPath)

	// Add a trailing / to make sure we only pick up nested files under
	// the dir and not sibling files of the dir that just happen to
	// start with the same chars
	if !strings.HasSuffix(absOrigPath, "/") {
		absOrigPath += "/"
	}

	// Need path w/o / too to find matching dir w/o trailing /
	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]

	for _, fileInfo := range b.context.GetSums() {
		absFile := filepath.Join(b.contextPath, fileInfo.Name())
		// Any file in the context that starts with the given path will be
		// picked up and its hashcode used.  However, we'll exclude the
		// root dir itself.  We do this for a couple of reasons:
		// 1 - ADD/COPY will not copy the dir itself, just its children
		//     so there's no reason to include it in the hash calc
		// 2 - the metadata on the dir will change when any child file
		//     changes.  This will lead to a miss in the cache check if that
		//     child file is in the .dockerignore list.
		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
			subfiles = append(subfiles, fileInfo.Sum())
		}
	}
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	return nil
}
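
The URL branch above computes the ADD/COPY cache hash by tarring the downloaded file and streaming it through tarsum. Distilled into a small helper for readability; this assumes the same imports as the surrounding code (docker's pkg/archive and pkg/tarsum plus io and io/ioutil), and the helper name is made up.

// tarSumOf returns the tarsum of the file or directory at path, using
// the same calls as the checksum step in calcCopyInfo above.
func tarSumOf(path string) (string, error) {
	r, err := archive.Tar(path, archive.Uncompressed)
	if err != nil {
		return "", err
	}
	defer r.Close()
	ts, err := tarsum.NewTarSum(r, true, tarsum.Version0)
	if err != nil {
		return "", err
	}
	// Drain the stream so the sum covers the whole archive.
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		return "", err
	}
	return ts.Sum(nil), nil
}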
Example #9
File: job.go Project: WeiZhang555/hyper
func Build(d *daemon.Daemon, buildConfig *Config) error {
	var (
		repoName string
		tag      string
		context  io.ReadCloser
	)

	repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName)
	if repoName != "" {
		if err := registry.ValidateRepositoryName(repoName); err != nil {
			glog.Error(err.Error())
			return err
		}
		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				glog.Error(err.Error())
				return err
			}
		}
	}

	if buildConfig.RemoteURL == "" {
		context = ioutil.NopCloser(buildConfig.Context)
	} else if urlutil.IsGitURL(buildConfig.RemoteURL) {
		root, err := utils.GitClone(buildConfig.RemoteURL)
		if err != nil {
			glog.Error(err.Error())
			return err
		}
		defer os.RemoveAll(root)

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			glog.Error(err.Error())
			return err
		}
		context = c
	} else if urlutil.IsURL(buildConfig.RemoteURL) {
		f, err := httputils.Download(buildConfig.RemoteURL)
		if err != nil {
			glog.Error(err.Error())
			return err
		}
		defer f.Body.Close()
		dockerFile, err := ioutil.ReadAll(f.Body)
		if err != nil {
			glog.Error(err.Error())
			return err
		}

		// When we're downloading just a Dockerfile, store it under the
		// default name - don't allow the client to move/specify it
		buildConfig.DockerfileName = api.DefaultDockerfileName

		c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile))
		if err != nil {
			return err
		}
		context = c
	}
	defer context.Close()

	sf := streamformatter.NewJSONStreamFormatter()
	hyper, err := GetDaemon()
	if err != nil {
		glog.Error(err.Error())
		return err
	}
	vmId := "buildervm-" + rand.RandStr(10, "number")
	defer func() {
		glog.V(1).Infof("Kill VM(%s)...", vmId)
		hyper.KillVm(vmId)
	}()

	builder := &Builder{
		Daemon:      d,
		Name:        vmId,
		Hyperdaemon: hyper,
		OutStream: &streamformatter.StdoutFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &streamformatter.StderrFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !buildConfig.SuppressOutput,
		UtilizeCache:    !buildConfig.NoCache,
		Remove:          buildConfig.Remove,
		ForceRemove:     buildConfig.ForceRemove,
		Pull:            buildConfig.Pull,
		OutOld:          buildConfig.Stdout,
		StreamFormatter: sf,
		AuthConfig:      buildConfig.AuthConfig,
		ConfigFile:      buildConfig.ConfigFile,
		dockerfileName:  buildConfig.DockerfileName,
		cpuShares:       buildConfig.CpuShares,
		cpuPeriod:       buildConfig.CpuPeriod,
		cpuQuota:        buildConfig.CpuQuota,
		cpuSetCpus:      buildConfig.CpuSetCpus,
		cpuSetMems:      buildConfig.CpuSetMems,
		cgroupParent:    buildConfig.CgroupParent,
		memory:          buildConfig.Memory,
		memorySwap:      buildConfig.MemorySwap,
		cancelled:       buildConfig.WaitCancelled(),
	}

	id, err := builder.Run(context)
	if err != nil {
		glog.Error(err.Error())
		return err
	}

	if repoName != "" {
		return d.Repositories().Tag(repoName, tag, id, true)
	}
	return nil
}
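
The archive.Generate call above is used to wrap a single downloaded Dockerfile into a tar stream that can serve as the build context. Roughly the same effect can be had with the standard library's archive/tar; the sketch below is illustrative only and is not the pkg/archive implementation.

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
)

// singleFileTar builds an in-memory tar stream containing one file with
// the given name and content.
func singleFileTar(name, content string) (io.Reader, error) {
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	hdr := &tar.Header{
		Name: name,
		Mode: 0644,
		Size: int64(len(content)),
	}
	if err := tw.WriteHeader(hdr); err != nil {
		return nil, err
	}
	if _, err := tw.Write([]byte(content)); err != nil {
		return nil, err
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	r, err := singleFileTar("Dockerfile", "FROM busybox\n")
	if err != nil {
		panic(err)
	}
	n, _ := io.Copy(io.Discard, r)
	fmt.Println("context tar bytes:", n)
}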