Example No. 1
// Add stores the content of src under digest, verifying its hash and size
// before registering it in the LRU.
func (d *disk) Add(digest isolated.HexDigest, src io.Reader) error {
	if !digest.Validate() {
		return os.ErrInvalid
	}
	p := d.itemPath(digest)
	dst, err := os.Create(p)
	if err != nil {
		return err
	}
	h := isolated.GetHash()
	// TODO(maruel): Use a LimitedReader flavor that fails when reaching limit.
	size, err := io.Copy(dst, io.TeeReader(src, h))
	if err2 := dst.Close(); err == nil {
		err = err2
	}
	if err != nil {
		_ = os.Remove(p)
		return err
	}
	if isolated.Sum(h) != digest {
		_ = os.Remove(p)
		return errors.New("invalid hash")
	}
	if common.Size(size) > d.policies.MaxSize {
		_ = os.Remove(p)
		return errors.New("item too large")
	}

	d.lock.Lock()
	defer d.lock.Unlock()
	d.lru.pushFront(digest, common.Size(size))
	d.respectPolicies()
	return nil
}
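Both cache implementations (this one and the in-memory one in Example No. 9) carry a TODO about using a LimitedReader flavor that fails when the limit is reached; io.LimitReader silently truncates, so an oversized item would only be caught after the full copy. A minimal sketch of such a reader, assuming a hypothetical maxReader type that is not part of the package:

// maxReader forwards reads to the wrapped reader and returns an error once
// more than limit bytes have been read, instead of silently truncating.
type maxReader struct {
	r     io.Reader
	limit int64
	read  int64
}

func (m *maxReader) Read(p []byte) (int, error) {
	n, err := m.r.Read(p)
	m.read += int64(n)
	if m.read > m.limit {
		return n, errors.New("item too large")
	}
	return n, err
}

With such a wrapper, Add could copy from io.TeeReader(&maxReader{r: src, limit: int64(d.policies.MaxSize)}, h) and reject oversized items without writing them fully to disk.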
Example No. 2
func TestArchiverEmpty(t *testing.T) {
	t.Parallel()
	a := New(isolatedclient.New("https://localhost:1", "default-gzip"), nil)
	stats := a.Stats()
	ut.AssertEqual(t, 0, stats.TotalHits())
	ut.AssertEqual(t, 0, stats.TotalMisses())
	ut.AssertEqual(t, common.Size(0), stats.TotalBytesHits())
	ut.AssertEqual(t, common.Size(0), stats.TotalBytesPushed())
	ut.AssertEqual(t, nil, a.Close())
}
Example No. 3
// doUpload is called by stage 4.
func (a *archiver) doUpload(item *archiverItem) {
	var src io.ReadSeeker
	if item.src == nil {
		f, err := os.Open(item.path)
		if err != nil {
			a.Cancel(err)
			item.setErr(err)
			item.Close()
			return
		}
		defer f.Close()
		src = f
	} else {
		src = item.src
		item.src = nil
	}
	start := time.Now()
	if err := a.is.Push(item.state, src); err != nil {
		err = fmt.Errorf("push(%s) failed: %s", item.path, err)
		a.Cancel(err)
		item.setErr(err)
	} else {
		a.progress.Update(groupUpload, groupUploadDone, 1)
		a.progress.Update(groupUpload, groupUploadDoneSize, item.digestItem.Size)
	}
	item.Close()
	size := common.Size(item.digestItem.Size)
	u := &UploadStat{time.Since(start), size, item.DisplayName()}
	a.statsLock.Lock()
	a.stats.Pushed = append(a.stats.Pushed, u)
	a.statsLock.Unlock()
	log.Printf("Uploaded %7s: %s\n", size, item.DisplayName())
}
Example No. 4
// doContains is called by stage 3.
func (a *archiver) doContains(items []*archiverItem) {
	tmp := make([]*isolated.DigestItem, len(items))
	// No need to lock each item at this point; no mutation occurs on
	// archiverItem.digestItem after stage 2.
	for i, item := range items {
		tmp[i] = &item.digestItem
	}
	states, err := a.is.Contains(tmp)
	if err != nil {
		err = fmt.Errorf("contains(%d) failed: %s", len(items), err)
		a.Cancel(err)
		for _, item := range items {
			item.setErr(err)
		}
		return
	}
	a.progress.Update(groupLookup, groupLookupDone, int64(len(items)))
	for index, state := range states {
		size := items[index].digestItem.Size
		if state == nil {
			a.statsLock.Lock()
			a.stats.Hits = append(a.stats.Hits, common.Size(size))
			a.statsLock.Unlock()
			items[index].Close()
		} else {
			items[index].state = state
			a.progress.Update(groupUpload, groupUploadTodo, 1)
			a.progress.Update(groupUpload, groupUploadTodoSize, items[index].digestItem.Size)
			a.stage4UploadChan <- items[index]
		}
	}
	log.Printf("Looked up %d items\n", len(items))
}
Example No. 5
// TotalBytesPushed returns the sum of bytes uploaded.
func (s *Stats) TotalBytesPushed() common.Size {
	out := common.Size(0)
	for _, i := range s.Pushed {
		out += i.Size
	}
	return out
}
Example No. 6
// TotalBytesHits returns the number of bytes not uploaded due to cache hits on
// the server.
func (s *Stats) TotalBytesHits() common.Size {
	out := common.Size(0)
	for _, i := range s.Hits {
		out += i
	}
	return out
}
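The test examples in this listing also call Stats.TotalHits and Stats.TotalMisses, which are not shown here. Judging from how doContains appends one entry to Stats.Hits per cache hit and doUpload appends one UploadStat to Stats.Pushed per upload, they are presumably plain counters over those slices; a sketch under that assumption:

// TotalHits returns the number of items that were already present on the server.
func (s *Stats) TotalHits() int {
	return len(s.Hits)
}

// TotalMisses returns the number of items that had to be uploaded.
func (s *Stats) TotalMisses() int {
	return len(s.Pushed)
}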
Example No. 7
func TestArchiverFileHit(t *testing.T) {
	t.Parallel()
	server := isolatedfake.New()
	ts := httptest.NewServer(server)
	defer ts.Close()
	a := New(isolatedclient.New(ts.URL, "default-gzip"), nil)
	server.Inject([]byte("foo"))
	future := a.Push("foo", bytes.NewReader([]byte("foo")), 0)
	future.WaitForHashed()
	ut.AssertEqual(t, isolated.HexDigest("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), future.Digest())
	ut.AssertEqual(t, nil, a.Close())

	stats := a.Stats()
	ut.AssertEqual(t, 1, stats.TotalHits())
	ut.AssertEqual(t, 0, stats.TotalMisses())
	ut.AssertEqual(t, common.Size(3), stats.TotalBytesHits())
	ut.AssertEqual(t, common.Size(0), stats.TotalBytesPushed())
}
Example No. 8
func TestArchiverFile(t *testing.T) {
	t.Parallel()
	server := isolatedfake.New()
	ts := httptest.NewServer(server)
	defer ts.Close()
	a := New(isolatedclient.New(ts.URL, "default-gzip"), nil)

	fEmpty, err := ioutil.TempFile("", "archiver")
	ut.AssertEqual(t, nil, err)
	future1 := a.PushFile(fEmpty.Name(), fEmpty.Name(), 0)
	ut.AssertEqual(t, fEmpty.Name(), future1.DisplayName())
	fFoo, err := ioutil.TempFile("", "archiver")
	ut.AssertEqual(t, nil, err)
	ut.AssertEqual(t, nil, ioutil.WriteFile(fFoo.Name(), []byte("foo"), 0600))
	future2 := a.PushFile(fFoo.Name(), fFoo.Name(), 0)
	// Push the same file another time. It'll get linked to the first.
	future3 := a.PushFile(fFoo.Name(), fFoo.Name(), 0)
	future1.WaitForHashed()
	future2.WaitForHashed()
	future3.WaitForHashed()
	ut.AssertEqual(t, nil, a.Close())

	stats := a.Stats()
	ut.AssertEqual(t, 0, stats.TotalHits())
	// Only 2 lookups, not 3.
	ut.AssertEqual(t, 2, stats.TotalMisses())
	ut.AssertEqual(t, common.Size(0), stats.TotalBytesHits())
	ut.AssertEqual(t, common.Size(3), stats.TotalBytesPushed())
	expected := map[isolated.HexDigest][]byte{
		"0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33": []byte("foo"),
		"da39a3ee5e6b4b0d3255bfef95601890afd80709": {},
	}
	ut.AssertEqual(t, expected, server.Contents())
	ut.AssertEqual(t, isolated.HexDigest("da39a3ee5e6b4b0d3255bfef95601890afd80709"), future1.Digest())
	ut.AssertEqual(t, nil, future1.Error())
	ut.AssertEqual(t, isolated.HexDigest("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), future2.Digest())
	ut.AssertEqual(t, nil, future2.Error())
	ut.AssertEqual(t, isolated.HexDigest("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), future3.Digest())
	ut.AssertEqual(t, nil, future3.Error())
	ut.AssertEqual(t, nil, server.Error())
}
Example No. 9
// Add reads src fully into memory, verifying its hash and size before
// registering it in the LRU.
func (m *memory) Add(digest isolated.HexDigest, src io.Reader) error {
	if !digest.Validate() {
		return os.ErrInvalid
	}
	// TODO(maruel): Use a LimitedReader flavor that fails when reaching limit.
	content, err := ioutil.ReadAll(src)
	if err != nil {
		return err
	}
	if isolated.HashBytes(content) != digest {
		return errors.New("invalid hash")
	}
	if common.Size(len(content)) > m.policies.MaxSize {
		return errors.New("item too large")
	}
	m.lock.Lock()
	defer m.lock.Unlock()
	m.data[digest] = content
	m.lru.pushFront(digest, common.Size(len(content)))
	m.respectPolicies()
	return nil
}
Example No. 10
func (e *entry) UnmarshalJSON(data []byte) error {
	// Decode from a two-element [key, value] tuple.
	var elems []interface{}
	if err := json.Unmarshal(data, &elems); err != nil {
		return fmt.Errorf("invalid entry: %s: %s", err, string(data))
	}
	if len(elems) != 2 {
		return fmt.Errorf("invalid entry: expected 2 items: %s", string(data))
	}
	if key, ok := elems[0].(string); ok {
		e.key = isolated.HexDigest(key)
		if value, ok := elems[1].(float64); ok {
			e.value = common.Size(value)
		} else {
			return fmt.Errorf("invalid entry: expected value to be number: %s", string(data))
		}
	} else {
		return fmt.Errorf("invalid entry: expected key to be string: %s", string(data))
	}
	return nil
}
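UnmarshalJSON expects each LRU entry to be encoded as a two-element JSON array of [digest, size] rather than as an object. For illustration only, decoding a single entry would look like:

var e entry
err := json.Unmarshal([]byte(`["0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33", 3]`), &e)
// On success, e.key is the digest and e.value is common.Size(3).

The key is asserted to a string and the value to a float64 because encoding/json decodes JSON numbers into float64 when the target is interface{}.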
Example No. 11
func TestArchive(t *testing.T) {
	// Create a .isolate file and archive it.
	t.Parallel()
	server := isolatedfake.New()
	ts := httptest.NewServer(server)
	defer ts.Close()
	a := archiver.New(isolatedclient.New(ts.URL, "default-gzip"), nil)

	// Set up the temporary directory:
	//   /base/bar
	//   /base/ignored
	//   /foo/baz.isolate
	//   /link -> /base/bar
	// Result:
	//   /baz.isolated
	tmpDir, err := ioutil.TempDir("", "isolate")
	ut.AssertEqual(t, nil, err)
	defer func() {
		if err := os.RemoveAll(tmpDir); err != nil {
			t.Fail()
		}
	}()
	baseDir := filepath.Join(tmpDir, "base")
	fooDir := filepath.Join(tmpDir, "foo")
	ut.AssertEqual(t, nil, os.Mkdir(baseDir, 0700))
	ut.AssertEqual(t, nil, os.Mkdir(fooDir, 0700))
	ut.AssertEqual(t, nil, ioutil.WriteFile(filepath.Join(baseDir, "bar"), []byte("foo"), 0600))
	ut.AssertEqual(t, nil, ioutil.WriteFile(filepath.Join(baseDir, "ignored"), []byte("ignored"), 0600))
	isolate := `{
		'variables': {
			'files': [
				'../base/',
				'../link',
			],
		},
		'conditions': [
			['OS=="amiga"', {
				'variables': {
					'command': ['amiga', '<(EXTRA)'],
				},
			}],
			['OS=="win"', {
				'variables': {
					'command': ['win'],
				},
			}],
		],
	}`
	isolatePath := filepath.Join(fooDir, "baz.isolate")
	ut.AssertEqual(t, nil, ioutil.WriteFile(isolatePath, []byte(isolate), 0600))
	if !common.IsWindows() {
		ut.AssertEqual(t, nil, os.Symlink(filepath.Join("base", "bar"), filepath.Join(tmpDir, "link")))
	} else {
		ut.AssertEqual(t, nil, ioutil.WriteFile(filepath.Join(tmpDir, "link"), []byte("no link on Windows"), 0600))
	}
	opts := &ArchiveOptions{
		Isolate:         isolatePath,
		Isolated:        filepath.Join(tmpDir, "baz.isolated"),
		Blacklist:       common.Strings{"ignored", "*.isolate"},
		PathVariables:   map[string]string{"VAR": "wonderful"},
		ExtraVariables:  map[string]string{"EXTRA": "really"},
		ConfigVariables: map[string]string{"OS": "amiga"},
	}
	future := Archive(a, opts)
	ut.AssertEqual(t, "baz.isolated", future.DisplayName())
	future.WaitForHashed()
	ut.AssertEqual(t, nil, future.Error())
	ut.AssertEqual(t, nil, a.Close())

	mode := 0600
	if common.IsWindows() {
		mode = 0666
	}
	//   /base/
	isolatedDirData := isolated.Isolated{
		Algo: "sha-1",
		Files: map[string]isolated.File{
			filepath.Join("base", "bar"): {Digest: "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33", Mode: newInt(mode), Size: newInt64(3)},
		},
		Version: isolated.IsolatedFormatVersion,
	}
	encoded, err := json.Marshal(isolatedDirData)
	ut.AssertEqual(t, nil, err)
	isolatedDirEncoded := string(encoded) + "\n"
	isolatedDirHash := isolated.HashBytes([]byte(isolatedDirEncoded))

	isolatedData := isolated.Isolated{
		Algo:        "sha-1",
		Command:     []string{"amiga", "really"},
		Files:       map[string]isolated.File{},
		Includes:    []isolated.HexDigest{isolatedDirHash},
		RelativeCwd: "foo",
		Version:     isolated.IsolatedFormatVersion,
	}
	if !common.IsWindows() {
		isolatedData.Files["link"] = isolated.File{Link: newString(filepath.Join("base", "bar"))}
	} else {
		isolatedData.Files["link"] = isolated.File{Digest: "12339b9756c2994f85c310d560bc8c142a6b79a1", Mode: newInt(0666), Size: newInt64(18)}
	}
	encoded, err = json.Marshal(isolatedData)
	ut.AssertEqual(t, nil, err)
	isolatedEncoded := string(encoded) + "\n"
	isolatedHash := isolated.HashBytes([]byte(isolatedEncoded))

	expected := map[string]string{
		"0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33": "foo",
		string(isolatedDirHash):                    isolatedDirEncoded,
		string(isolatedHash):                       isolatedEncoded,
	}
	if common.IsWindows() {
		expected["12339b9756c2994f85c310d560bc8c142a6b79a1"] = "no link on Windows"
	}
	actual := map[string]string{}
	for k, v := range server.Contents() {
		actual[string(k)] = string(v)
		ut.AssertEqualf(t, expected[string(k)], actual[string(k)], "%s: %#v", k, actual[string(k)])
	}
	ut.AssertEqual(t, expected, actual)
	ut.AssertEqual(t, isolatedHash, future.Digest())

	stats := a.Stats()
	ut.AssertEqual(t, 0, stats.TotalHits())
	ut.AssertEqual(t, common.Size(0), stats.TotalBytesHits())
	if !common.IsWindows() {
		ut.AssertEqual(t, 3, stats.TotalMisses())
		ut.AssertEqual(t, common.Size(3+len(isolatedDirEncoded)+len(isolatedEncoded)), stats.TotalBytesPushed())
	} else {
		ut.AssertEqual(t, 4, stats.TotalMisses())
		ut.AssertEqual(t, common.Size(3+18+len(isolatedDirEncoded)+len(isolatedEncoded)), stats.TotalBytesPushed())
	}

	ut.AssertEqual(t, nil, server.Error())
	digest, err := isolated.HashFile(filepath.Join(tmpDir, "baz.isolated"))
	ut.AssertEqual(t, isolated.DigestItem{isolatedHash, false, int64(len(isolatedEncoded))}, digest)
	ut.AssertEqual(t, nil, err)
}
Example No. 12
func TestPushDirectory(t *testing.T) {
	// Uploads a real directory that contains the same file content twice.
	t.Parallel()
	server := isolatedfake.New()
	ts := httptest.NewServer(server)
	defer ts.Close()
	a := New(isolatedclient.New(ts.URL, "default-gzip"), nil)

	// Set up the temporary directory.
	tmpDir, err := ioutil.TempDir("", "archiver")
	ut.AssertEqual(t, nil, err)
	defer func() {
		if err := os.RemoveAll(tmpDir); err != nil {
			t.Fail()
		}
	}()
	baseDir := filepath.Join(tmpDir, "base")
	ignoredDir := filepath.Join(tmpDir, "ignored1")
	ut.AssertEqual(t, nil, os.Mkdir(baseDir, 0700))
	ut.AssertEqual(t, nil, ioutil.WriteFile(filepath.Join(baseDir, "bar"), []byte("foo"), 0600))
	ut.AssertEqual(t, nil, ioutil.WriteFile(filepath.Join(baseDir, "bar_dupe"), []byte("foo"), 0600))
	if !common.IsWindows() {
		ut.AssertEqual(t, nil, os.Symlink("bar", filepath.Join(baseDir, "link")))
	}
	ut.AssertEqual(t, nil, ioutil.WriteFile(filepath.Join(baseDir, "ignored2"), []byte("ignored"), 0600))
	ut.AssertEqual(t, nil, os.Mkdir(ignoredDir, 0700))
	ut.AssertEqual(t, nil, ioutil.WriteFile(filepath.Join(ignoredDir, "really"), []byte("ignored"), 0600))

	future := PushDirectory(a, tmpDir, "", []string{"ignored1", filepath.Join("*", "ignored2")})
	ut.AssertEqual(t, filepath.Base(tmpDir)+".isolated", future.DisplayName())
	future.WaitForHashed()
	ut.AssertEqual(t, nil, a.Close())

	mode := 0600
	if common.IsWindows() {
		mode = 0666
	}
	isolatedData := isolated.Isolated{
		Algo: "sha-1",
		Files: map[string]isolated.File{
			filepath.Join("base", "bar"):      {Digest: "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33", Mode: newInt(mode), Size: newInt64(3)},
			filepath.Join("base", "bar_dupe"): {Digest: "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33", Mode: newInt(mode), Size: newInt64(3)},
		},
		Version: isolated.IsolatedFormatVersion,
	}
	if !common.IsWindows() {
		isolatedData.Files[filepath.Join("base", "link")] = isolated.File{Link: newString("bar")}
	}
	encoded, err := json.Marshal(isolatedData)
	ut.AssertEqual(t, nil, err)
	isolatedEncoded := string(encoded) + "\n"
	isolatedHash := isolated.HashBytes([]byte(isolatedEncoded))

	expected := map[string]string{
		"0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33": "foo",
		string(isolatedHash):                       isolatedEncoded,
	}
	actual := map[string]string{}
	for k, v := range server.Contents() {
		actual[string(k)] = string(v)
	}
	ut.AssertEqual(t, expected, actual)
	ut.AssertEqual(t, isolatedHash, future.Digest())

	stats := a.Stats()
	ut.AssertEqual(t, 0, stats.TotalHits())
	// There are 3 cache misses even though the same content is looked up twice.
	ut.AssertEqual(t, 3, stats.TotalMisses())
	ut.AssertEqual(t, common.Size(0), stats.TotalBytesHits())
	ut.AssertEqual(t, common.Size(3+3+len(isolatedEncoded)), stats.TotalBytesPushed())

	ut.AssertEqual(t, nil, server.Error())
}