Example #1
// TestConfig saves and loads a config from the backend.
func TestConfig(t testing.TB) {
	b := open(t)
	defer close(t)

	var testString = "Config"

	// create config and read it back
	_, err := backend.LoadAll(b, backend.Handle{Type: backend.Config}, nil)
	if err == nil {
		t.Fatalf("did not get expected error for non-existing config")
	}

	err = b.Save(backend.Handle{Type: backend.Config}, []byte(testString))
	if err != nil {
		t.Fatalf("Save() error: %v", err)
	}

	// try accessing the config with different names, should all return the
	// same config
	for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} {
		buf, err := backend.LoadAll(b, backend.Handle{Type: backend.Config, Name: name}, nil)
		if err != nil {
			t.Fatalf("unable to read config with name %q: %v", name, err)
		}

		if string(buf) != testString {
			t.Fatalf("wrong data returned, want %q, got %q", testString, string(buf))
		}
	}
}
Example #2
// TestSaveFilenames tests saving data with various file names in the backend.
func TestSaveFilenames(t testing.TB) {
	b := open(t)
	defer close(t)

	for i, test := range filenameTests {
		h := backend.Handle{Name: test.name, Type: backend.Data}
		err := b.Save(h, []byte(test.data))
		if err != nil {
			t.Errorf("test %d failed: Save() returned %v", i, err)
			continue
		}

		buf, err := backend.LoadAll(b, h, nil)
		if err != nil {
			t.Errorf("test %d failed: Load() returned %v", i, err)
			continue
		}

		if !bytes.Equal(buf, []byte(test.data)) {
			t.Errorf("test %d: returned wrong bytes", i)
		}

		err = b.Remove(h.Type, h.Name)
		if err != nil {
			t.Errorf("test %d failed: Remove() returned %v", i, err)
			continue
		}
	}
}
Example #3
// TestLoadLargeBuffer checks that LoadAll returns exactly the stored data,
// even when the supplied buffer is larger than needed.
func TestLoadLargeBuffer(t *testing.T) {
	b := mem.New()

	for i := 0; i < 20; i++ {
		data := Random(23+i, rand.Intn(MiB)+500*KiB)

		id := backend.Hash(data)
		err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data)
		OK(t, err)

		buf := make([]byte, len(data)+100)
		buf, err = backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, buf)
		OK(t, err)

		if len(buf) != len(data) {
			t.Errorf("length of returned buffer does not match, want %d, got %d", len(data), len(buf))
			continue
		}

		if !bytes.Equal(buf, data) {
			t.Errorf("wrong data returned")
			continue
		}
	}
}
Example #4
// LoadKey loads a key from the backend.
func LoadKey(s *Repository, name string) (k *Key, err error) {
	h := backend.Handle{Type: backend.Key, Name: name}
	data, err := backend.LoadAll(s.be, h, nil)
	if err != nil {
		return nil, err
	}

	k = &Key{}
	err = json.Unmarshal(data, k)
	if err != nil {
		return nil, err
	}

	return k, nil
}
Example #5
// checkPack reads a pack and checks the integrity of all blobs.
func checkPack(r *repository.Repository, id backend.ID) error {
	debug.Log("Checker.checkPack", "checking pack %v", id.Str())
	h := backend.Handle{Type: backend.Data, Name: id.String()}
	buf, err := backend.LoadAll(r.Backend(), h, nil)
	if err != nil {
		return err
	}

	hash := backend.Hash(buf)
	if !hash.Equal(id) {
		debug.Log("Checker.checkPack", "Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
		return fmt.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
	}

	unpacker, err := pack.NewUnpacker(r.Key(), bytes.NewReader(buf))
	if err != nil {
		return err
	}

	var errs []error
	for i, blob := range unpacker.Entries {
		debug.Log("Checker.checkPack", "  check blob %d: %v", i, blob.ID.Str())

		plainBuf := make([]byte, blob.Length)
		plainBuf, err = crypto.Decrypt(r.Key(), plainBuf, buf[blob.Offset:blob.Offset+blob.Length])
		if err != nil {
			debug.Log("Checker.checkPack", "  error decrypting blob %v: %v", blob.ID.Str(), err)
			errs = append(errs, fmt.Errorf("blob %v: %v", i, err))
			continue
		}

		hash := backend.Hash(plainBuf)
		if !hash.Equal(blob.ID) {
			debug.Log("Checker.checkPack", "  Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())
			errs = append(errs, fmt.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()))
			continue
		}
	}

	if len(errs) > 0 {
		return fmt.Errorf("pack %v contains %v errors: %v", id.Str(), len(errs), errs)
	}

	return nil
}
Example #6
// TestSave tests saving data in the backend.
func TestSave(t testing.TB) {
	b := open(t)
	defer close(t)
	var id backend.ID

	for i := 0; i < 10; i++ {
		length := rand.Intn(1<<23) + 200000
		data := Random(23, length)
		// use the first 32 byte as the ID
		copy(id[:], data)

		h := backend.Handle{
			Type: backend.Data,
			Name: fmt.Sprintf("%s-%d", id, i),
		}
		err := b.Save(h, data)
		OK(t, err)

		buf, err := backend.LoadAll(b, h, nil)
		OK(t, err)
		if len(buf) != len(data) {
			t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf))
		}

		if !bytes.Equal(buf, data) {
			t.Fatalf("data not equal")
		}

		fi, err := b.Stat(h)
		OK(t, err)

		if fi.Size != int64(len(data)) {
			t.Fatalf("Stat() returned different size, want %q, got %d", len(data), fi.Size)
		}

		err = b.Remove(h.Type, h.Name)
		if err != nil {
			t.Fatalf("error removing item: %v", err)
		}
	}
}
Example #7
// LoadAndDecrypt loads and decrypts data identified by t and id from the
// backend.
func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, error) {
	debug.Log("Repo.Load", "load %v with id %v", t, id.Str())

	h := backend.Handle{Type: t, Name: id.String()}
	buf, err := backend.LoadAll(r.be, h, nil)
	if err != nil {
		debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err)
		return nil, err
	}

	if t != backend.Config && !backend.Hash(buf).Equal(id) {
		return nil, errors.New("invalid data returned")
	}

	// decrypt
	plain, err := r.Decrypt(buf)
	if err != nil {
		return nil, err
	}

	return plain, nil
}
Example #8
// TestBackend tests all functions of the backend.
func TestBackend(t testing.TB) {
	b := open(t)
	defer close(t)

	for _, tpe := range []backend.Type{
		backend.Data, backend.Key, backend.Lock,
		backend.Snapshot, backend.Index,
	} {
		// detect non-existing files
		for _, test := range testStrings {
			id, err := backend.ParseID(test.id)
			OK(t, err)

			// test if blob is already in repository
			ret, err := b.Test(tpe, id.String())
			OK(t, err)
			Assert(t, !ret, "blob was found to exist before creating")

			// try to stat a not existing blob
			h := backend.Handle{Type: tpe, Name: id.String()}
			_, err = b.Stat(h)
			Assert(t, err != nil, "blob data could be extracted before creation")

			// try to read not existing blob
			_, err = b.Load(h, nil, 0)
			Assert(t, err != nil, "blob reader could be obtained before creation")

			// try to get string out, should fail
			ret, err = b.Test(tpe, id.String())
			OK(t, err)
			Assert(t, !ret, "id %q was found (but should not have)", test.id)
		}

		// add files
		for _, test := range testStrings {
			store(t, b, tpe, []byte(test.data))

			// test Load()
			h := backend.Handle{Type: tpe, Name: test.id}
			buf, err := backend.LoadAll(b, h, nil)
			OK(t, err)
			Equals(t, test.data, string(buf))

			// try to read it out with an offset and a length
			start := 1
			end := len(test.data) - 2
			length := end - start

			buf2 := make([]byte, length)
			n, err := b.Load(h, buf2, int64(start))
			OK(t, err)
			Equals(t, length, n)
			Equals(t, test.data[start:end], string(buf2))
		}

		// test adding the first file again
		test := testStrings[0]

		// create blob
		err := b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data))
		Assert(t, err != nil, "expected error, got %v", err)

		// remove and recreate
		err = b.Remove(tpe, test.id)
		OK(t, err)

		// test that the blob is gone
		ok, err := b.Test(tpe, test.id)
		OK(t, err)
		Assert(t, ok == false, "removed blob still present")

		// create blob
		err = b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data))
		OK(t, err)

		// list items
		IDs := backend.IDs{}

		for _, test := range testStrings {
			id, err := backend.ParseID(test.id)
			OK(t, err)
			IDs = append(IDs, id)
		}

		list := backend.IDs{}

		for s := range b.List(tpe, nil) {
			list = append(list, ParseID(s))
		}

		if len(IDs) != len(list) {
			t.Fatalf("wrong number of IDs returned: want %d, got %d", len(IDs), len(list))
		}

		sort.Sort(IDs)
		sort.Sort(list)

		if !reflect.DeepEqual(IDs, list) {
			t.Fatalf("lists aren't equal, want:\n  %v\n  got:\n%v\n", IDs, list)
		}

		// remove content if requested
		if TestCleanupTempDirs {
			for _, test := range testStrings {
				id, err := backend.ParseID(test.id)
				OK(t, err)

				found, err := b.Test(tpe, id.String())
				OK(t, err)

				OK(t, b.Remove(tpe, id.String()))

				found, err = b.Test(tpe, id.String())
				OK(t, err)
				Assert(t, !found, fmt.Sprintf("id %q still found after removal", id))
			}
		}
	}
}
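All of the examples above go through backend.LoadAll, which takes a backend, a backend.Handle and an optional buffer to reuse, and returns the complete stored contents. Below is a minimal sketch of how such a helper could be built from the Stat and Load methods used in Example #8; the Backend, Handle and FileInfo types are simplified assumptions made for illustration, not restic's actual definitions.

package backendutil

// Handle identifies a single item in the backend (simplified stand-in for
// the backend.Handle used in the examples above).
type Handle struct {
	Type string
	Name string
}

// FileInfo mirrors the result of Stat as used in Example #6; only Size is
// needed here.
type FileInfo struct {
	Size int64
}

// Backend is the assumed subset of the backend interface the sketch relies on.
type Backend interface {
	Stat(h Handle) (FileInfo, error)
	Load(h Handle, p []byte, off int64) (int, error)
}

// loadAll reads the complete contents for h, reusing buf when it is large
// enough, and returns a slice that is exactly as long as the stored data.
func loadAll(be Backend, h Handle, buf []byte) ([]byte, error) {
	fi, err := be.Stat(h)
	if err != nil {
		return nil, err
	}

	// grow the buffer only if the existing capacity is too small
	if int64(cap(buf)) < fi.Size {
		buf = make([]byte, fi.Size)
	}
	buf = buf[:fi.Size]

	n, err := be.Load(h, buf, 0)
	if err != nil {
		return nil, err
	}

	return buf[:n], nil
}

Used as loadAll(b, h, nil) it behaves like the backend.LoadAll calls above; passing a previously returned slice avoids the allocation, which is the pattern Example #3 exercises with its oversized buffer.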
Example #9
func (cmd CmdCat) Execute(args []string) error {
	if len(args) < 1 || (args[0] != "masterkey" && args[0] != "config" && len(args) != 2) {
		return fmt.Errorf("type or ID not specified, Usage: %s", cmd.Usage())
	}

	repo, err := cmd.global.OpenRepository()
	if err != nil {
		return err
	}

	lock, err := lockRepo(repo)
	defer unlockRepo(lock)
	if err != nil {
		return err
	}

	tpe := args[0]

	var id backend.ID
	if tpe != "masterkey" && tpe != "config" {
		id, err = backend.ParseID(args[1])
		if err != nil {
			if tpe != "snapshot" {
				return err
			}

			// find snapshot id with prefix
			id, err = restic.FindSnapshot(repo, args[1])
			if err != nil {
				return err
			}
		}
	}

	// handle all types that don't need an index
	switch tpe {
	case "config":
		buf, err := json.MarshalIndent(repo.Config, "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))
		return nil
	case "index":
		buf, err := repo.LoadAndDecrypt(backend.Index, id)
		if err != nil {
			return err
		}

		_, err = os.Stdout.Write(append(buf, '\n'))
		return err

	case "snapshot":
		sn := &restic.Snapshot{}
		err = repo.LoadJSONUnpacked(backend.Snapshot, id, sn)
		if err != nil {
			return err
		}

		buf, err := json.MarshalIndent(&sn, "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))

		return nil
	case "key":
		h := backend.Handle{Type: backend.Key, Name: id.String()}
		buf, err := backend.LoadAll(repo.Backend(), h, nil)
		if err != nil {
			return err
		}

		key := &repository.Key{}
		err = json.Unmarshal(buf, key)
		if err != nil {
			return err
		}

		buf, err = json.MarshalIndent(&key, "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))
		return nil
	case "masterkey":
		buf, err := json.MarshalIndent(repo.Key(), "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))
		return nil
	case "lock":
		lock, err := restic.LoadLock(repo, id)
		if err != nil {
			return err
		}

		buf, err := json.MarshalIndent(&lock, "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))

		return nil
	}

	// load index, handle all the other types
	err = repo.LoadIndex()
	if err != nil {
		return err
	}

	switch tpe {
	case "pack":
		h := backend.Handle{Type: backend.Data, Name: id.String()}
		buf, err := backend.LoadAll(repo.Backend(), h, nil)
		if err != nil {
			return err
		}

		_, err = os.Stdout.Write(buf)
		return err

	case "blob":
		blob, err := repo.Index().Lookup(id)
		if err != nil {
			return err
		}

		buf := make([]byte, blob.Length)
		data, err := repo.LoadBlob(blob.Type, id, buf)
		if err != nil {
			return err
		}

		_, err = os.Stdout.Write(data)
		return err

	case "tree":
		debug.Log("cat", "cat tree %v", id.Str())
		tree := restic.NewTree()
		err = repo.LoadJSONPack(pack.Tree, id, tree)
		if err != nil {
			debug.Log("cat", "unable to load tree %v: %v", id.Str(), err)
			return err
		}

		buf, err := json.MarshalIndent(&tree, "", "  ")
		if err != nil {
			debug.Log("cat", "error json.MarshalIndent(): %v", err)
			return err
		}

		_, err = os.Stdout.Write(append(buf, '\n'))
		return err

	default:
		return errors.New("invalid type")
	}
}
Example #10
func (cmd CmdRebuildIndex) RebuildIndex() error {
	debug.Log("RebuildIndex.RebuildIndex", "start")

	done := make(chan struct{})
	defer close(done)

	indexIDs := backend.NewIDSet()
	for id := range cmd.repo.List(backend.Index, done) {
		indexIDs.Insert(id)
	}

	cmd.global.Printf("rebuilding index from %d indexes\n", len(indexIDs))

	debug.Log("RebuildIndex.RebuildIndex", "found %v indexes", len(indexIDs))

	combinedIndex := repository.NewIndex()
	packsDone := backend.NewIDSet()

	type Blob struct {
		id  backend.ID
		tpe pack.BlobType
	}
	blobsDone := make(map[Blob]struct{})

	i := 0
	for indexID := range indexIDs {
		cmd.global.Printf("  loading index %v\n", i)

		debug.Log("RebuildIndex.RebuildIndex", "load index %v", indexID.Str())
		idx, err := repository.LoadIndex(cmd.repo, indexID.String())
		if err != nil {
			return err
		}

		debug.Log("RebuildIndex.RebuildIndex", "adding blobs from index %v", indexID.Str())

		for packedBlob := range idx.Each(done) {
			packsDone.Insert(packedBlob.PackID)
			b := Blob{
				id:  packedBlob.ID,
				tpe: packedBlob.Type,
			}
			if _, ok := blobsDone[b]; ok {
				continue
			}

			blobsDone[b] = struct{}{}
			combinedIndex.Store(packedBlob)
		}

		combinedIndex.AddToSupersedes(indexID)

		if repository.IndexFull(combinedIndex) {
			combinedIndex, err = cmd.storeIndex(combinedIndex)
			if err != nil {
				return err
			}
		}

		i++
	}

	var err error
	if combinedIndex.Length() > 0 {
		combinedIndex, err = cmd.storeIndex(combinedIndex)
		if err != nil {
			return err
		}
	}

	cmd.global.Printf("removing %d old indexes\n", len(indexIDs))
	for id := range indexIDs {
		debug.Log("RebuildIndex.RebuildIndex", "remove index %v", id.Str())

		err := cmd.repo.Backend().Remove(backend.Index, id.String())
		if err != nil {
			debug.Log("RebuildIndex.RebuildIndex", "error removing index %v: %v", id.Str(), err)
			return err
		}
	}

	cmd.global.Printf("checking for additional packs\n")
	newPacks := 0
	var buf []byte
	for packID := range cmd.repo.List(backend.Data, done) {
		if packsDone.Has(packID) {
			continue
		}

		debug.Log("RebuildIndex.RebuildIndex", "pack %v not indexed", packID.Str())
		newPacks++

		var err error

		h := backend.Handle{Type: backend.Data, Name: packID.String()}
		buf, err = backend.LoadAll(cmd.repo.Backend(), h, buf)
		if err != nil {
			debug.Log("RebuildIndex.RebuildIndex", "error while loading pack %v", packID.Str())
			return fmt.Errorf("error while loading pack %v: %v", packID.Str(), err)
		}

		hash := backend.Hash(buf)
		if !hash.Equal(packID) {
			debug.Log("RebuildIndex.RebuildIndex", "Pack ID does not match, want %v, got %v", packID.Str(), hash.Str())
			return fmt.Errorf("Pack ID does not match, want %v, got %v", packID.Str(), hash.Str())
		}

		up, err := pack.NewUnpacker(cmd.repo.Key(), bytes.NewReader(buf))
		if err != nil {
			debug.Log("RebuildIndex.RebuildIndex", "error while unpacking pack %v", packID.Str())
			return err
		}

		for _, blob := range up.Entries {
			debug.Log("RebuildIndex.RebuildIndex", "pack %v: blob %v", packID.Str(), blob)
			combinedIndex.Store(repository.PackedBlob{
				Type:   blob.Type,
				ID:     blob.ID,
				PackID: packID,
				Offset: blob.Offset,
				Length: blob.Length,
			})
		}

		if repository.IndexFull(combinedIndex) {
			combinedIndex, err = cmd.storeIndex(combinedIndex)
			if err != nil {
				return err
			}
		}
	}

	if combinedIndex.Length() > 0 {
		combinedIndex, err = cmd.storeIndex(combinedIndex)
		if err != nil {
			return err
		}
	}

	cmd.global.Printf("added %d packs to the index\n", newPacks)

	debug.Log("RebuildIndex.RebuildIndex", "done")
	return nil
}