func TestAufsPlacerCompliance(t *testing.T) { Convey("Aufs placers make data appear into place", t, testutil.Requires( testutil.RequiresMounts, testutil.WithTmpdir(func() { CheckAssemblerGetsDataIntoPlace(defaultAssembler{Placer: NewAufsPlacer("./aufs-layers")}.Assemble) }), ), ) Convey("Aufs placers support readonly placement", t, testutil.Requires( testutil.RequiresMounts, testutil.WithTmpdir(func() { CheckAssemblerRespectsReadonly(defaultAssembler{Placer: NewAufsPlacer("./aufs-layers")}.Assemble) }), ), ) Convey("Aufs placers support source isolation", t, testutil.Requires( testutil.RequiresMounts, testutil.WithTmpdir(func() { CheckAssemblerIsolatesSource(defaultAssembler{Placer: NewAufsPlacer("./aufs-layers")}.Assemble) }), ), ) }
// Test checks the file fixtures themselves: that every fixture can be
// written onto an empty dir, that writing then rescanning a fixture is
// lossless at the CompareDefaults level, and that the Breakout fixture
// (a symlink escape attempt) is refuted with fs.BreakoutError instead of
// writing outside the base path.
func Test(t *testing.T) {
	// uncomment for an example output
	//	Convey("Describe fixture Beta", t, func() {
	//		Println() // goconvey seems to do alignment wrong in cli out of box :I
	//		Println(Beta.Describe(CompareAll))
	//	})
	Convey("All fixtures should be able to apply their content to an empty dir", t,
		testutil.Requires(
			testutil.RequiresRoot, // fixture creation chowns, hence root
			testutil.WithTmpdir(func() {
				for _, fixture := range All {
					Convey(fmt.Sprintf("- Fixture %q", fixture.Name), func() {
						// Create panics on failure, so no explicit error assertions needed here.
						fixture.Create(".")
						So(true, ShouldBeTrue) // reaching here is success
					})
				}
			}),
		),
	)
	Convey("Applying a fixture and rescanning it should produce identical descriptions", t,
		testutil.Requires(
			testutil.RequiresRoot,
			testutil.WithTmpdir(func() {
				for _, fixture := range All {
					Convey(fmt.Sprintf("- Fixture %q", fixture.Name), func() {
						fixture.Create(".")
						// rescan what was just written and compare the round-trip description
						reheat := Scan(".")
						So(reheat.Describe(CompareDefaults), ShouldEqual, fixture.Describe(CompareDefaults))
					})
				}
			}),
		),
	)
	Convey("Symlink breakouts should be refuted", t, FailureContinues,
		testutil.Requires(
			testutil.RequiresRoot,
			testutil.WithTmpdir(func() {
				// this is a sketchy, unsandboxed test.
				// I hope you don't have anything in /tmp/dangerzone, and/or that you're running the entire suite in a vm.
				os.RemoveAll("/tmp/dangerzone")
				Convey("With a relative basepath", func() {
					// creating the breakout fixture must panic with BreakoutError...
					So(func() { Breakout.Create(".") }, testutil.ShouldPanicWith, fs.BreakoutError)
					// ...and must not have written through the symlink target
					_, err := os.Stat("/tmp/dangerzone/laaaaanaaa")
					So(err, ShouldNotBeNil) // if nil err, oh my god, it exists
				})
				Convey("With an absolute basepath", func() {
					pwd, err := os.Getwd()
					So(err, ShouldBeNil)
					So(func() { Breakout.Create(pwd) }, testutil.ShouldPanicWith, fs.BreakoutError)
					_, err = os.Stat("/tmp/dangerzone/laaaaanaaa")
					So(err, ShouldNotBeNil) // if nil err, oh my god, it exists
				})
			}),
		),
	)
}
func TestCoreCompliance(t *testing.T) { Convey("Spec Compliance: TarExec Transmat", t, testutil.WithTmpdir(func() { // scanning tests.CheckScanWithoutMutation(integrity.TransmatKind("tar"), New) tests.CheckScanEmptyIsCalm(Kind, New) // WILL NOT PASS (no hashes!) -- tests.CheckScanProducesConsistentHash(integrity.TransmatKind("tar"), New) // WILL NOT PASS (no hashes!) -- tests.CheckScanProducesDistinctHashes(integrity.TransmatKind("tar"), New) // round-trip // WILL NOT PASS (no hashes!) -- tests.CheckRoundTrip(integrity.TransmatKind("tar"), New, "./bounce") })) }
func TestCoreCompliance(t *testing.T) { Convey("Spec Compliance: Dir Transmat", t, testutil.WithTmpdir(func() { // scanning tests.CheckScanWithoutMutation(Kind, New) tests.CheckScanProducesConsistentHash(Kind, New) tests.CheckScanProducesDistinctHashes(Kind, New) tests.CheckScanEmptyIsCalm(Kind, New) tests.CheckScanWithFilters(Kind, New) // round-trip tests.CheckRoundTrip(Kind, New, "./bounce") })) }
func Test(t *testing.T) { Convey("Spec Compliance: Chroot Executor", t, testutil.Requires( testutil.RequiresRoot, testutil.WithTmpdir(func() { execEng := &Executor{} execEng.Configure("chroot_workspace") So(os.Mkdir(execEng.workspacePath, 0755), ShouldBeNil) tests.CheckBasicExecution(execEng) tests.CheckFilesystemContainment(execEng) tests.CheckPwdBehavior(execEng) tests.CheckEnvBehavior(execEng) }), ), ) }
// TestTarOutputCompat checks interoperability of the tar transmat's output
// with the system `tar`: scan each fixture into a tarball, exec tar to
// extract it, then rescan the extracted tree and compare descriptions.
func TestTarOutputCompat(t *testing.T) {
	Convey("Output should produce a tar recognizable to gnu tar", t,
		testutil.Requires(
			testutil.RequiresRoot,
			testutil.WithTmpdir(func() {
				for _, fixture := range filefixture.All {
					Convey(fmt.Sprintf("- Fixture %q", fixture.Name), func() {
						// create fixture
						fixture.Create("./data")
						// scan it, emitting a tarball to the file:// silo
						transmat := New("./workdir")
						transmat.Scan(
							Kind,
							"./data",
							[]integrity.SiloURI{
								integrity.SiloURI("file://output.tar"),
							},
						)
						// sanity check that there's a file.
						So("./output.tar", testutil.ShouldBeFile, os.FileMode(0))
						// now exec tar, and check that it doesn't barf outright.
						// this is not well isolated from the host; consider improving that a todo.
						os.Mkdir("./untar", 0755)
						tarProc := gosh.Gosh(
							"tar",
							"-xf", "./output.tar",
							"-C", "./untar",
							gosh.NullIO,
						).RunAndReport()
						So(tarProc.GetExitCode(), ShouldEqual, 0)
						// should look roughly the same again even bounced through
						// some third-party tar implementation, one would hope.
						rescan := filefixture.Scan("./untar")
						// subsecond timestamps are excluded from the comparison:
						// the system tar does not round-trip them.
						comparisonLevel := filefixture.CompareDefaults &^ filefixture.CompareSubsecond
						So(rescan.Describe(comparisonLevel), ShouldEqual, fixture.Describe(comparisonLevel))
					})
				}
			}),
		),
	)
}
func Test(t *testing.T) { Convey("Spec Compliance: nsinit Executor", t, testutil.Requires( testutil.RequiresRoot, testutil.RequiresNamespaces, testutil.WithTmpdir(func() { execEng := &Executor{} execEng.Configure("nsinit_workspace") So(os.Mkdir(execEng.workspacePath, 0755), ShouldBeNil) //tests.CheckBasicExecution(execEng) // correct error reporting sections fail spec compliance tests.CheckFilesystemContainment(execEng) //tests.CheckPwdBehavior(execEng) // correct error reporting sections fail spec compliance tests.CheckEnvBehavior(execEng) }), ), ) }
func CheckAssemblerIsolatesSource(assemblerFn integrity.Assembler) { Convey("Writing to a placement should not alter the source", testutil.Requires( testutil.RequiresRoot, testutil.WithTmpdir(func() { filefixture.Alpha.Create("./material/alpha") assembly := assemblerFn("./assembled", []integrity.AssemblyPart{ {TargetPath: "/", SourcePath: "./material/alpha", Writable: true}, }) defer assembly.Teardown() f, err := os.OpenFile("./assembled/newfile", os.O_CREATE, 0644) defer f.Close() So(err, ShouldBeNil) scan := filefixture.Scan("./material/alpha") So(scan.Describe(filefixture.CompareDefaults), ShouldEqual, filefixture.Alpha.Describe(filefixture.CompareDefaults)) }), ), ) }
func CheckAssemblerRespectsReadonly(assemblerFn integrity.Assembler) { Convey("Writing to a readonly placement should return EROFS", testutil.Requires( testutil.RequiresRoot, testutil.WithTmpdir(func() { filefixture.Alpha.Create("./material/alpha") assembly := assemblerFn("./assembled", []integrity.AssemblyPart{ {TargetPath: "/", SourcePath: "./material/alpha", Writable: false}, }) defer assembly.Teardown() f, err := os.OpenFile("./assembled/newfile", os.O_CREATE, 0644) defer f.Close() So(err, ShouldNotBeNil) So(err, ShouldHaveSameTypeAs, &os.PathError{}) So(err.(*os.PathError).Err, ShouldEqual, syscall.EROFS) }), ), ) }
func TestCoreCompliance(t *testing.T) { if _, err := s3gof3r.EnvKeys(); err != nil { t.Skipf("skipping s3 output tests; no s3 credentials loaded (err: %s)", err) } // group all effects of this test run under one "dir" for human reader sanity and cleanup in extremis. testRunGuid := guid.New() Convey("Spec Compliance: S3 Transmat", t, testutil.WithTmpdir(func() { // scanning tests.CheckScanWithoutMutation(Kind, New) tests.CheckScanProducesConsistentHash(Kind, New) tests.CheckScanProducesDistinctHashes(Kind, New) tests.CheckScanEmptyIsCalm(Kind, New) tests.CheckScanWithFilters(Kind, New) // round-trip tests.CheckRoundTrip(Kind, New, "s3://repeatr-test/test-"+testRunGuid+"/rt/obj.tar", "literal path") tests.CheckRoundTrip(Kind, New, "s3+splay://repeatr-test/test-"+testRunGuid+"/rt-splay/heap/", "content addressible path") })) }
func Test(t *testing.T) { Convey("Given a directory with a mixture of files and folders", t, testutil.WithTmpdir(func() { os.Mkdir("src", 0755) os.Mkdir("src/a", 01777) os.Mkdir("src/b", 0750) f, err := os.OpenFile("src/b/c", os.O_RDWR|os.O_CREATE, 0664) So(err, ShouldBeNil) f.Write([]byte("zyx")) So(f.Close(), ShouldBeNil) f, err = os.OpenFile("src/d", os.O_RDWR|os.O_CREATE, 0755) So(err, ShouldBeNil) f.Write([]byte("jkl")) So(f.Close(), ShouldBeNil) Convey("We can walk and fill a bucket", func() { bucket := &MemoryBucket{} err := FillBucket("src", "", bucket, filter.FilterSet{}, sha512.New384) So(err, ShouldBeNil) Convey("Then the bucket contains the file descriptions", func() { So(len(bucket.lines), ShouldEqual, 5) sort.Sort(linesByFilepath(bucket.lines)) So(bucket.lines[0].Metadata.Name, ShouldEqual, "./") So(bucket.lines[1].Metadata.Name, ShouldEqual, "./a/") So(bucket.lines[2].Metadata.Name, ShouldEqual, "./b/") So(bucket.lines[3].Metadata.Name, ShouldEqual, "./b/c") So(bucket.lines[4].Metadata.Name, ShouldEqual, "./d") }) Convey("Doing it again produces identical descriptions", func() { bucket2 := &MemoryBucket{} err := FillBucket("src", "", bucket2, filter.FilterSet{}, sha512.New384) So(err, ShouldBeNil) So(len(bucket2.lines), ShouldEqual, 5) sort.Sort(linesByFilepath(bucket.lines)) sort.Sort(linesByFilepath(bucket2.lines)) for i := range bucket.lines { So(bucket2.lines[i], ShouldResemble, bucket.lines[i]) } }) Convey("We can walk the bucket and touch all records", func() { root := bucket.Iterator() So(root.Record().Metadata.Name, ShouldEqual, "./") node_a := root.NextChild().(RecordIterator) So(node_a.Record().Metadata.Name, ShouldEqual, "./a/") So(node_a.NextChild(), ShouldBeNil) node_b := root.NextChild().(RecordIterator) So(node_b.Record().Metadata.Name, ShouldEqual, "./b/") node_c := node_b.NextChild().(RecordIterator) So(node_c.Record().Metadata.Name, ShouldEqual, "./b/c") So(node_c.NextChild(), ShouldBeNil) node_d := root.NextChild().(RecordIterator) 
So(node_d.Record().Metadata.Name, ShouldEqual, "./d") So(node_d.NextChild(), ShouldBeNil) So(root.NextChild(), ShouldBeNil) }) }) Convey("We can walk and make a copy while filling a bucket", func() { bucket := &MemoryBucket{} err := FillBucket("src", "dest", bucket, filter.FilterSet{}, sha512.New384) So(err, ShouldBeNil) Convey("Walking the copy should match on hash", func() { bucket2 := &MemoryBucket{} err := FillBucket("dest", "", bucket2, filter.FilterSet{}, sha512.New384) So(err, ShouldBeNil) hash1 := Hash(bucket, sha512.New384) hash2 := Hash(bucket2, sha512.New384) So(hash2, ShouldResemble, hash1) }) }) }), ) }
// TestTarInputCompat checks interoperability of tar materialization against
// the system `tar`, in both directions: (1) materializing a fixture tarball
// must produce the same filesystem the system tar extracts from it, and
// (2) materializing then rescanning an externally-produced ("unusual") tar
// must reproduce its original commit hash.
func TestTarInputCompat(t *testing.T) {
	// fixtures are addressed relative to the project root, three dirs up.
	projPath, _ := os.Getwd()
	projPath = filepath.Dir(filepath.Dir(filepath.Dir(projPath)))
	Convey("Unpacking tars should match exec untar", t,
		testutil.Requires(testutil.RequiresRoot, testutil.WithTmpdir(func() {
			checkEquivalence := func(hash, filename string, paveBase bool) {
				transmat := New("./workdir/tar")
				// apply it; hope it doesn't blow up
				arena := transmat.Materialize(
					integrity.TransmatKind("tar"),
					integrity.CommitID(hash),
					[]integrity.SiloURI{
						integrity.SiloURI("file://" + filename),
					},
				)
				defer arena.Teardown()
				// do a native untar; since we don't have an upfront fixture
				// for this thing, we'll compare the two as filesystems.
				// this is not well isolated from the host; consider improving that a todo.
				os.Mkdir("./untar", 0755)
				tarProc := gosh.Gosh(
					"tar",
					"-xf", filename,
					"-C", "./untar",
					gosh.NullIO,
				).RunAndReport()
				So(tarProc.GetExitCode(), ShouldEqual, 0)
				// native untar may or may not have an opinion about the base dir, depending on how it was formed.
				// but our scans do, so, if the `paveBase` flag was set to warn us that the tar was missing an "./" entry, flatten that here.
				if paveBase {
					So(fspatch.LUtimesNano("./untar", def.Epochwhen, def.Epochwhen), ShouldBeNil)
				}
				// scan and compare
				scan1 := filefixture.Scan(arena.Path())
				scan2 := filefixture.Scan("./untar")
				// boy, that's entertaining though: gnu tar does all the same stuff,
				// except it doesn't honor our nanosecond timings.
				// also exclude bodies because they're *big*.
				comparisonLevel := filefixture.CompareDefaults &^ filefixture.CompareSubsecond &^ filefixture.CompareBody
				So(scan1.Describe(comparisonLevel), ShouldEqual, scan2.Describe(comparisonLevel))
			}
			Convey("Given a fixture tarball complete with base dir", func() {
				checkEquivalence(
					"BX0jm4jRNCg1KMbZfv4zp7ZaShx9SUXKaDrO-Xy6mWIoWOCFP5VnDHDDR3nU4PrR",
					filepath.Join(projPath, "data/fixture/tar_withBase.tgz"),
					false,
				)
			})
			Convey("Given a fixture tarball lacking base dir", func() {
				checkEquivalence(
					"ZdV3xhCGWeJmsfeHpDF4nF9stwvdskYwcepKMcOf7a2ziax1YGjQvGTJjRWFkvG1",
					filepath.Join(projPath, "data/fixture/tar_sansBase.tgz"),
					true,
				)
			})
			Convey("Given a fixture tarball containing ubuntu",
				testutil.Requires(testutil.RequiresLongRun, func() {
					checkEquivalence(
						ubuntuTarballHash,
						filepath.Join(projPath, "assets/ubuntu.tar.gz"),
						true,
					)
				}),
			)
		})),
	)
	Convey("Bouncing unusual tars should match hash", t,
		// where really all "unusual" means is "valid tar, but not from our own cannonical output".
		testutil.Requires(
			testutil.RequiresRoot,
			testutil.WithTmpdir(func() {
				checkBounce := func(hash, filename string) {
					transmat := New("./workdir/tar")
					// apply it; hope it doesn't blow up
					arena := transmat.Materialize(
						integrity.TransmatKind("tar"),
						integrity.CommitID(hash),
						[]integrity.SiloURI{
							integrity.SiloURI("file://" + filename),
						},
					)
					defer arena.Teardown()
					// scan and compare: rescanning the materialized tree must
					// reproduce the commit hash we asked for.
					commitID := transmat.Scan(integrity.TransmatKind("tar"), arena.Path(), nil)
					// debug: gosh.Sh("tar", "--utc", "-xOvf", filename)
					So(commitID, ShouldEqual, integrity.CommitID(hash))
				}
				Convey("Given a fixture tarball complete with base dir", func() {
					checkBounce(
						"BX0jm4jRNCg1KMbZfv4zp7ZaShx9SUXKaDrO-Xy6mWIoWOCFP5VnDHDDR3nU4PrR",
						filepath.Join(projPath, "data/fixture/tar_withBase.tgz"),
					)
				})
				Convey("Given a fixture tarball lacking base dir", func() {
					checkBounce(
						"ZdV3xhCGWeJmsfeHpDF4nF9stwvdskYwcepKMcOf7a2ziax1YGjQvGTJjRWFkvG1",
						filepath.Join(projPath, "data/fixture/tar_sansBase.tgz"),
					)
				})
				// this won't fly until we support hardlinks; the original asset uses them.
				// Convey("Given a fixture tarball containing ubuntu",
				//	testutil.Requires(testutil.RequiresLongRun, func() {
				//		checkBounce(
				//			ubuntuTarballHash,
				//			filepath.Join(projPath, "assets/ubuntu.tar.gz"),
				//		)
				//	}),
				// )
			}),
		),
	)
}
// TestGitLocalFileInputCompat builds a real local git repo with three
// commits using the system git, then checks that the git transmat can
// materialize both the latest and older commits by their native git hash,
// producing the expected working tree (and no .git dir).
func TestGitLocalFileInputCompat(t *testing.T) {
	// note that this test eschews use of regular file fixtures for a few reasons:
	//   - because it's capable of working without root if it doesn't try to chown
	//   - because we're doing custom content anyway so we have multiple commits
	// both of these could be addressed with upgrades to filefixtures in the future.
	Convey("Given a local git repo", t, testutil.Requires(
		testutil.WithTmpdir(func() {
			// pin author/committer identity so commit hashes are reproducible-ish
			git := git.Bake(gosh.Opts{Env: map[string]string{
				"GIT_AUTHOR_NAME":     "repeatr",
				"GIT_AUTHOR_EMAIL":    "repeatr",
				"GIT_COMMITTER_NAME":  "repeatr",
				"GIT_COMMITTER_EMAIL": "repeatr",
			}})
			var dataHash_1 integrity.CommitID
			var dataHash_2 integrity.CommitID
			var dataHash_3 integrity.CommitID
			git.Bake("init", "--", "repo-a").RunAndReport()
			// build three commits: empty, +file-a, +file-e; capture each hash.
			// NOTE(review): ioutil.WriteFile errors are ignored here; a failed
			// write would surface later as a confusing hash mismatch.
			testutil.UsingDir("repo-a", func() {
				git.Bake("commit", "--allow-empty", "-m", "testrepo-a initial commit").RunAndReport()
				dataHash_1 = integrity.CommitID(strings.Trim(git.Bake("rev-parse", "HEAD").Output(), "\n"))
				ioutil.WriteFile("file-a", []byte("abcd"), 0644)
				git.Bake("add", ".").RunAndReport()
				git.Bake("commit", "-m", "testrepo-a commit 1").RunAndReport()
				dataHash_2 = integrity.CommitID(strings.Trim(git.Bake("rev-parse", "HEAD").Output(), "\n"))
				ioutil.WriteFile("file-e", []byte("efghi"), 0644)
				git.Bake("add", ".").RunAndReport()
				git.Bake("commit", "-m", "testrepo-a commit 2").RunAndReport()
				dataHash_3 = integrity.CommitID(strings.Trim(git.Bake("rev-parse", "HEAD").Output(), "\n"))
			})
			transmat := New("./workdir")
			Convey("Materialization should be able to produce the latest commit", FailureContinues, func() {
				uris := []integrity.SiloURI{integrity.SiloURI("./repo-a")}
				// materialize from the ID returned by foreign git
				arena := transmat.Materialize(Kind, dataHash_3, uris, integrity.AcceptHashMismatch)
				// assert hash match
				// (normally survival would attest this, but we used the `AcceptHashMismatch` to suppress panics in the name of letting the test see more after failures.)
				So(arena.Hash(), ShouldEqual, dataHash_3)
				// check filesystem to loosely match the original fixture
				So(filepath.Join(arena.Path(), "file-a"), testutil.ShouldBeFile)
				So(filepath.Join(arena.Path(), "file-e"), testutil.ShouldBeFile)
				So(filepath.Join(arena.Path(), ".git"), testutil.ShouldBeNotFile)
			})
			Convey("Materialization should be able to produce older commits", FailureContinues, func() {
				uris := []integrity.SiloURI{integrity.SiloURI("./repo-a")}
				// materialize from the ID returned by foreign git
				arena := transmat.Materialize(Kind, dataHash_2, uris, integrity.AcceptHashMismatch)
				// assert hash match
				// (normally survival would attest this, but we used the `AcceptHashMismatch` to suppress panics in the name of letting the test see more after failures.)
				So(arena.Hash(), ShouldEqual, dataHash_2)
				// check filesystem to loosely match the original fixture:
				// file-e does not exist yet at commit 2.
				So(filepath.Join(arena.Path(), "file-a"), testutil.ShouldBeFile)
				So(filepath.Join(arena.Path(), "file-e"), testutil.ShouldBeNotFile)
				So(filepath.Join(arena.Path(), ".git"), testutil.ShouldBeNotFile)
			})
		})),
	)
	// TODO you really should do this with a fixture loop
	// but that does also leave questions about multi-commits, branches, etc.
	// so do both i guess.
	//filefixture.Beta.Create("repo-a")
}
// TestCborMux exercises the cbor file-backed stream multiplexer: writing
// labeled streams through appenders, reading them back (whole, in small
// chunks, interleaved, and while still in progress), and finally checking
// that the on-disk log parses as plain cbor rows.
func TestCborMux(t *testing.T) {
	Convey("Using a cbor file-backed streamer mux", t, testutil.WithTmpdir(func() {
		strm := CborFileMux("./logfile")
		Convey("Given a single complete stream", func() {
			a1 := strm.Appender(1)
			a1.Write([]byte("asdf"))
			a1.Write([]byte("qwer"))
			a1.Close()
			Convey("Readall should get the whole stream", func() {
				// note: `bytes` here shadows the stdlib package name; harmless
				// in this scope but worth knowing when editing.
				r1 := strm.Reader(1)
				bytes, err := ioutil.ReadAll(r1)
				So(err, ShouldBeNil)
				So(string(bytes), ShouldEqual, "asdfqwer")
				Convey("Readall *again* should get the whole stream, from the beginning", func() {
					// each Reader call starts a fresh cursor at the beginning
					r1 := strm.Reader(1)
					bytes, err := ioutil.ReadAll(r1)
					So(err, ShouldBeNil)
					So(string(bytes), ShouldEqual, "asdfqwer")
				})
			})
			Convey("Small reads shouldn't lose parts", func() {
				// small reads should:
				// 1. finish any previous chunks if buffered from a prior small read -- and then return, without starting a new chunk
				// 2. return as much as they can
				r1 := strm.Reader(1)
				buf := make([]byte, 3)
				n, err := r1.Read(buf)
				So(err, ShouldBeNil)
				So(n, ShouldEqual, 3)
				So(string(buf[:n]), ShouldEqual, "asd")
				n, err = r1.Read(buf)
				So(err, ShouldBeNil)
				So(n, ShouldEqual, 1)
				So(string(buf[:n]), ShouldEqual, "f")
				// make the buffer even more small, so it takes >2 reads
				buf = make([]byte, 1)
				n, err = r1.Read(buf)
				So(err, ShouldBeNil)
				So(n, ShouldEqual, 1)
				So(string(buf[:n]), ShouldEqual, "q")
				n, err = r1.Read(buf)
				So(err, ShouldBeNil)
				So(n, ShouldEqual, 1)
				So(string(buf[:n]), ShouldEqual, "w")
				n, err = r1.Read(buf)
				So(err, ShouldBeNil)
				So(n, ShouldEqual, 1)
				So(string(buf[:n]), ShouldEqual, "e")
			})
		})
		Convey("Given two complete streams", func() {
			// interleave writes across two labels before closing either
			a1 := strm.Appender(1)
			a2 := strm.Appender(2)
			a1.Write([]byte("asdf"))
			a2.Write([]byte("qwer"))
			a1.Write([]byte("asdf"))
			a1.Close()
			a2.Write([]byte("zxcv"))
			a2.Close()
			Convey("Readall on one label should get the whole stream for that label", func() {
				r1 := strm.Reader(1)
				bytes, err := ioutil.ReadAll(r1)
				So(err, ShouldBeNil)
				So(string(bytes), ShouldEqual, "asdfasdf")
			})
			Convey("Readall on both labels should get the whole stream", func() {
				// a multi-label reader yields chunks in original write order
				r12 := strm.Reader(1, 2)
				bytes, err := ioutil.ReadAll(r12)
				So(err, ShouldBeNil)
				So(string(bytes), ShouldEqual, "asdfqwerasdfzxcv")
			})
		})
		Convey("Given two in-progress streams", func() {
			a1 := strm.Appender(1)
			a2 := strm.Appender(2)
			a1.Write([]byte("asdf"))
			a2.Write([]byte("qwer"))
			Convey("Readall on one label should not return yet", FailureContinues, func() {
				// ReadAll runs in a goroutine; the non-blocking selects below
				// probe whether it has completed.
				r1 := strm.Reader(1)
				r1chan := make(chan resp)
				go func() {
					bytes, err := ioutil.ReadAll(r1)
					r1chan <- resp{bytes, err}
				}()
				select {
				case <-r1chan:
					So(true, ShouldBeFalse)
				default:
					// should be blocked and bounce out here
					So(true, ShouldBeTrue)
				}
				Convey("Sending more bytes and closing should be readable", func() {
					a1.Write([]byte("zxcv"))
					select {
					case <-r1chan:
						So(true, ShouldBeFalse)
					default:
						// should be blocked and bounce out here
						So(true, ShouldBeTrue)
					}
					a1.Close()
					// close unblocks the reader; allow up to a second for it.
					select {
					case resp := <-r1chan:
						So(resp.err, ShouldBeNil)
						So(string(resp.msg), ShouldEqual, "asdfzxcv")
					case <-time.After(1 * time.Second):
						So(true, ShouldBeFalse)
					}
				})
			})
		})
		Convey("It should parse as regular cbor", func() {
			a1 := strm.Appender(1)
			a1.Write([]byte("asdf"))
			a2 := strm.Appender(2)
			a2.Write([]byte("qwer"))
			a1.Close()
			a2.Close()
			strm.(*CborMux).Close()
			// decode the raw log file with a plain cbor decoder and check the
			// row structure: one row per write, then one close signal per label.
			file, err := os.OpenFile("./logfile", os.O_RDONLY, 0)
			So(err, ShouldBeNil)
			dec := codec.NewDecoder(file, new(codec.CborHandle))
			reheated := make([]cborMuxRow, 0)
			dec.MustDecode(&reheated)
			So(reheated, ShouldResemble, []cborMuxRow{
				{Label: 1, Msg: []byte("asdf")},
				{Label: 2, Msg: []byte("qwer")},
				{Label: 1, Sig: 1},
				{Label: 2, Sig: 1},
			})
		})
	}))
}
// CheckAssemblerGetsDataIntoPlace runs a battery of assembly scenarios and
// asserts each assembled filesystem matches the conjoined fixture
// descriptions. You probably want to invoke this once per placer variety
// the assembler can be configured with.
func CheckAssemblerGetsDataIntoPlace(assemblerFn integrity.Assembler) {
	Convey("Assembly with just a root fs works",
		testutil.Requires(
			testutil.RequiresRoot,
			testutil.WithTmpdir(func() {
				filefixture.Alpha.Create("./material/alpha")
				assembleAndScan(
					assemblerFn,
					[]integrity.AssemblyPart{
						{TargetPath: "/", SourcePath: "./material/alpha"},
					},
					filefixture.Alpha,
				)
			}),
		),
	)
	Convey("Assembly with one placement into an existing dir works",
		testutil.Requires(
			testutil.RequiresRoot,
			testutil.WithTmpdir(func() {
				filefixture.Alpha.Create("./material/alpha")
				filefixture.Beta.Create("./material/beta")
				// "/a" already exists in the Alpha fixture
				assembleAndScan(
					assemblerFn,
					[]integrity.AssemblyPart{
						{TargetPath: "/", SourcePath: "./material/alpha", Writable: true},
						{TargetPath: "/a", SourcePath: "./material/beta", Writable: true},
					},
					filefixture.ConjoinFixtures([]filefixture.FixtureAssemblyPart{
						{TargetPath: "/", Fixture: filefixture.Alpha},
						{TargetPath: "/a", Fixture: filefixture.Beta},
					}),
				)
			}),
		),
	)
	Convey("Assembly with one placement into an implicitly-created dir works",
		testutil.Requires(
			testutil.RequiresRoot,
			testutil.WithTmpdir(func() {
				filefixture.Alpha.Create("./material/alpha")
				filefixture.Beta.Create("./material/beta")
				// "/q" does not exist in Alpha; the assembler must create it
				assembleAndScan(
					assemblerFn,
					[]integrity.AssemblyPart{
						{TargetPath: "/", SourcePath: "./material/alpha", Writable: true},
						{TargetPath: "/q", SourcePath: "./material/beta", Writable: true},
					},
					filefixture.ConjoinFixtures([]filefixture.FixtureAssemblyPart{
						{TargetPath: "/", Fixture: filefixture.Alpha},
						{TargetPath: "/q", Fixture: filefixture.Beta},
					}),
				)
			}),
		),
	)
	Convey("Assembly with overlapping placements shows only top layer",
		testutil.Requires(
			testutil.RequiresRoot,
			testutil.WithTmpdir(func() {
				filefixture.Alpha.Create("./material/alpha")
				filefixture.Beta.Create("./material/beta")
				assembleAndScan(
					assemblerFn,
					[]integrity.AssemblyPart{
						{TargetPath: "/", SourcePath: "./material/alpha", Writable: true},
						// this one's interesting because ./b/c is already a file
						{TargetPath: "/b", SourcePath: "./material/beta", Writable: true},
					},
					filefixture.ConjoinFixtures([]filefixture.FixtureAssemblyPart{
						{TargetPath: "/", Fixture: filefixture.Alpha},
						{TargetPath: "/b", Fixture: filefixture.Beta},
					}),
				)
			}),
		),
	)
	Convey("Assembly using the same base twice works",
		testutil.Requires(
			testutil.RequiresRoot,
			testutil.WithTmpdir(func() {
				filefixture.Alpha.Create("./material/alpha")
				filefixture.Beta.Create("./material/beta")
				// the same source material is placed at two targets
				assembleAndScan(
					assemblerFn,
					[]integrity.AssemblyPart{
						{TargetPath: "/", SourcePath: "./material/alpha", Writable: true},
						{TargetPath: "/q", SourcePath: "./material/beta", Writable: true},
						{TargetPath: "/w", SourcePath: "./material/beta", Writable: true},
					},
					filefixture.ConjoinFixtures([]filefixture.FixtureAssemblyPart{
						{TargetPath: "/", Fixture: filefixture.Alpha},
						{TargetPath: "/q", Fixture: filefixture.Beta},
						{TargetPath: "/w", Fixture: filefixture.Beta},
					}),
				)
			}),
		),
	)
	Convey("Assembly with implicitly created deep dirs works",
		testutil.Requires(
			testutil.RequiresRoot,
			testutil.WithTmpdir(func() {
				filefixture.Alpha.Create("./material/alpha")
				filefixture.Beta.Create("./material/beta")
				// expected result is spelled out literally here (rather than via
				// ConjoinFixtures) because of the assembler-manifested /d/d dirs.
				assembleAndScan(
					assemblerFn,
					[]integrity.AssemblyPart{
						{TargetPath: "/", SourcePath: "./material/alpha", Writable: true},
						{TargetPath: "/a", SourcePath: "./material/beta", Writable: true},
						{TargetPath: "/d/d/d", SourcePath: "./material/beta", Writable: true},
					},
					filefixture.Fixture{Files: []filefixture.FixtureFile{
						{fs.Metadata{Name: ".", Mode: 0755, ModTime: time.Unix(1000, 2000)}, nil}, // even though the basedir was made by the assembler, this should have the rootfs's properties overlayed onto it
						{fs.Metadata{Name: "./a"}, nil}, // this one's mode and times should be overlayed by the second mount
						{fs.Metadata{Name: "./a/1"}, []byte{}},
						{fs.Metadata{Name: "./a/2"}, []byte{}},
						{fs.Metadata{Name: "./a/3"}, []byte{}},
						{fs.Metadata{Name: "./b", Mode: 0750, ModTime: time.Unix(5000, 2000)}, nil},
						{fs.Metadata{Name: "./b/c", Mode: 0664, ModTime: time.Unix(7000, 2000)}, []byte("zyx")},
						{fs.Metadata{Name: "./d", Uid: -1, Gid: -1}, nil}, // these should have been manifested by the assembler
						{fs.Metadata{Name: "./d/d", Uid: -1, Gid: -1}, nil},
						{fs.Metadata{Name: "./d/d/d"}, nil},
						{fs.Metadata{Name: "./d/d/d/1"}, []byte{}},
						{fs.Metadata{Name: "./d/d/d/2"}, []byte{}},
						{fs.Metadata{Name: "./d/d/d/3"}, []byte{}},
					}}.Defaults(),
				)
			}),
		),
	)
	// additional coverage todos:
	// - failure path: placement that overlaps a file somewhere
	// - everything about changes and ensuring they're isolated... deserves a whole battery
}