func TestDeepEqualSlices(t *testing.T) {
	s1 := []string{"a", "b"}
	interfaceS1, err := CastInterfaceToInterfaceSlice(s1)
	require.NoError(t, err)

	s2 := []string{"b", "a"}
	interfaceS2, err := CastInterfaceToInterfaceSlice(s2)
	require.NoError(t, err)

	// equal, order doesn't matter
	require.True(t, DeepEqualSlices(interfaceS1, interfaceS2))

	// NOT equal
	s3 := []string{"b", "a", "c"}
	interfaceS3, err := CastInterfaceToInterfaceSlice(s3)
	require.NoError(t, err)
	require.False(t, DeepEqualSlices(interfaceS1, interfaceS3))

	// NOT equal - same length but element differs
	s4 := []string{"b", "x"}
	interfaceS4, err := CastInterfaceToInterfaceSlice(s4)
	require.NoError(t, err)
	require.False(t, DeepEqualSlices(interfaceS1, interfaceS4))

	// empty
	require.True(t, DeepEqualSlices([]interface{}{}, []interface{}{}))
}
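// A minimal sketch of an order-insensitive slice comparison that would satisfy the
// assertions above. This is an assumption for illustration only, not the actual
// DeepEqualSlices implementation under test: it treats the two slices as multisets
// and compares element counts, assuming the elements are comparable (hashable)
// values such as strings.
func deepEqualSlicesSketch(a, b []interface{}) bool {
	if len(a) != len(b) {
		return false
	}
	counts := make(map[interface{}]int, len(a))
	for _, v := range a {
		counts[v]++
	}
	for _, v := range b {
		counts[v]--
		if counts[v] < 0 {
			// v appears more often in b than in a
			return false
		}
	}
	return true
}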
func TestMatchesFilterIntersectsPoint(t *testing.T) {
	p := geom.NewPoint(geom.XY).MustSetCoords(geom.Coord{-122.082506, 37.4249518})
	data := formDataPoint(t, p)

	_, qd, err := queryTokens(QueryTypeIntersects, data, 0.0)
	require.NoError(t, err)

	// Same point
	p2 := geom.NewPoint(geom.XY).MustSetCoords(geom.Coord{-122.082506, 37.4249518})
	require.True(t, qd.MatchesFilter(types.Geo{p2}))

	// Different point
	p3 := geom.NewPoint(geom.XY).MustSetCoords(geom.Coord{-123.082506, 37.4249518})
	require.False(t, qd.MatchesFilter(types.Geo{p3}))

	// Polygon containing the point
	poly := geom.NewPolygon(geom.XY).MustSetCoords([][]geom.Coord{
		{{-122, 37}, {-123, 37}, {-123, 38}, {-122, 38}, {-122, 37}},
	})
	require.True(t, qd.MatchesFilter(types.Geo{poly}))

	// Polygon that doesn't contain the point
	poly = geom.NewPolygon(geom.XY).MustSetCoords([][]geom.Coord{
		{{-122, 36}, {-123, 36}, {-123, 37}, {-122, 37}, {-122, 36}},
	})
	require.False(t, qd.MatchesFilter(types.Geo{poly}))
}
func Test_transformTagPushEvent(t *testing.T) {
	t.Log("Do Transform")
	{
		tagPush := TagPushEventModel{
			ObjectKind:  "tag_push",
			Ref:         "refs/tags/v0.0.2",
			CheckoutSHA: "7f29cdf31fdff43d7f31a279eec06c9f19ae0d6b",
		}
		hookTransformResult := transformTagPushEvent(tagPush)
		require.NoError(t, hookTransformResult.Error)
		require.False(t, hookTransformResult.ShouldSkip)
		require.Equal(t, []bitriseapi.TriggerAPIParamsModel{
			{
				BuildParams: bitriseapi.BuildParamsModel{
					Tag:        "v0.0.2",
					CommitHash: "7f29cdf31fdff43d7f31a279eec06c9f19ae0d6b",
				},
			},
		}, hookTransformResult.TriggerAPIParams)
	}

	t.Log("No CheckoutSHA (tag delete)")
	{
		tagPush := TagPushEventModel{
			ObjectKind:  "tag_push",
			Ref:         "refs/tags/v0.0.2",
			CheckoutSHA: "",
		}
		hookTransformResult := transformTagPushEvent(tagPush)
		require.EqualError(t, hookTransformResult.Error, "This is a Tag Deleted event, no build is required")
		require.True(t, hookTransformResult.ShouldSkip)
		require.Nil(t, hookTransformResult.TriggerAPIParams)
	}

	t.Log("Not a tags ref")
	{
		tagPush := TagPushEventModel{
			ObjectKind:  "tag_push",
			Ref:         "refs/not/a/tag",
			CheckoutSHA: "7f29cdf31fdff43d7f31a279eec06c9f19ae0d6b",
		}
		hookTransformResult := transformTagPushEvent(tagPush)
		require.EqualError(t, hookTransformResult.Error, "Ref (refs/not/a/tag) is not a tags ref")
		require.False(t, hookTransformResult.ShouldSkip)
		require.Nil(t, hookTransformResult.TriggerAPIParams)
	}

	t.Log("Not a tag_push object")
	{
		tagPush := TagPushEventModel{
			ObjectKind:  "not-a-tag_push",
			Ref:         "refs/tags/v0.0.2",
			CheckoutSHA: "7f29cdf31fdff43d7f31a279eec06c9f19ae0d6b",
		}
		hookTransformResult := transformTagPushEvent(tagPush)
		require.EqualError(t, hookTransformResult.Error, "Not a Tag Push object: not-a-tag_push")
		require.False(t, hookTransformResult.ShouldSkip)
		require.Nil(t, hookTransformResult.TriggerAPIParams)
	}
}
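// A minimal sketch of a transform consistent with the assertions above. It is an
// assumption for illustration, not the actual bitrise-webhooks implementation;
// tagPushTransformResult is a hypothetical struct mirroring only the fields the test
// inspects (Error, ShouldSkip, TriggerAPIParams). Uses the standard "errors", "fmt",
// and "strings" packages.
type tagPushTransformResult struct {
	Error            error
	ShouldSkip       bool
	TriggerAPIParams []bitriseapi.TriggerAPIParamsModel
}

func transformTagPushEventSketch(tagPush TagPushEventModel) tagPushTransformResult {
	if tagPush.ObjectKind != "tag_push" {
		// wrong webhook object type
		return tagPushTransformResult{Error: fmt.Errorf("Not a Tag Push object: %s", tagPush.ObjectKind)}
	}
	if !strings.HasPrefix(tagPush.Ref, "refs/tags/") {
		// not a tag ref (e.g. a branch push delivered to the wrong handler)
		return tagPushTransformResult{Error: fmt.Errorf("Ref (%s) is not a tags ref", tagPush.Ref)}
	}
	if tagPush.CheckoutSHA == "" {
		// tag deletion events carry no checkout SHA and should be skipped
		return tagPushTransformResult{
			Error:      errors.New("This is a Tag Deleted event, no build is required"),
			ShouldSkip: true,
		}
	}
	return tagPushTransformResult{
		TriggerAPIParams: []bitriseapi.TriggerAPIParamsModel{
			{
				BuildParams: bitriseapi.BuildParamsModel{
					Tag:        strings.TrimPrefix(tagPush.Ref, "refs/tags/"),
					CommitHash: tagPush.CheckoutSHA,
				},
			},
		},
	}
}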
// If the snapshot is loaded first (-ish, because really root has to be loaded first)
// it will be used to validate the checksums of all other metadata that gets loaded.
// If the checksum doesn't match, or if there is no checksum, then the other metadata
// cannot be loaded.
func TestSnapshotLoadedFirstChecksumsOthers(t *testing.T) {
	gun := "docker.com/notary"
	meta := setupSnapshotChecksumming(t, gun)

	// --- load root then snapshot
	builder := tuf.NewRepoBuilder(gun, nil, trustpinning.TrustPinConfig{})
	require.NoError(t, builder.Load(data.CanonicalRootRole, meta[data.CanonicalRootRole], 1, false))
	require.NoError(t, builder.Load(data.CanonicalSnapshotRole, meta[data.CanonicalSnapshotRole], 1, false))

	// loading timestamp is fine, even though the timestamp metadata has the wrong checksum, because
	// we don't check timestamp checksums
	require.NoError(t, builder.Load(data.CanonicalTimestampRole, meta[data.CanonicalTimestampRole], 1, false))

	// loading the other roles' metadata with an extra space will fail because of a checksum failure
	// (the builder checks right away if the snapshot is loaded) - in the case of targets/other/other,
	// which should not be in the snapshot at all, loading should fail even without a space because
	// there is no checksum for it
	for _, roleNameToLoad := range []string{data.CanonicalTargetsRole, "targets/other"} {
		err := builder.Load(roleNameToLoad, append(meta[roleNameToLoad], ' '), 0, false)
		require.Error(t, err)
		checksumErr, ok := err.(data.ErrMismatchedChecksum)
		require.True(t, ok)
		require.Contains(t, checksumErr.Error(), fmt.Sprintf("checksum for %s did not match", roleNameToLoad))
		require.False(t, builder.IsLoaded(roleNameToLoad))

		// now load it for real (since we need targets loaded before trying to load "targets/other")
		require.NoError(t, builder.Load(roleNameToLoad, meta[roleNameToLoad], 1, false))
	}

	// loading the non-existent role will fail
	err := builder.Load("targets/other/other", meta["targets/other/other"], 1, false)
	require.Error(t, err)
	require.IsType(t, data.ErrMissingMeta{}, err)
	require.False(t, builder.IsLoaded("targets/other/other"))
}
func TestPollAndExecOnceError(t *testing.T) {
	task := pendingRecreateWebpageArchivesTask()
	mockServer := frontend.MockServer{}
	mockServer.SetCurrentTask(&task.RecreateWebpageArchivesDBTask)
	defer frontend.CloseTestServer(frontend.InitTestServer(&mockServer))

	commandCollector := exec.CommandCollector{}
	mockRun := exec.MockRun{}
	commandCollector.SetDelegateRun(mockRun.Run)
	exec.SetRunForTesting(commandCollector.Run)
	defer exec.SetRunForTesting(exec.DefaultRun)
	mockRun.AddRule("capture_archives_on_workers", fmt.Errorf("workers too lazy"))

	pollAndExecOnce()

	// Expect only one poll.
	expect.Equal(t, 1, mockServer.OldestPendingTaskReqCount())
	// Expect three commands: git pull; make all; capture_archives_on_workers ...
	commands := commandCollector.Commands()
	assert.Len(t, commands, 3)
	expect.Equal(t, "git pull", exec.DebugString(commands[0]))
	expect.Equal(t, "make all", exec.DebugString(commands[1]))
	expect.Equal(t, "capture_archives_on_workers", commands[2].Name)
	// Expect an update marking task failed when command fails to execute.
	assert.Len(t, mockServer.UpdateTaskReqs(), 1)
	updateReq := mockServer.UpdateTaskReqs()[0]
	assert.Equal(t, "/"+ctfeutil.UPDATE_RECREATE_WEBPAGE_ARCHIVES_TASK_POST_URI, updateReq.Url)
	assert.NoError(t, updateReq.Error)
	assert.False(t, updateReq.Vars.TsStarted.Valid)
	assert.True(t, updateReq.Vars.TsCompleted.Valid)
	assert.True(t, updateReq.Vars.Failure.Valid)
	assert.True(t, updateReq.Vars.Failure.Bool)
	assert.False(t, updateReq.Vars.RepeatAfterDays.Valid)
	assert.Equal(t, int64(42), updateReq.Vars.Id)
}
// testUnfinishedBuild verifies that we can write a build which is not yet
// finished, load the build back from the database, and update it when it
// finishes.
func testUnfinishedBuild(t *testing.T) {
	d := clearDB(t)
	defer d.Close(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()
	repos := gitinfo.NewRepoMap(tr.Dir)

	// Obtain and insert an unfinished build.
	httpClient = testHttpClient
	b, err := getBuildFromMaster("client.skia", "Test-Ubuntu12-ShuttleA-GTX550Ti-x86_64-Release-Valgrind", 152, repos)
	assert.Nil(t, err)
	assert.False(t, b.IsFinished(), "Unfinished build thinks it's finished!")
	dbSerializeAndCompare(t, b, true)

	// Ensure that the build is found by getUnfinishedBuilds.
	unfinished, err := getUnfinishedBuilds()
	assert.Nil(t, err)
	found := false
	for _, u := range unfinished {
		if u.Master == b.Master && u.Builder == b.Builder && u.Number == b.Number {
			found = true
			break
		}
	}
	assert.True(t, found, "Unfinished build was not found by getUnfinishedBuilds!")

	// Add another step to the build to "finish" it, ensure that we can
	// retrieve it as expected.
	b.Finished = b.Started + 1000
	b.Times[1] = b.Finished
	stepStarted := b.Started + 500
	s := &BuildStep{
		BuildID:    b.Id,
		Name:       "LastStep",
		Times:      []float64{stepStarted, b.Finished},
		Number:     len(b.Steps),
		Results:    0,
		ResultsRaw: []interface{}{0.0, []interface{}{}},
		Started:    b.Started + 500.0,
		Finished:   b.Finished,
	}
	b.Steps = append(b.Steps, s)
	assert.True(t, b.IsFinished(), "Finished build thinks it's unfinished!")
	dbSerializeAndCompare(t, b, true)

	// Ensure that the finished build is NOT found by getUnfinishedBuilds.
	unfinished, err = getUnfinishedBuilds()
	assert.Nil(t, err)
	found = false
	for _, u := range unfinished {
		if u.Master == b.Master && u.Builder == b.Builder && u.Number == b.Number {
			found = true
			break
		}
	}
	assert.False(t, found, "Finished build was found by getUnfinishedBuilds!")
}
// UpdateTimestampHash will re-calculate the snapshot hash in the timestamp metadata
func TestSwizzlerUpdateTimestamp(t *testing.T) {
	f, origMeta := createNewSwizzler(t)

	// nothing has changed, so the signed data should be the same (the signatures might
	// change because signatures may have random elements)
	f.UpdateTimestampHash()
	newMeta, err := f.MetadataCache.GetMeta(data.CanonicalTimestampRole, -1)
	require.NoError(t, err)

	origSigned, newSigned := &data.Signed{}, &data.Signed{}
	require.NoError(t, json.Unmarshal(origMeta[data.CanonicalTimestampRole], origSigned))
	require.NoError(t, json.Unmarshal(newMeta, newSigned))
	require.True(t, bytes.Equal(origSigned.Signed, newSigned.Signed))

	// update the snapshot
	f.OffsetMetadataVersion(data.CanonicalSnapshotRole, 1)
	// update the timestamp
	f.UpdateTimestampHash()
	newMeta, err = f.MetadataCache.GetMeta(data.CanonicalTimestampRole, -1)
	require.NoError(t, err)

	require.False(t, bytes.Equal(origMeta[data.CanonicalTimestampRole], newMeta))
	origTimestamp, newTimestamp := &data.SignedTimestamp{}, &data.SignedTimestamp{}
	require.NoError(t, json.Unmarshal(origMeta[data.CanonicalTimestampRole], origTimestamp))
	require.NoError(t, json.Unmarshal(newMeta, newTimestamp))

	require.Len(t, origTimestamp.Signed.Meta, 1)
	require.Len(t, newTimestamp.Signed.Meta, 1)
	require.False(t, reflect.DeepEqual(
		origTimestamp.Signed.Meta[data.CanonicalSnapshotRole],
		newTimestamp.Signed.Meta[data.CanonicalSnapshotRole]))
}
func TestSpecC5(t *testing.T) {
	var f, g *[]string
	var x *bool
	init := func(c *Cmd) {
		f = c.StringsArg("SRC", nil, "")
		g = c.StringsArg("DST", nil, "")
		x = c.BoolOpt("x", false, "")
	}
	spec := "(SRC... -x DST) | (SRC... DST)"

	okCmd(t, spec, init, []string{"A", "B"})
	require.Equal(t, []string{"A"}, *f)
	require.Equal(t, []string{"B"}, *g)
	require.False(t, *x)

	okCmd(t, spec, init, []string{"A", "B", "C"})
	require.Equal(t, []string{"A", "B"}, *f)
	require.Equal(t, []string{"C"}, *g)
	require.False(t, *x)

	okCmd(t, spec, init, []string{"A", "B", "-x", "C"})
	require.Equal(t, []string{"A", "B"}, *f)
	require.Equal(t, []string{"C"}, *g)
	require.True(t, *x)
}
func TestBoolArg(t *testing.T) {
	cmd := &Cmd{argsIdx: map[string]*arg{}}

	a := cmd.Bool(BoolArg{Name: "a", Value: true, Desc: ""})
	require.True(t, *a)

	os.Setenv("B", "")
	b := cmd.Bool(BoolArg{Name: "b", Value: false, EnvVar: "B", Desc: ""})
	require.False(t, *b)

	trueValues := []string{"1", "true", "TRUE"}
	for _, tv := range trueValues {
		os.Setenv("B", tv)
		b = cmd.Bool(BoolArg{Name: "b", Value: false, EnvVar: "B", Desc: ""})
		require.True(t, *b, "env=%s", tv)
	}

	falseValues := []string{"0", "false", "FALSE", "xyz"}
	for _, tv := range falseValues {
		os.Setenv("B", tv)
		b = cmd.Bool(BoolArg{Name: "b", Value: false, EnvVar: "B", Desc: ""})
		require.False(t, *b, "env=%s", tv)
	}

	os.Setenv("B", "")
	os.Setenv("C", "false")
	os.Setenv("D", "true")
	b = cmd.Bool(BoolArg{Name: "b", Value: true, EnvVar: "B C D", Desc: ""})
	require.False(t, *b)
}
func TestSpecRepeatable2OptionChoice(t *testing.T) {
	var f, g *bool
	init := func(c *Cmd) {
		f = c.BoolOpt("f", false, "")
		g = c.BoolOpt("g", false, "")
	}
	spec := "(-f|-g)..."

	okCmd(t, spec, init, []string{"-f"})
	require.True(t, *f)
	require.False(t, *g)

	okCmd(t, spec, init, []string{"-g"})
	require.False(t, *f)
	require.True(t, *g)

	okCmd(t, spec, init, []string{"-f", "-g"})
	require.True(t, *f)
	require.True(t, *g)

	okCmd(t, spec, init, []string{"-g", "-f"})
	require.True(t, *f)
	require.True(t, *g)

	badCases := [][]string{
		{"-s"},
		{"-f", "xxx"},
		{"xxx", "-f"},
	}
	for _, args := range badCases {
		failCmd(t, spec, init, args)
	}
}
// This adds extra whitespace (a leading and a trailing space) to the metadata file, so it
// should be parsed and deserialized the same way, but checksums against snapshot/timestamp
// may fail
func TestSwizzlerAddExtraSpace(t *testing.T) {
	f, origMeta := createNewSwizzler(t)

	f.AddExtraSpace(data.CanonicalTargetsRole)

	snapshot := &data.SignedSnapshot{}
	require.NoError(t, json.Unmarshal(origMeta[data.CanonicalSnapshotRole], snapshot))

	for role, metaBytes := range origMeta {
		newMeta, err := f.MetadataCache.GetMeta(role, -1)
		require.NoError(t, err)

		if role != data.CanonicalTargetsRole {
			require.True(t, bytes.Equal(metaBytes, newMeta), "bytes have changed for role %s", role)
		} else {
			require.False(t, bytes.Equal(metaBytes, newMeta))
			require.True(t, bytes.Equal(metaBytes, newMeta[1:len(metaBytes)+1]))
			require.Equal(t, byte(' '), newMeta[0])
			require.Equal(t, byte(' '), newMeta[len(newMeta)-1])

			// make sure the hash is not the same as the hash in snapshot
			newHash := sha256.Sum256(newMeta)
			require.False(t, bytes.Equal(
				snapshot.Signed.Meta[data.CanonicalTargetsRole].Hashes["sha256"], newHash[:]))
			require.NotEqual(t, snapshot.Signed.Meta[data.CanonicalTargetsRole].Length, len(newMeta))
		}
	}
}
// This rotates the key of some base role
func TestSwizzlerRotateKeyBaseRole(t *testing.T) {
	f, origMeta := createNewSwizzler(t)

	theRole := data.CanonicalSnapshotRole
	cs := signed.NewEd25519()
	pubKey, err := cs.Create(theRole, f.Gun, data.ED25519Key)
	require.NoError(t, err)

	require.NoError(t, f.RotateKey(theRole, pubKey))

	for role, metaBytes := range origMeta {
		newMeta, err := f.MetadataCache.GetSized(role, store.NoSizeLimit)
		require.NoError(t, err)

		if role != data.CanonicalRootRole {
			require.True(t, bytes.Equal(metaBytes, newMeta), "bytes have changed for role %s", role)
		} else {
			require.False(t, bytes.Equal(metaBytes, newMeta))
			origSigned, newSigned := &data.SignedRoot{}, &data.SignedRoot{}
			require.NoError(t, json.Unmarshal(metaBytes, origSigned))
			require.NoError(t, json.Unmarshal(newMeta, newSigned))
			require.NotEqual(t, []string{pubKey.ID()}, origSigned.Signed.Roles[theRole].KeyIDs)
			require.Equal(t, []string{pubKey.ID()}, newSigned.Signed.Roles[theRole].KeyIDs)
			_, ok := origSigned.Signed.Keys[pubKey.ID()]
			require.False(t, ok)
			_, ok = newSigned.Signed.Keys[pubKey.ID()]
			require.True(t, ok)
		}
	}
}
// This rotates the key of some delegation role
func TestSwizzlerRotateKeyDelegationRole(t *testing.T) {
	f, origMeta := createNewSwizzler(t)

	theRole := "targets/a/b"
	cs := signed.NewEd25519()
	pubKey, err := cs.Create(theRole, f.Gun, data.ED25519Key)
	require.NoError(t, err)

	require.NoError(t, f.RotateKey(theRole, pubKey))

	for role, metaBytes := range origMeta {
		newMeta, err := f.MetadataCache.GetMeta(role, store.NoSizeLimit)
		require.NoError(t, err)

		if role != "targets/a" {
			require.True(t, bytes.Equal(metaBytes, newMeta), "bytes have changed for role %s", role)
		} else {
			require.False(t, bytes.Equal(metaBytes, newMeta))
			origSigned, newSigned := &data.SignedTargets{}, &data.SignedTargets{}
			require.NoError(t, json.Unmarshal(metaBytes, origSigned))
			require.NoError(t, json.Unmarshal(newMeta, newSigned))
			require.NotEqual(t, []string{pubKey.ID()}, origSigned.Signed.Delegations.Roles[0].KeyIDs)
			require.Equal(t, []string{pubKey.ID()}, newSigned.Signed.Delegations.Roles[0].KeyIDs)
			_, ok := origSigned.Signed.Delegations.Keys[pubKey.ID()]
			require.False(t, ok)
			_, ok = newSigned.Signed.Delegations.Keys[pubKey.ID()]
			require.True(t, ok)
		}
	}
}
func TestTrybotResults(t *testing.T) {
	// Create a fake roll with one in-progress trybot.
	roll := &AutoRollIssue{
		Closed:            false,
		Committed:         false,
		CommitQueue:       true,
		CommitQueueDryRun: true,
		Created:           time.Now(),
		Issue:             123,
		Modified:          time.Now(),
		Patchsets:         []int64{1},
		Subject:           "Roll src/third_party/skia abc123..def456 (3 commits).",
	}
	roll.Result = rollResult(roll)
	from, to, err := rollRev(roll.Subject, func(h string) (string, error) {
		return h, nil
	})
	assert.Nil(t, err)
	roll.RollingFrom = from
	roll.RollingTo = to

	trybot := &buildbucket.Build{
		CreatedTimestamp: fmt.Sprintf("%d", time.Now().UTC().UnixNano()/1000000),
		Status:           TRYBOT_STATUS_STARTED,
		ParametersJson:   "{\"builder_name\":\"fake-builder\"}",
	}
	tryResult, err := TryResultFromBuildbucket(trybot)
	assert.Nil(t, err)
	roll.TryResults = []*TryResult{tryResult}
	assert.False(t, roll.AllTrybotsFinished())
	assert.False(t, roll.AllTrybotsSucceeded())

	// Trybot failed.
	tryResult.Status = TRYBOT_STATUS_COMPLETED
	tryResult.Result = TRYBOT_RESULT_FAILURE
	assert.True(t, roll.AllTrybotsFinished())
	assert.False(t, roll.AllTrybotsSucceeded())

	// Add a retry of the failed trybot, which is still in progress.
	retry := &buildbucket.Build{
		CreatedTimestamp: fmt.Sprintf("%d", time.Now().UTC().UnixNano()/1000000+25),
		Status:           TRYBOT_STATUS_STARTED,
		ParametersJson:   "{\"builder_name\":\"fake-builder\"}",
	}
	tryResult, err = TryResultFromBuildbucket(retry)
	assert.Nil(t, err)
	roll.TryResults = append(roll.TryResults, tryResult)
	assert.False(t, roll.AllTrybotsFinished())
	assert.False(t, roll.AllTrybotsSucceeded())

	// The second try result, a retry of the first, succeeded.
	tryResult.Status = TRYBOT_STATUS_COMPLETED
	tryResult.Result = TRYBOT_RESULT_SUCCESS
	assert.True(t, roll.AllTrybotsFinished())
	assert.True(t, roll.AllTrybotsSucceeded())

	// Verify that the ordering of try results does not matter.
	roll.TryResults[0], roll.TryResults[1] = roll.TryResults[1], roll.TryResults[0]
	assert.True(t, roll.AllTrybotsFinished())
	assert.True(t, roll.AllTrybotsSucceeded())
}
func TestIntCompare(t *testing.T) {
	require.True(t, intCompare(1, query.EQ, "1"))
	require.False(t, intCompare(1, query.EQ, "2"))
	require.True(t, intCompare(2, query.GT, "1"))
	require.False(t, intCompare(1, query.GT, "1"))
}
func TestPassphraseRetrieverDelegationRoleCaching(t *testing.T) {
	defer cleanupAndSetEnvVars()()

	// Only set up the delegation passphrase environment variable
	require.NoError(t, os.Setenv("NOTARY_DELEGATION_PASSPHRASE", "delegation_passphrase"))

	// Check that any delegation role uses the cached delegation passphrase
	retriever := getPassphraseRetriever()

	passphrase, giveup, err := retriever("key", "targets/releases", false, 0)
	require.NoError(t, err)
	require.False(t, giveup)
	require.Equal(t, passphrase, "delegation_passphrase")

	passphrase, giveup, err = retriever("key", "targets/delegation", false, 0)
	require.NoError(t, err)
	require.False(t, giveup)
	require.Equal(t, passphrase, "delegation_passphrase")

	passphrase, giveup, err = retriever("key", "targets/a/b/c/d", false, 0)
	require.NoError(t, err)
	require.False(t, giveup)
	require.Equal(t, passphrase, "delegation_passphrase")

	// Also check arbitrary usernames that are non-BaseRoles or imported, so that this can be shared across keys
	passphrase, giveup, err = retriever("key", "user", false, 0)
	require.NoError(t, err)
	require.False(t, giveup)
	require.Equal(t, passphrase, "delegation_passphrase")

	// Make sure base roles fail
	_, _, err = retriever("key", data.CanonicalRootRole, false, 0)
	require.Error(t, err)
	_, _, err = retriever("key", data.CanonicalTargetsRole, false, 0)
	require.Error(t, err)
	_, _, err = retriever("key", data.CanonicalSnapshotRole, false, 0)
	require.Error(t, err)
}
func TestSpecSingleDash(t *testing.T) {
	var path *string
	var f *bool
	init := func(c *Cmd) {
		path = c.StringArg("PATH", "", "'-' can be used to read from stdin")
		f = c.BoolOpt("f", false, "")
	}
	spec := "[-f] PATH"

	okCmd(t, spec, init, []string{"TEST"})
	require.Equal(t, "TEST", *path)
	require.False(t, *f)

	okCmd(t, spec, init, []string{"-f", "TEST"})
	require.Equal(t, "TEST", *path)
	require.True(t, *f)

	okCmd(t, spec, init, []string{"-"})
	require.Equal(t, "-", *path)
	require.False(t, *f)

	okCmd(t, spec, init, []string{"-f", "-"})
	require.Equal(t, "-", *path)
	require.True(t, *f)

	okCmd(t, spec, init, []string{"--", "-"})
	require.Equal(t, "-", *path)
	require.False(t, *f)

	okCmd(t, spec, init, []string{"-f", "--", "-"})
	require.Equal(t, "-", *path)
	require.True(t, *f)
}
func TestDownloadRootCASuccess(t *testing.T) {
	tc := testutils.NewTestCA(t)
	defer tc.Stop()

	// Remove the CA cert
	os.RemoveAll(tc.Paths.RootCA.Cert)

	rootCA, err := ca.DownloadRootCA(tc.Context, tc.Paths.RootCA, tc.WorkerToken, tc.Remotes)
	require.NoError(t, err)
	require.NotNil(t, rootCA.Pool)
	require.NotNil(t, rootCA.Cert)
	require.Nil(t, rootCA.Signer)
	require.False(t, rootCA.CanSign())
	require.Equal(t, tc.RootCA.Cert, rootCA.Cert)

	// Remove the CA cert
	os.RemoveAll(tc.Paths.RootCA.Cert)

	// downloading without a join token also succeeds
	rootCA, err = ca.DownloadRootCA(tc.Context, tc.Paths.RootCA, "", tc.Remotes)
	require.NoError(t, err)
	require.NotNil(t, rootCA.Pool)
	require.NotNil(t, rootCA.Cert)
	require.Nil(t, rootCA.Signer)
	require.False(t, rootCA.CanSign())
	require.Equal(t, tc.RootCA.Cert, rootCA.Cert)
}
// No matter what order the timestamp and snapshot are loaded, if the snapshot's checksum doesn't
// match what's in the timestamp, the builder will error and refuse to load the latest piece of
// metadata, whether that is the snapshot (because it was loaded after the timestamp) or the
// timestamp (because the builder retroactively checks the loaded snapshot's checksum). The
// timestamp ONLY checks the snapshot checksum.
func TestTimestampPreAndPostChecksumming(t *testing.T) {
	gun := "docker.com/notary"
	repo, _, err := testutils.EmptyRepo(gun, "targets/other", "targets/other/other")
	require.NoError(t, err)

	// add invalid checksums for all the other roles to timestamp too, and show that
	// cached items aren't checksummed against this
	fakeChecksum, err := data.NewFileMeta(bytes.NewBuffer([]byte("fake")), notary.SHA256, notary.SHA512)
	require.NoError(t, err)
	for _, roleName := range append(data.BaseRoles, "targets/other") {
		// add a wrong checksum for every role, including timestamp itself
		repo.Timestamp.Signed.Meta[roleName] = fakeChecksum
	}
	// this will overwrite the snapshot checksum with the right one
	meta, err := testutils.SignAndSerialize(repo)
	require.NoError(t, err)
	// ensure that the fake meta for the other roles wasn't destroyed by signing the timestamp
	require.Len(t, repo.Timestamp.Signed.Meta, 5)

	snapJSON := append(meta[data.CanonicalSnapshotRole], ' ')

	// --- load timestamp first
	builder := tuf.NewRepoBuilder(gun, nil, trustpinning.TrustPinConfig{})
	require.NoError(t, builder.Load(data.CanonicalRootRole, meta[data.CanonicalRootRole], 1, false))
	// loading the timestamp doesn't fail, even though the checksum it contains for root is wrong
	require.NoError(t, builder.Load(data.CanonicalTimestampRole, meta[data.CanonicalTimestampRole], 1, false))
	// loading the snapshot fails, because of the checksum the timestamp has
	err = builder.Load(data.CanonicalSnapshotRole, snapJSON, 1, false)
	require.Error(t, err)
	require.IsType(t, data.ErrMismatchedChecksum{}, err)
	require.True(t, builder.IsLoaded(data.CanonicalTimestampRole))
	require.False(t, builder.IsLoaded(data.CanonicalSnapshotRole))
	// all the other metadata can be loaded, even though the checksums are wrong according to timestamp
	for _, roleName := range []string{data.CanonicalTargetsRole, "targets/other"} {
		require.NoError(t, builder.Load(roleName, meta[roleName], 1, false))
	}

	// --- load snapshot first
	builder = tuf.NewRepoBuilder(gun, nil, trustpinning.TrustPinConfig{})
	for _, roleName := range append(data.BaseRoles, "targets/other") {
		switch roleName {
		case data.CanonicalTimestampRole:
			continue
		case data.CanonicalSnapshotRole:
			require.NoError(t, builder.Load(roleName, snapJSON, 1, false))
		default:
			require.NoError(t, builder.Load(roleName, meta[roleName], 1, false))
		}
	}
	// the timestamp fails to load because the snapshot checksum is wrong
	err = builder.Load(data.CanonicalTimestampRole, meta[data.CanonicalTimestampRole], 1, false)
	require.Error(t, err)
	checksumErr, ok := err.(data.ErrMismatchedChecksum)
	require.True(t, ok)
	require.Contains(t, checksumErr.Error(), "checksum for snapshot did not match")
	require.False(t, builder.IsLoaded(data.CanonicalTimestampRole))
	require.True(t, builder.IsLoaded(data.CanonicalSnapshotRole))
}
// testUnfinishedBuild verifies that we can write a build which is not yet
// finished, load the build back from the database, and update it when it
// finishes.
func testUnfinishedBuild(t *testing.T, local bool) {
	testutils.SkipIfShort(t)
	d := clearDB(t, local)
	defer d.Close(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()
	repos := gitinfo.NewRepoMap(tr.Dir)

	// Obtain and insert an unfinished build.
	httpClient = testHttpClient
	b, err := getBuildFromMaster("client.skia", "Test-Ubuntu12-ShuttleA-GTX550Ti-x86_64-Release-Valgrind", 152, repos)
	assert.Nil(t, err)
	assert.False(t, b.IsFinished(), "Unfinished build thinks it's finished!")
	dbSerializeAndCompare(t, d, b, true)

	// Ensure that the build is found by GetUnfinishedBuilds.
	unfinished, err := d.DB().GetUnfinishedBuilds(b.Master)
	assert.Nil(t, err)
	found := false
	for _, u := range unfinished {
		if u.Master == b.Master && u.Builder == b.Builder && u.Number == b.Number {
			found = true
			break
		}
	}
	assert.True(t, found, "Unfinished build was not found by GetUnfinishedBuilds!")

	// Add another step to the build to "finish" it, ensure that we can
	// retrieve it as expected.
	b.Finished = b.Started.Add(30 * time.Second)
	stepStarted := b.Started.Add(500 * time.Millisecond)
	s := &BuildStep{
		Name:     "LastStep",
		Number:   len(b.Steps),
		Results:  0,
		Started:  stepStarted,
		Finished: b.Finished,
	}
	b.Steps = append(b.Steps, s)
	assert.True(t, b.IsFinished(), "Finished build thinks it's unfinished!")
	dbSerializeAndCompare(t, d, b, true)

	// Ensure that the finished build is NOT found by GetUnfinishedBuilds.
	unfinished, err = d.DB().GetUnfinishedBuilds(b.Master)
	assert.Nil(t, err)
	found = false
	for _, u := range unfinished {
		if u.Master == b.Master && u.Builder == b.Builder && u.Number == b.Number {
			found = true
			break
		}
	}
	assert.False(t, found, "Finished build was found by GetUnfinishedBuilds!")
}
func TestSliceCompare(t *testing.T) {
	require.True(t, sliceCompare([]string{"test"}, query.EQ, "test"))
	require.True(t, sliceCompare([]string{"niet", "test"}, query.EQ, "test"))
	require.False(t, sliceCompare([]string{"niet", "test"}, query.EQ, "tEst"))
	require.True(t, sliceCompare([]string{"niet", "test"}, query.LIKE, "Est"))
	require.False(t, sliceCompare([]string{"niet", "test"}, query.LIKE, "42"))
}
func Test_HookProvider_TransformRequest(t *testing.T) {
	provider := HookProvider{}

	t.Log("Should be OK")
	{
		request := http.Request{
			Header: http.Header{
				"Content-Type": {"application/x-www-form-urlencoded"},
			},
		}
		form := url.Values{}
		form.Add("trigger_word", "bitrise:")
		form.Add("text", "bitrise: branch:master")
		request.PostForm = form

		hookTransformResult := provider.TransformRequest(&request)
		require.NoError(t, hookTransformResult.Error)
		require.False(t, hookTransformResult.ShouldSkip)
		require.Equal(t, []bitriseapi.TriggerAPIParamsModel{
			{
				BuildParams: bitriseapi.BuildParamsModel{
					Branch:       "master",
					Environments: []bitriseapi.EnvironmentItem{},
				},
			},
		}, hookTransformResult.TriggerAPIParams)
	}

	t.Log("Unsupported Event Type")
	{
		request := http.Request{
			Header: http.Header{
				"Content-Type": {"application/json"},
			},
		}

		hookTransformResult := provider.TransformRequest(&request)
		require.False(t, hookTransformResult.ShouldSkip)
		require.EqualError(t, hookTransformResult.Error, "Content-Type is not supported: application/json")
	}

	t.Log("Missing 'text' from request data")
	{
		request := http.Request{
			Header: http.Header{
				"Content-Type": {"application/x-www-form-urlencoded"},
			},
		}
		form := url.Values{}
		form.Add("trigger_word", "the trigger word")
		request.PostForm = form

		hookTransformResult := provider.TransformRequest(&request)
		require.False(t, hookTransformResult.ShouldSkip)
		require.EqualError(t, hookTransformResult.Error, "Failed to parse the request/message: 'trigger_word' parameter found, but 'text' parameter is missing or empty")
	}
}
func TestCrySameLocation(t *testing.T) {
	require.True(t, cryHeard())
	require.True(t, cryHeard())
	require.False(t, cryHeard())
	require.True(t, cryHeard())
	require.False(t, cryHeard())
	require.False(t, cryHeard())
	require.False(t, cryHeard())
	require.True(t, cryHeard())
}
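// A minimal sketch of a per-call-site throttle consistent with the pattern asserted
// above: from the same location, the cry is "heard" on the 1st, 2nd, 4th, 8th, ...
// call, i.e. whenever the per-location call count is a power of two. This is an
// assumption for illustration, not the actual cryHeard implementation. It keys the
// counter by the calling function (via the standard "runtime" package) and is not
// goroutine-safe.
var cryCountsSketch = map[uintptr]uint64{}

func cryHeardSketch() bool {
	pc, _, _, _ := runtime.Caller(1)
	key := runtime.FuncForPC(pc).Entry() // group all calls from the same function
	cryCountsSketch[key]++
	n := cryCountsSketch[key]
	return n&(n-1) == 0 // true exactly when n is a power of two
}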
func TestIsBaseRole(t *testing.T) {
	for _, role := range BaseRoles {
		require.True(t, IsBaseRole(role))
	}
	require.False(t, IsBaseRole("user"))
	require.False(t, IsBaseRole(
		path.Join(CanonicalTargetsRole, "level1", "level2", "level3")))
	require.False(t, IsBaseRole(path.Join(CanonicalTargetsRole, "level1")))
	require.False(t, IsBaseRole(""))
}
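// A minimal sketch consistent with the assertions above: a role is a base role only if
// it exactly matches one of the canonical base role names, so delegation paths like
// "targets/level1" and arbitrary names like "user" are rejected. This is an assumption
// for illustration, not necessarily the library's implementation.
func isBaseRoleSketch(role string) bool {
	for _, baseRole := range BaseRoles {
		if role == baseRole {
			return true
		}
	}
	return false
}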
func TestSizeCompare(t *testing.T) {
	require.True(t, sizeCompare(42, query.EQ, "42"))
	require.True(t, sizeCompare(42*1000, query.EQ, "42kb"))
	require.True(t, sizeCompare(42*1024, query.EQ, "42KB"))
	require.False(t, sizeCompare(42*1024, query.EQ, "42Mb"))
	require.True(t, sizeCompare(42*1024, query.GT, "42"))
	require.True(t, sizeCompare(42*1024, query.GT, "42kb"))
	require.False(t, sizeCompare(42*1024, query.GT, "42MB"))
}
func TestStrCompare(t *testing.T) {
	require.True(t, strCompare("test", query.EQ, "test"))
	require.False(t, strCompare("test", query.EQ, "niet"))
	require.True(t, strCompare("test", query.LIKE, "test"))
	require.True(t, strCompare("test", query.LIKE, "tes"))
	require.True(t, strCompare("test", query.LIKE, "est"))
	require.True(t, strCompare("tEsT", query.LIKE, "eSt"))
	require.False(t, strCompare("test", query.LIKE, "niet"))
}
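// A minimal sketch consistent with the assertions above and in TestSliceCompare:
// EQ is an exact, case-sensitive match, while LIKE is a case-insensitive substring
// match. This is an assumption for illustration, not the package's actual helper;
// the operator type name query.Operand is hypothetical (only the query.EQ and
// query.LIKE values appear in the tests). Uses the standard "strings" package.
func strCompareSketch(value string, op query.Operand, expected string) bool {
	switch op {
	case query.EQ:
		return value == expected
	case query.LIKE:
		return strings.Contains(strings.ToLower(value), strings.ToLower(expected))
	default:
		return false
	}
}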
func TestImportKeys2InOneFile(t *testing.T) {
	s := NewTestImportStore()

	b := &pem.Block{
		Headers: make(map[string]string),
	}
	b.Bytes = make([]byte, 1000)
	rand.Read(b.Bytes)
	b.Headers["path"] = "ankh"

	b2 := &pem.Block{
		Headers: make(map[string]string),
	}
	b2.Bytes = make([]byte, 1000)
	rand.Read(b2.Bytes)
	b2.Headers["path"] = "ankh"

	c := &pem.Block{
		Headers: make(map[string]string),
	}
	c.Bytes = make([]byte, 1000)
	rand.Read(c.Bytes)
	c.Headers["path"] = "morpork"

	bBytes := pem.EncodeToMemory(b)
	b2Bytes := pem.EncodeToMemory(b2)
	bBytes = append(bBytes, b2Bytes...)
	cBytes := pem.EncodeToMemory(c)

	byt := append(bBytes, cBytes...)
	in := bytes.NewBuffer(byt)

	err := ImportKeys(in, []Importer{s}, "", "", passphraseRetriever)
	require.NoError(t, err)

	bFinal, bRest := pem.Decode(s.data["ankh"])
	require.Equal(t, b.Bytes, bFinal.Bytes)
	_, ok := bFinal.Headers["path"]
	require.False(t, ok, "expected no path header, should have been removed at import")
	role := bFinal.Headers["role"]
	require.Equal(t, notary.DefaultImportRole, role)

	b2Final, b2Rest := pem.Decode(bRest)
	require.Equal(t, b2.Bytes, b2Final.Bytes)
	_, ok = b2Final.Headers["path"]
	require.False(t, ok, "expected no path header, should have been removed at import")
	require.Len(t, b2Rest, 0)

	cFinal, cRest := pem.Decode(s.data["morpork"])
	require.Equal(t, c.Bytes, cFinal.Bytes)
	_, ok = cFinal.Headers["path"]
	require.False(t, ok, "expected no path header, should have been removed at import")
	require.Len(t, cRest, 0)
}
func TestSpecOptionalOption3Choice(t *testing.T) {
	var f, g, h *bool
	init := func(c *Cmd) {
		f = c.BoolOpt("f", false, "")
		g = c.BoolOpt("g", false, "")
		h = c.BoolOpt("x", false, "")
	}
	spec := "[-f|-g|-x]"

	okCmd(t, spec, init, []string{})
	require.False(t, *f)
	require.False(t, *g)
	require.False(t, *h)

	okCmd(t, spec, init, []string{"-f"})
	require.True(t, *f)
	require.False(t, *g)
	require.False(t, *h)

	okCmd(t, spec, init, []string{"-g"})
	require.False(t, *f)
	require.True(t, *g)
	require.False(t, *h)

	okCmd(t, spec, init, []string{"-x"})
	require.False(t, *f)
	require.False(t, *g)
	require.True(t, *h)
}
func TestTruncation(t *testing.T) {
	//common.SetLogLevel("debug")
	peername, err := router.PeerNameFromString("00:00:00:02:00:00")
	require.Nil(t, err)

	nameserver := New(peername, nil, nil, "")
	dnsserver, err := NewDNSServer(nameserver, "weave.local.", "0.0.0.0:0", 30, 5*time.Second)
	require.Nil(t, err)
	udpPort := dnsserver.servers[0].PacketConn.LocalAddr().(*net.UDPAddr).Port
	tcpPort := dnsserver.servers[1].Listener.Addr().(*net.TCPAddr).Port
	go dnsserver.ActivateAndServe()
	defer dnsserver.Stop()

	// Add 100 mappings to the nameserver
	addrs := []address.Address{}
	for i := address.Address(0); i < 100; i++ {
		addrs = append(addrs, i)
		nameserver.AddEntry("foo.weave.local.", "", peername, i)
	}

	doRequest := func(client *dns.Client, request *dns.Msg, port int) *dns.Msg {
		request.SetQuestion("foo.weave.local.", dns.TypeA)
		response, _, err := client.Exchange(request, fmt.Sprintf("127.0.0.1:%d", port))
		require.Nil(t, err)
		return response
	}

	// do a udp query, ensure we get a truncated response
	{
		udpClient := dns.Client{Net: "udp", UDPSize: minUDPSize}
		response := doRequest(&udpClient, &dns.Msg{}, udpPort)
		require.True(t, response.MsgHdr.Truncated)
		require.True(t, len(response.Answer) < 100)
	}

	// do a udp query with a big size, ensure we don't get a truncated response
	{
		udpClient := dns.Client{Net: "udp", UDPSize: 65535}
		request := &dns.Msg{}
		request.SetEdns0(65535, false)
		response := doRequest(&udpClient, request, udpPort)
		require.False(t, response.MsgHdr.Truncated)
		require.Equal(t, len(response.Answer), 100)
	}

	// do a tcp query, ensure we don't get a truncated response
	{
		tcpClient := dns.Client{Net: "tcp"}
		response := doRequest(&tcpClient, &dns.Msg{}, tcpPort)
		require.False(t, response.MsgHdr.Truncated)
		require.Equal(t, len(response.Answer), 100)
	}
}
func TestSubscribeMessageFields(t *testing.T) {
	msg := NewSubscribeMessage()

	msg.SetPacketId(100)
	require.Equal(t, 100, int(msg.PacketId()), "Error setting packet ID.")

	msg.AddTopic([]byte("/a/b/#/c"), 1)
	require.Equal(t, 1, len(msg.Topics()), "Error adding topic.")

	require.False(t, msg.TopicExists([]byte("a/b")), "Topic should not exist.")

	msg.RemoveTopic([]byte("/a/b/#/c"))
	require.False(t, msg.TopicExists([]byte("/a/b/#/c")), "Topic should not exist.")
}