func TestInFlightAAAAAndA(t *T) { m1 := new(dns.Msg) m1.SetQuestion(testAnyDomain, dns.TypeAAAA) w1 := getWriter() m2 := new(dns.Msg) m2.SetQuestion(testAnyDomain, dns.TypeA) w2 := getWriter() go func() { handleRequest(w1, m1) }() go func() { handleRequest(w2, m2) }() var r1 *dns.Msg var r2 *dns.Msg for r1 == nil || r2 == nil { select { case r1 = <-w1.ReplyCh: case r2 = <-w2.ReplyCh: } } require.Len(t, r1.Answer, 1) require.Len(t, r2.Answer, 1) assert.NotEqual(t, r1.Answer[0], r2.Answer[0]) }
func Test(t *T) { m1 := new(dns.Msg) m1.SetQuestion(testDomain, dns.TypeA) w1 := getWriter() go func() { handleRequest(w1, m1) }() r1 := <-w1.ReplyCh require.Len(t, r1.Answer, 1) m2 := new(dns.Msg) m2.SetQuestion(testDomain, dns.TypeA) r2, err := dns.Exchange(m2, "8.8.8.8:53") require.Nil(t, err) require.Len(t, r2.Answer, 1) assert.Equal(t, r2.Rcode, r1.Rcode) a1 := strings.Split(r1.Answer[0].String(), "\t") //example: a-test.mysuperfancyapi.com., 245, IN, A, 192.95.20.208 //we want to overwrite the TTL since that will be different a2 := strings.Split(r2.Answer[0].String(), "\t") a1[1] = "" a2[1] = "" assert.Equal(t, a2, a1) }
func TestCommandCollector(t *testing.T) { mock := CommandCollector{} SetRunForTesting(mock.Run) defer SetRunForTesting(DefaultRun) assert.NoError(t, Run(&Command{ Name: "touch", Args: []string{"foobar"}, })) assert.NoError(t, Run(&Command{ Name: "echo", Args: []string{"Hello Go!"}, })) commands := mock.Commands() assert.Len(t, commands, 2) expect.Equal(t, "touch foobar", DebugString(commands[0])) expect.Equal(t, "echo Hello Go!", DebugString(commands[1])) mock.ClearCommands() inputString := "foo\nbar\nbaz\n" output := bytes.Buffer{} assert.NoError(t, Run(&Command{ Name: "grep", Args: []string{"-e", "^ba"}, Stdin: bytes.NewReader([]byte(inputString)), Stdout: &output, })) commands = mock.Commands() assert.Len(t, commands, 1) expect.Equal(t, "grep -e ^ba", DebugString(commands[0])) actualInput, err := ioutil.ReadAll(commands[0].Stdin) assert.NoError(t, err) expect.Equal(t, inputString, string(actualInput)) expect.Equal(t, &output, commands[0].Stdout) }
func TestIndexCellsPoint(t *testing.T) { p := geom.NewPoint(geom.XY).MustSetCoords(geom.Coord{-122.082506, 37.4249518}) parents, cover, err := indexCells(types.Geo{p}) require.NoError(t, err) require.Len(t, parents, MaxCellLevel-MinCellLevel+1) c := parents[0] if c.Level() != MinCellLevel { t.Errorf("Expected cell level %d. Got %d instead.", MinCellLevel, c.Level()) } if c.ToToken() != "808c" { t.Errorf("Unexpected cell token %s.", c.ToToken()) } c = parents[len(parents)-1] if c.Level() != MaxCellLevel { t.Errorf("Expected cell level %d. Got %d instead.", MaxCellLevel, c.Level()) } if c.ToToken() != "808fb9f81" { t.Errorf("Unexpected cell token %s.", c.ToToken()) } // check that all cell levels are different pc := parents[0] for _, c := range parents[1:] { if c.Level() <= pc.Level() { t.Errorf("Expected cell to have level greater than %d. Got %d", pc.Level(), c.Level()) } pc = c } // Check that cover only has one item require.Len(t, cover, 1) c = cover[0] require.Equal(t, c.Level(), MaxCellLevel) require.Equal(t, c.ToToken(), "808fb9f81") }
func TestBlockHeaderPrecedenceGunFromPath(t *testing.T) { // this is a proof of concept that if we have legacy fixtures with nested paths, we infer the gun from them correctly s := NewTestImportStore() from, _ := os.OpenFile("../fixtures/secure.example.com.key", os.O_RDONLY, notary.PrivExecPerms) defer from.Close() fromBytes, _ := ioutil.ReadAll(from) b, _ := pem.Decode(fromBytes) b.Headers["role"] = data.CanonicalSnapshotRole b.Headers["path"] = filepath.Join(notary.NonRootKeysSubdir, "anothergun", "12ba0e0a8e05e177bc2c3489bdb6d28836879469f078e68a4812fc8a2d521497") bBytes := pem.EncodeToMemory(b) in := bytes.NewBuffer(bBytes) err := ImportKeys(in, []Importer{s}, "somerole", "somegun", passphraseRetriever) require.NoError(t, err) require.Len(t, s.data, 1) for key := range s.data { // block header role= root should take precedence over command line flag require.Equal(t, "12ba0e0a8e05e177bc2c3489bdb6d28836879469f078e68a4812fc8a2d521497", key) final, rest := pem.Decode(s.data[key]) require.Len(t, rest, 0) require.Equal(t, final.Headers["role"], "snapshot") require.Equal(t, final.Headers["gun"], "anothergun") } }
// path and encrypted key should succeed, tests gun inference from path as well func TestEncryptedKeyImportSuccess(t *testing.T) { s := NewTestImportStore() privKey, err := utils.GenerateECDSAKey(rand.Reader) originalKey := privKey.Private() require.NoError(t, err) pemBytes, err := utils.EncryptPrivateKey(privKey, data.CanonicalSnapshotRole, "somegun", cannedPassphrase) require.NoError(t, err) b, _ := pem.Decode(pemBytes) b.Headers["path"] = privKey.ID() pemBytes = pem.EncodeToMemory(b) in := bytes.NewBuffer(pemBytes) _ = ImportKeys(in, []Importer{s}, "", "", passphraseRetriever) require.Len(t, s.data, 1) keyBytes := s.data[privKey.ID()] bFinal, bRest := pem.Decode(keyBytes) require.Equal(t, "somegun", bFinal.Headers["gun"]) require.Len(t, bRest, 0) // we should fail to parse it without the passphrase privKey, err = utils.ParsePEMPrivateKey(keyBytes, "") require.Equal(t, err, errors.New("could not decrypt private key")) require.Nil(t, privKey) // we should succeed to parse it with the passphrase privKey, err = utils.ParsePEMPrivateKey(keyBytes, cannedPassphrase) require.NoError(t, err) require.Equal(t, originalKey, privKey.Private()) }
// removing one or more keys from a role marks root as dirty as well as the role
func TestRemoveBaseKeysFromRoot(t *testing.T) {
	for _, role := range data.BaseRoles {
		ed25519 := signed.NewEd25519()
		repo := initRepo(t, ed25519)

		// Each base role starts with exactly one key, mirrored in root metadata.
		origKeyIDs := ed25519.ListKeys(role)
		require.Len(t, origKeyIDs, 1)
		require.Len(t, repo.Root.Signed.Roles[role].KeyIDs, 1)

		require.NoError(t, repo.RemoveBaseKeys(role, origKeyIDs...))

		// Removing the key empties the role's key list and dirties root.
		require.Len(t, repo.Root.Signed.Roles[role].KeyIDs, 0)
		require.True(t, repo.Root.Dirty)

		// The affected role's own metadata must also be flagged dirty; for
		// root itself, the tracked original root role still holds the old key.
		switch role {
		case data.CanonicalSnapshotRole:
			require.True(t, repo.Snapshot.Dirty)
		case data.CanonicalTargetsRole:
			require.True(t, repo.Targets[data.CanonicalTargetsRole].Dirty)
		case data.CanonicalTimestampRole:
			require.True(t, repo.Timestamp.Dirty)
		case data.CanonicalRootRole:
			require.Len(t, repo.originalRootRole.Keys, 1)
			require.Contains(t, repo.originalRootRole.ListKeyIDs(), origKeyIDs[0])
		}
	}
}
func TestBlockHeaderPrecedenceRoleAndGun(t *testing.T) { s := NewTestImportStore() from, _ := os.OpenFile("../fixtures/secure.example.com.key", os.O_RDONLY, notary.PrivExecPerms) defer from.Close() fromBytes, _ := ioutil.ReadAll(from) b, _ := pem.Decode(fromBytes) b.Headers["role"] = data.CanonicalSnapshotRole b.Headers["gun"] = "anothergun" bBytes := pem.EncodeToMemory(b) in := bytes.NewBuffer(bBytes) err := ImportKeys(in, []Importer{s}, "somerole", "somegun", passphraseRetriever) require.NoError(t, err) require.Len(t, s.data, 1) for key := range s.data { // block header role= root should take precedence over command line flag require.Equal(t, "12ba0e0a8e05e177bc2c3489bdb6d28836879469f078e68a4812fc8a2d521497", key) final, rest := pem.Decode(s.data[key]) require.Len(t, rest, 0) require.Equal(t, final.Headers["role"], "snapshot") require.Equal(t, final.Headers["gun"], "anothergun") } }
// adding a key to a role marks root as dirty as well as the role func TestAddBaseKeysToRoot(t *testing.T) { for _, role := range data.BaseRoles { ed25519 := signed.NewEd25519() repo := initRepo(t, ed25519) origKeyIDs := ed25519.ListKeys(role) require.Len(t, origKeyIDs, 1) key, err := ed25519.Create(role, testGUN, data.ED25519Key) require.NoError(t, err) require.Len(t, repo.Root.Signed.Roles[role].KeyIDs, 1) require.NoError(t, repo.AddBaseKeys(role, key)) _, ok := repo.Root.Signed.Keys[key.ID()] require.True(t, ok) require.Len(t, repo.Root.Signed.Roles[role].KeyIDs, 2) require.True(t, repo.Root.Dirty) switch role { case data.CanonicalSnapshotRole: require.True(t, repo.Snapshot.Dirty) case data.CanonicalTargetsRole: require.True(t, repo.Targets[data.CanonicalTargetsRole].Dirty) case data.CanonicalTimestampRole: require.True(t, repo.Timestamp.Dirty) case data.CanonicalRootRole: require.NoError(t, err) require.Len(t, repo.originalRootRole.Keys, 1) require.Contains(t, repo.originalRootRole.ListKeyIDs(), origKeyIDs[0]) } } }
func TestDeleteDelegationsMidSliceRole(t *testing.T) { ed25519 := signed.NewEd25519() repo := initRepo(t, ed25519) testKey, err := ed25519.Create("targets/test", testGUN, data.ED25519Key) require.NoError(t, err) err = repo.UpdateDelegationKeys("targets/test", []data.PublicKey{testKey}, []string{}, 1) require.NoError(t, err) err = repo.UpdateDelegationPaths("targets/test", []string{""}, []string{}, false) require.NoError(t, err) err = repo.UpdateDelegationKeys("targets/test2", []data.PublicKey{testKey}, []string{}, 1) require.NoError(t, err) err = repo.UpdateDelegationPaths("targets/test2", []string{""}, []string{}, false) require.NoError(t, err) err = repo.UpdateDelegationKeys("targets/test3", []data.PublicKey{testKey}, []string{}, 1) require.NoError(t, err) err = repo.UpdateDelegationPaths("targets/test3", []string{"test"}, []string{}, false) require.NoError(t, err) err = repo.DeleteDelegation("targets/test2") require.NoError(t, err) r, ok := repo.Targets[data.CanonicalTargetsRole] require.True(t, ok) require.Len(t, r.Signed.Delegations.Roles, 2) require.Len(t, r.Signed.Delegations.Keys, 1) require.True(t, r.Dirty) }
// A delegation can be created with a role that is missing a signing key, so // long as UpdateDelegations is called with the key func TestUpdateDelegationsRoleThatIsMissingDelegationKey(t *testing.T) { ed25519 := signed.NewEd25519() repo := initRepo(t, ed25519) roleKey, err := ed25519.Create("Invalid Role", testGUN, data.ED25519Key) require.NoError(t, err) // key should get added to role as part of updating the delegation err = repo.UpdateDelegationKeys("targets/role", []data.PublicKey{roleKey}, []string{}, 1) require.NoError(t, err) err = repo.UpdateDelegationPaths("targets/role", []string{""}, []string{}, false) require.NoError(t, err) r, ok := repo.Targets[data.CanonicalTargetsRole] require.True(t, ok) require.Len(t, r.Signed.Delegations.Roles, 1) require.Len(t, r.Signed.Delegations.Keys, 1) keyIDs := r.Signed.Delegations.Roles[0].KeyIDs require.Len(t, keyIDs, 1) require.Equal(t, roleKey.ID(), keyIDs[0]) require.True(t, r.Dirty) // no empty delegation metadata created for new delegation _, ok = repo.Targets["targets/role"] require.False(t, ok, "no targets file should be created for empty delegation") }
// If there is more than one key, removeKeyInteractively will ask which key to // delete. Then it will confirm whether they want to delete, and the user can // abort at that confirmation. func TestRemoveMultikeysAbortChoice(t *testing.T) { setUp(t) in := bytes.NewBuffer([]byte("1\nn\n")) key, err := trustmanager.GenerateED25519Key(rand.Reader) require.NoError(t, err) stores := []trustmanager.KeyStore{ trustmanager.NewKeyMemoryStore(ret), trustmanager.NewKeyMemoryStore(ret), } err = stores[0].AddKey(trustmanager.KeyInfo{Role: data.CanonicalRootRole, Gun: ""}, key) require.NoError(t, err) err = stores[1].AddKey(trustmanager.KeyInfo{Role: data.CanonicalTargetsRole, Gun: "gun"}, key) require.NoError(t, err) var out bytes.Buffer err = removeKeyInteractively(stores, key.ID(), in, &out) require.NoError(t, err) // no error to abort deleting text, err := ioutil.ReadAll(&out) require.NoError(t, err) require.Len(t, stores[0].ListKeys(), 1) require.Len(t, stores[1].ListKeys(), 1) // It should have listed the keys, asked whether the user really wanted to // delete, and then aborted. output := string(text) require.Contains(t, output, "Found the following matching keys") require.Contains(t, output, "Are you sure") require.Contains(t, output, "Aborting action") }
// TestModuleMethods exercises Module.Name and Module.AddMethod: registering
// a duplicate method name records exactly one init error, while registering
// new names (with any number of hooks attached) records none.
func TestModuleMethods(t *testing.T) {
	// Create a new module.
	m, err := NewModule("test2")
	require.Nil(t, err)
	require.NotNil(t, m)

	// Name should echo back what the module was created with.
	require.Equal(t, m.Name(), "test2")

	initErrors = initErrors[:0] // Reset the recorded init errors.

	// First registration of "method" succeeds; the duplicate records one error.
	m.AddMethod("method", func(*Context) error { return nil })
	require.Len(t, initErrors, 0)
	m.AddMethod("method", func(*Context) error { return nil })
	require.Len(t, initErrors, 1)

	// New names register cleanly regardless of how many hooks are attached;
	// the error count stays at one.
	m.AddMethod("method1", func(*Context) error { return nil }, &TestHook{})
	require.Len(t, initErrors, 1)
	m.AddMethod("method2", func(*Context) error { return nil }, &TestHook{}, &TestHook{}, &TestHook{})
	require.Len(t, initErrors, 1)
}
// UpdateTimestamp will re-calculate the snapshot hash
func TestSwizzlerUpdateTimestamp(t *testing.T) {
	f, origMeta := createNewSwizzler(t)

	// nothing has changed, signed data should be the same (signatures might
	// change because signatures may have random elements)
	f.UpdateTimestampHash()
	newMeta, err := f.MetadataCache.GetMeta(data.CanonicalTimestampRole, -1)
	require.NoError(t, err)

	origSigned, newSigned := &data.Signed{}, &data.Signed{}
	require.NoError(t, json.Unmarshal(origMeta[data.CanonicalTimestampRole], origSigned))
	require.NoError(t, json.Unmarshal(newMeta, newSigned))
	require.True(t, bytes.Equal(origSigned.Signed, newSigned.Signed))

	// update snapshot
	f.OffsetMetadataVersion(data.CanonicalSnapshotRole, 1)
	// update the timestamp: the raw timestamp bytes must now differ
	f.UpdateTimestampHash()
	newMeta, err = f.MetadataCache.GetMeta(data.CanonicalTimestampRole, -1)
	require.NoError(t, err)
	require.False(t, bytes.Equal(origMeta[data.CanonicalTimestampRole], newMeta))

	// Specifically, the snapshot entry in the timestamp's Meta map (its only
	// entry) must have changed to reflect the bumped snapshot version.
	origTimestamp, newTimestamp := &data.SignedTimestamp{}, &data.SignedTimestamp{}
	require.NoError(t, json.Unmarshal(origMeta[data.CanonicalTimestampRole], origTimestamp))
	require.NoError(t, json.Unmarshal(newMeta, newTimestamp))
	require.Len(t, origTimestamp.Signed.Meta, 1)
	require.Len(t, newTimestamp.Signed.Meta, 1)
	require.False(t, reflect.DeepEqual(
		origTimestamp.Signed.Meta[data.CanonicalSnapshotRole],
		newTimestamp.Signed.Meta[data.CanonicalSnapshotRole]))
}
func TestParseNetstatOutput(t *testing.T) { nsInterfaces, err := parseNetstatOutput(netstatNotTruncated) assert.NoError(t, err) assert.Len(t, nsInterfaces, 8) for index := range nsInterfaces { assert.NotNil(t, nsInterfaces[index].stat, "Index %d", index) } assert.NotNil(t, nsInterfaces[0].linkId) assert.Equal(t, uint(1), *nsInterfaces[0].linkId) assert.Nil(t, nsInterfaces[1].linkId) assert.Nil(t, nsInterfaces[2].linkId) assert.Nil(t, nsInterfaces[3].linkId) assert.NotNil(t, nsInterfaces[4].linkId) assert.Equal(t, uint(2), *nsInterfaces[4].linkId) assert.NotNil(t, nsInterfaces[5].linkId) assert.Equal(t, uint(3), *nsInterfaces[5].linkId) assert.NotNil(t, nsInterfaces[6].linkId) assert.Equal(t, uint(4), *nsInterfaces[6].linkId) assert.Nil(t, nsInterfaces[7].linkId) mapUsage := newMapInterfaceNameUsage(nsInterfaces) assert.False(t, mapUsage.isTruncated()) assert.Len(t, mapUsage.notTruncated(), 4) }
// TestParseTLSWithTLS: a config supplying cert, key, and client CA files
// should produce a TLS config with the loaded certificate, one client CA
// subject, and mandatory client-certificate verification.
func TestParseTLSWithTLS(t *testing.T) {
	config := configure(fmt.Sprintf(`{ "server": { "tls_cert_file": "%s", "tls_key_file": "%s", "client_ca_file": "%s" } }`, Cert, Key, Root))
	tlsConfig, err := ParseServerTLS(config, false)
	require.NoError(t, err)

	// Load the same cert/CA fixtures directly for comparison.
	expectedCert, err := tls.LoadX509KeyPair(Cert, Key)
	require.NoError(t, err)
	expectedRoot, err := trustmanager.LoadCertFromFile(Root)
	require.NoError(t, err)

	require.Len(t, tlsConfig.Certificates, 1)
	require.True(t, reflect.DeepEqual(expectedCert, tlsConfig.Certificates[0]))

	// Exactly one client CA, matching the fixture's subject.
	subjects := tlsConfig.ClientCAs.Subjects()
	require.Len(t, subjects, 1)
	require.True(t, bytes.Equal(expectedRoot.RawSubject, subjects[0]))
	// Supplying a client CA implies client certs are required and verified.
	require.Equal(t, tlsConfig.ClientAuth, tls.RequireAndVerifyClientCert)
}
// This signs the metadata with the wrong key func TestSwizzlerSignMetadataWithInvalidKey(t *testing.T) { f, origMeta := createNewSwizzler(t) f.SignMetadataWithInvalidKey(data.CanonicalTimestampRole) for role, metaBytes := range origMeta { newMeta, err := f.MetadataCache.GetMeta(role, -1) require.NoError(t, err) if role != data.CanonicalTimestampRole { require.True(t, bytes.Equal(metaBytes, newMeta), "bytes have changed for role %s", role) } else { require.False(t, bytes.Equal(metaBytes, newMeta)) // it is JSON unmarshallable as a timestamp, but the signature ID // does not match. require.NoError(t, json.Unmarshal(newMeta, &data.SignedTimestamp{})) origSigned, newSigned := &data.Signed{}, &data.Signed{} require.NoError(t, json.Unmarshal(metaBytes, origSigned)) require.NoError(t, json.Unmarshal(newMeta, newSigned)) require.Len(t, origSigned.Signatures, 1) require.Len(t, newSigned.Signatures, 1) require.NotEqual(t, origSigned.Signatures[0].KeyID, newSigned.Signatures[0].KeyID) } } }
// TestParseTLSWithEnvironmentVariables checks that SERVER_* environment
// variables supply the TLS key file and override the (bogus) client CA file
// from the JSON config when environment lookup is enabled.
func TestParseTLSWithEnvironmentVariables(t *testing.T) {
	// The config intentionally omits the key file and points the client CA
	// at a nonexistent path; both are corrected via the environment below.
	config := configure(fmt.Sprintf(`{ "server": { "tls_cert_file": "%s", "client_ca_file": "nosuchfile" } }`, Cert))
	vars := map[string]string{
		"SERVER_TLS_KEY_FILE":   Key,
		"SERVER_CLIENT_CA_FILE": Root,
	}
	setupEnvironmentVariables(t, vars)
	defer cleanupEnvironmentVariables(t, vars)

	// doBundle=true enables environment-variable lookup.
	tlsConfig, err := ParseServerTLS(config, true)
	require.NoError(t, err)

	expectedCert, err := tls.LoadX509KeyPair(Cert, Key)
	require.NoError(t, err)
	expectedRoot, err := trustmanager.LoadCertFromFile(Root)
	require.NoError(t, err)

	require.Len(t, tlsConfig.Certificates, 1)
	require.True(t, reflect.DeepEqual(expectedCert, tlsConfig.Certificates[0]))

	subjects := tlsConfig.ClientCAs.Subjects()
	require.Len(t, subjects, 1)
	require.True(t, bytes.Equal(expectedRoot.RawSubject, subjects[0]))
	require.Equal(t, tlsConfig.ClientAuth, tls.RequireAndVerifyClientCert)
}
func TestUserGroups(t *testing.T) { err := RegisterGroup("a") require.Nil(t, err) err = RegisterGroup("b") require.Nil(t, err) err = RegisterGroup("c") require.Nil(t, err) u, err := NewUser("foo", "bar", "*****@*****.**", "secretpassword") require.Nil(t, err) require.NotNil(t, u) u.AddGroup("a", "a", "a", "b", "c", "b") require.Len(t, u.Groups, 3) u.RemoveGroup("a", "b") require.Len(t, u.Groups, 1) u.Groups = append(u.Groups, "a", "a", "b", "b", "b") require.Len(t, u.Groups, 6) u.RemoveGroup("a", "b") require.Len(t, u.Groups, 1) }
// If, when adding a key to the Yubikey, and it already exists, we succeed
// without adding it to the backup store.
func TestYubiAddDuplicateKeySucceedsButDoesNotBackup(t *testing.T) {
	if !IsAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	// Disable touch/PIN prompts for the test; restore the default on exit.
	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	// Seed the Yubikey with one key via a first store.
	origStore, err := NewYubiStore(trustmanager.NewKeyMemoryStore(ret), ret)
	require.NoError(t, err)
	key, err := testAddKey(t, origStore)
	require.NoError(t, err)

	// A second store with a fresh, empty backup sees the existing key.
	backup := trustmanager.NewKeyMemoryStore(ret)
	cleanStore, err := NewYubiStore(backup, ret)
	require.NoError(t, err)
	require.Len(t, cleanStore.ListKeys(), 1)

	// Re-adding the same key must succeed...
	err = cleanStore.AddKey(trustmanager.KeyInfo{Role: data.CanonicalRootRole, Gun: ""}, key)
	require.NoError(t, err)

	// there should be just 1 key on the yubikey
	require.Len(t, cleanListKeys(t), 1)
	// nothing was added to the backup
	require.Len(t, backup.ListKeys(), 0)
}
// testStoreFeatures runs the Feature CRUD contract against the given Store
// implementation: missing lookups, empty listings, create, update, and
// per-environment status handling.
func testStoreFeatures(t *testing.T, store Store) {
	// it should fail to get a feature that doesn't exist
	found, err := store.GetFeatureByName("awesome_feature")
	require.Equal(t, ErrNoRows, err)

	// it should get an empty list when there are no features
	list, err := store.ListFeatures()
	require.NoError(t, err)
	require.Empty(t, list)

	// it should create a feature
	feature := &models.Feature{
		Name: "awesome_feature",
	}
	err = store.CreateFeature(feature)
	require.NoError(t, err)
	require.NotEqual(t, feature.ID, 0)
	require.Equal(t, "awesome_feature", feature.Name)
	require.Empty(t, feature.Status)

	// it should update a feature
	feature.Name = "not_awesome_feature"
	err = store.UpdateFeature(feature)
	require.NoError(t, err)

	// it should list all features
	list, err = store.ListFeatures()
	require.NoError(t, err)
	require.Len(t, list, 1)

	// it should get a feature with the status of all environments
	err = store.CreateEnvironment(&models.Environment{
		Name: "staging",
	})
	require.NoError(t, err)
	found, err = store.GetFeatureByName("not_awesome_feature")
	require.NoError(t, err)
	require.Len(t, found.Status, 1)
	require.Equal(t, false, found.Status["staging"])

	// it should update the status of a feature in an environment
	found.Status["staging"] = true
	err = store.UpdateFeature(found)
	require.NoError(t, err)
	found, err = store.GetFeatureByName("not_awesome_feature")
	require.NoError(t, err)
	require.Equal(t, true, found.Status["staging"])

	// it should list all features with the status on all environments
	list, err = store.ListFeatures()
	require.NoError(t, err)
	require.Len(t, list, 1)
	require.Equal(t, found.ID, list[0].ID)
	require.True(t, list[0].Status["staging"])
}
func TestByteConversion(t *testing.T) { numbers32 := []uint32{5251, uint32Max, 0, 1, 101, 2387, 219} for _, i := range numbers32 { data, err := uint32ToBytes(i) require.NoError(t, err) require.Len(t, data, 4) ii, err := bytesToUint32(data) require.NoError(t, err) require.True(t, ii == i) } numbers16 := []uint16{5251, uint16Max, 0, 1, 101, 2387, 219} for _, i := range numbers16 { data, err := uint16ToBytes(i) require.NoError(t, err) require.Len(t, data, 2) ii, err := bytesToUint16(data) require.NoError(t, err) require.True(t, ii == i) } }
// TestPollAndExecOnceError: when the worker-side command fails,
// pollAndExecOnce should still poll exactly once, run the full command
// sequence, and report the task back to the frontend as failed.
func TestPollAndExecOnceError(t *testing.T) {
	task := pendingRecreateWebpageArchivesTask()
	mockServer := frontend.MockServer{}
	mockServer.SetCurrentTask(&task.RecreateWebpageArchivesDBTask)
	defer frontend.CloseTestServer(frontend.InitTestServer(&mockServer))

	// Record executed commands, and force the archive-capture step to fail.
	commandCollector := exec.CommandCollector{}
	mockRun := exec.MockRun{}
	commandCollector.SetDelegateRun(mockRun.Run)
	exec.SetRunForTesting(commandCollector.Run)
	defer exec.SetRunForTesting(exec.DefaultRun)
	mockRun.AddRule("capture_archives_on_workers", fmt.Errorf("workers too lazy"))

	pollAndExecOnce()

	// Expect only one poll.
	expect.Equal(t, 1, mockServer.OldestPendingTaskReqCount())
	// Expect three commands: git pull; make all; capture_archives_on_workers ...
	commands := commandCollector.Commands()
	assert.Len(t, commands, 3)
	expect.Equal(t, "git pull", exec.DebugString(commands[0]))
	expect.Equal(t, "make all", exec.DebugString(commands[1]))
	expect.Equal(t, "capture_archives_on_workers", commands[2].Name)

	// Expect an update marking task failed when command fails to execute.
	assert.Len(t, mockServer.UpdateTaskReqs(), 1)
	updateReq := mockServer.UpdateTaskReqs()[0]
	assert.Equal(t, "/"+ctfeutil.UPDATE_RECREATE_WEBPAGE_ARCHIVES_TASK_POST_URI, updateReq.Url)
	assert.NoError(t, updateReq.Error)
	// Failure path: TsStarted never set, but TsCompleted and Failure are.
	assert.False(t, updateReq.Vars.TsStarted.Valid)
	assert.True(t, updateReq.Vars.TsCompleted.Valid)
	assert.True(t, updateReq.Vars.Failure.Valid)
	assert.True(t, updateReq.Vars.Failure.Bool)
	assert.False(t, updateReq.Vars.RepeatAfterDays.Valid)
	assert.Equal(t, int64(42), updateReq.Vars.Id)
}
func TestAdminUsersPageListsConnections(t *testing.T) { buildWebservice(true) //add some connections registerUser(t, "viktor", "pass") user2Id := registerUser(t, "user2", "pass") addConnection(t, "viktor", "pass", user2Id) getHttpReq, _ := http.NewRequest("GET", "/admin/user", nil) getHttpReq.Header.Set("Authorization", basicAuthEncode("admin", "pass")) httpWriter := httptest.NewRecorder() restful.DefaultContainer.ServeHTTP(httpWriter, getHttpReq) require.Equal(t, 200, httpWriter.Code) var users []UserWithConnections json.Unmarshal(httpWriter.Body.Bytes(), &users) require.Len(t, users, 3) assert.Equal(t, "admin", users[0].User.Username) assert.Len(t, users[0].Connections, 0) assert.Equal(t, "viktor", users[1].User.Username) require.Len(t, users[1].Connections, 1) assert.Equal(t, "user2", users[1].Connections[0].Username) assert.Equal(t, user2Id, users[1].Connections[0].Id) }
// Test that multiple buffer overflows are handled properly. func TestRunningOutputMultiOverwrite(t *testing.T) { conf := &OutputConfig{ Filter: Filter{ IsActive: false, }, } m := &mockOutput{} ro := NewRunningOutput("test", m, conf) ro.MetricBufferLimit = 3 for _, metric := range first5 { ro.AddMetric(metric) } for _, metric := range next5 { ro.AddMetric(metric) } require.Len(t, m.Metrics(), 0) err := ro.Write() require.NoError(t, err) require.Len(t, m.Metrics(), 3) var expected, actual []string for i, exp := range next5[2:] { expected = append(expected, exp.String()) actual = append(actual, m.Metrics()[i].String()) } sort.Strings(expected) sort.Strings(actual) assert.Equal(t, expected, actual) }
// TestSetSingleAndSetMultiMeta verifies that HTTPStore.SetMulti uploads all
// metadata roles in one multipart request, Set uploads a single role, and
// transport failures surface as NetworkError on both paths.
func TestSetSingleAndSetMultiMeta(t *testing.T) {
	metas := map[string][]byte{
		"root":    []byte("root data"),
		"targets": []byte("targets data"),
	}

	// The handler records each uploaded multipart part, keyed by role
	// (the part's file name minus its ".json" suffix), so every call's
	// payload can be inspected. `updates` is re-made per request.
	var updates map[string][]byte

	handler := func(w http.ResponseWriter, r *http.Request) {
		reader, err := r.MultipartReader()
		require.NoError(t, err)
		updates = make(map[string][]byte)
		for {
			part, err := reader.NextPart()
			if err == io.EOF {
				break
			}
			role := strings.TrimSuffix(part.FileName(), ".json")
			updates[role], err = ioutil.ReadAll(part)
			require.NoError(t, err)
		}
	}

	server := httptest.NewServer(http.HandlerFunc(handler))
	defer server.Close()

	store, err := NewHTTPStore(server.URL, "metadata", "json", "key", http.DefaultTransport)
	require.NoError(t, err)

	// SetMulti should upload both roles in a single request.
	require.NoError(t, store.SetMulti(metas))
	require.Len(t, updates, 2)
	rd, rok := updates["root"]
	require.True(t, rok)
	require.Equal(t, rd, metas["root"])
	td, tok := updates["targets"]
	require.True(t, tok)
	require.Equal(t, td, metas["targets"])

	// Set should upload exactly one role.
	require.NoError(t, store.Set("root", metas["root"]))
	require.Len(t, updates, 1)
	rd, rok = updates["root"]
	require.True(t, rok)
	require.Equal(t, rd, metas["root"])

	// if there is a network error, it gets translated to NetworkError
	store, err = NewHTTPStore(
		server.URL,
		"metadata",
		"txt",
		"key",
		failRoundTripper{},
	)
	require.NoError(t, err)

	err = store.SetMulti(metas)
	require.IsType(t, NetworkError{}, err)
	require.Equal(t, "FAIL", err.Error())

	err = store.Set("root", metas["root"])
	require.IsType(t, NetworkError{}, err)
	require.Equal(t, "FAIL", err.Error())
}
// We can read and publish from notary0.1 repos
func Test0Dot1RepoFormat(t *testing.T) {
	// make a temporary directory and copy the fixture into it, since updating
	// and publishing will modify the files
	tmpDir, err := ioutil.TempDir("", "notary-backwards-compat-test")
	defer os.RemoveAll(tmpDir)
	require.NoError(t, err)
	require.NoError(t, recursiveCopy("../fixtures/compatibility/notary0.1", tmpDir))

	gun := "docker.com/notary0.1/samplerepo"
	passwd := "randompass"

	ts := fullTestServer(t)
	defer ts.Close()

	repo, err := NewFileCachedNotaryRepository(tmpDir, gun, ts.URL, http.DefaultTransport,
		passphrase.ConstantRetriever(passwd), trustpinning.TrustPinConfig{})
	require.NoError(t, err, "error creating repo: %s", err)

	// targets should have 1 target, and it should be readable offline
	targets, err := repo.ListTargets()
	require.NoError(t, err)
	require.Len(t, targets, 1)
	require.Equal(t, "LICENSE", targets[0].Name)

	// ok, now that everything has been loaded, verify that the fixture is valid
	requireValidFixture(t, repo)

	// delete the timestamp metadata, since the server will ignore the uploaded
	// one and try to create a new one from scratch, which will be the wrong version
	require.NoError(t, repo.cache.Remove(data.CanonicalTimestampRole))

	// rotate the timestamp key, since the server doesn't have that one
	err = repo.RotateKey(data.CanonicalTimestampRole, true)
	require.NoError(t, err)

	require.NoError(t, repo.Publish())

	// after publishing, the repo should have two targets
	targets, err = repo.ListTargets()
	require.NoError(t, err)
	require.Len(t, targets, 2)

	// Also check that we can add/remove keys by rotating keys
	oldTargetsKeys := repo.CryptoService.ListKeys(data.CanonicalTargetsRole)
	require.NoError(t, repo.RotateKey(data.CanonicalTargetsRole, false))
	require.NoError(t, repo.Publish())
	newTargetsKeys := repo.CryptoService.ListKeys(data.CanonicalTargetsRole)

	require.Len(t, oldTargetsKeys, 1)
	require.Len(t, newTargetsKeys, 1)
	require.NotEqual(t, oldTargetsKeys[0], newTargetsKeys[0])

	// rotate the snapshot key to the server and ensure that the server can
	// re-generate the snapshot and we can download the snapshot
	require.NoError(t, repo.RotateKey(data.CanonicalSnapshotRole, true))
	require.NoError(t, repo.Publish())
	err = repo.Update(false)
	require.NoError(t, err)
}
// TestUpdate tests the normal flows of Update.
// TestUpdate performs consecutive calls to Update with both empty and non-empty caches
func TestUpdate(t *testing.T) {
	var (
		cluster      = newRealModel(time.Minute)
		source_cache = cacheFactory()
		empty_cache  = cache.NewCache(24*time.Hour, time.Hour)
		zeroTime     = time.Time{}
		// NOTE: these shadow the testify package names within this function.
		assert  = assert.New(t)
		require = require.New(t)
	)

	// Invocation with empty cache: no nodes, namespaces, or metrics appear.
	assert.NoError(cluster.Update(empty_cache))
	assert.Empty(cluster.Nodes)
	assert.Empty(cluster.Namespaces)
	assert.Empty(cluster.Metrics)

	// Invocation with regular parameters
	assert.NoError(cluster.Update(source_cache))
	verifyCacheFactoryCluster(&cluster.ClusterInfo, t)

	// Assert Node Metric aggregation
	require.NotEmpty(cluster.Nodes)
	require.NotEmpty(cluster.Metrics)
	require.NotNil(cluster.Metrics[memWorking])
	mem_work_ts := *(cluster.Metrics[memWorking])
	actual := mem_work_ts.Hour.Get(zeroTime, zeroTime)
	require.Len(actual, 6)
	// Datapoint present in both nodes
	assert.Equal(actual[0].Value, uint64(602+602))
	assert.Equal(actual[1].Value, 2*memWorkingEpsilon)
	assert.Equal(actual[5].Value, 2*memWorkingEpsilon)

	require.NotNil(cluster.Metrics[memUsage])
	mem_usage_ts := *(cluster.Metrics[memUsage])
	actual = mem_usage_ts.Hour.Get(zeroTime, zeroTime)
	require.Len(actual, 6)
	// Datapoint present in only one node, second node's metric is extended
	assert.Equal(actual[0].Value, uint64(10000))
	// Datapoint present in both nodes, added up to 10000
	assert.Equal(actual[1].Value, 2*memWorkingEpsilon)

	// Assert Kubernetes Metric aggregation up to namespaces
	ns := cluster.Namespaces["test"]
	mem_work_ts = *(ns.Metrics[memWorking])
	actual = mem_work_ts.Hour.Get(zeroTime, zeroTime)
	require.Len(actual, 8)
	assert.Equal(actual[0].Value, uint64(2408))

	// Invocation with no fresh data - expect no change in cluster
	assert.NoError(cluster.Update(source_cache))
	verifyCacheFactoryCluster(&cluster.ClusterInfo, t)

	// Invocation with empty cache - expect no change in cluster
	assert.NoError(cluster.Update(empty_cache))
	verifyCacheFactoryCluster(&cluster.ClusterInfo, t)
}
// The command line uses NotaryRepository's RotateKey - this is just testing
// that multiple keys can be rotated at once locally
func TestRotateKeyBothKeys(t *testing.T) {
	setUp(t)
	// Temporary directory where test files will be created
	tempBaseDir, err := ioutil.TempDir("/tmp", "notary-test-")
	defer os.RemoveAll(tempBaseDir)
	require.NoError(t, err, "failed to create a temporary directory: %s", err)
	gun := "docker.com/notary"

	ret := passphrase.ConstantRetriever("pass")

	ts, initialKeys := setUpRepo(t, tempBaseDir, gun, ret)
	defer ts.Close()

	// Build a key commander wired to the temp trust dir and test server.
	k := &keyCommander{
		configGetter: func() (*viper.Viper, error) {
			v := viper.New()
			v.SetDefault("trust_dir", tempBaseDir)
			v.SetDefault("remote_server.url", ts.URL)
			return v, nil
		},
		getRetriever: func() notary.PassRetriever { return ret },
	}
	// Rotate both the targets and snapshot keys, as the CLI would.
	require.NoError(t, k.keysRotate(&cobra.Command{}, []string{gun, data.CanonicalTargetsRole}))
	require.NoError(t, k.keysRotate(&cobra.Command{}, []string{gun, data.CanonicalSnapshotRole}))

	repo, err := client.NewNotaryRepository(tempBaseDir, gun, ts.URL, nil, ret, trustpinning.TrustPinConfig{})
	require.NoError(t, err, "error creating repo: %s", err)

	// Rotation should leave no pending changes in the changelist.
	cl, err := repo.GetChangelist()
	require.NoError(t, err, "unable to get changelist: %v", err)
	require.Len(t, cl.List(), 0)

	// two new keys have been created, and the old keys should still be gone
	newKeys := repo.CryptoService.ListAllKeys()
	// there should be 3 keys - snapshot, targets, and root
	require.Len(t, newKeys, 3)
	// the old snapshot/targets keys should be gone
	for keyID, role := range initialKeys {
		r, ok := newKeys[keyID]
		switch r {
		case data.CanonicalSnapshotRole, data.CanonicalTargetsRole:
			require.False(t, ok, "original key %s still there", keyID)
		case data.CanonicalRootRole:
			require.Equal(t, role, r)
			require.True(t, ok, "old root key has changed")
		}
	}

	// All three canonical roles must be represented among the new keys.
	found := make(map[string]bool)
	for _, role := range newKeys {
		found[role] = true
	}
	require.True(t, found[data.CanonicalTargetsRole], "targets key was not created")
	require.True(t, found[data.CanonicalSnapshotRole], "snapshot key was not created")
	require.True(t, found[data.CanonicalRootRole], "root key was removed somehow")
}
// TestOtherHostsEntries checks that listeners on two hosts each register
// their own containers in the shared store, and that after a simulated
// agent restart a container stopped in the meantime is dropped from the
// instance set.
func TestOtherHostsEntries(t *testing.T) {
	listener1, st, dc1 := setup("192.168.11.34")
	dc2 := newMockInspector()
	listener2 := NewListener(Config{
		Store:     st,
		HostIP:    "192.168.11.5",
		Inspector: dc2,
	})

	st.AddService("foo-svc", data.Service{})
	addGroup(st, "foo-svc", nil, "image", "foo-image")
	// Two containers per host, matching the service's image group.
	dc1.startContainers(container{
		ID:        "bar1",
		IPAddress: "192.168.34.1",
		Image:     "foo-image:version",
	}, container{
		ID:        "baz1",
		IPAddress: "192.168.34.2",
		Image:     "foo-image:version2",
	})
	dc2.startContainers(container{
		ID:        "bar2",
		IPAddress: "192.168.34.3",
		Image:     "foo-image:version",
	}, container{
		ID:        "baz2",
		IPAddress: "192.168.34.4",
		Image:     "foo-image:version2",
	})

	// let listener on the first host add its instances
	listener1.ReadInServices()
	listener1.ReadExistingContainers()
	require.Len(t, allInstances(st), 2)

	// let listener on the second host add its instances
	listener2.ReadInServices()
	listener2.ReadExistingContainers()
	require.Len(t, allInstances(st), 4)

	// simulate an agent restart; in the meantime, a container has
	// stopped.
	dc2.stopContainer("baz2")
	// NB: the Read* methods assume once-only execution, on startup.
	listener2 = NewListener(Config{
		Store:     st,
		HostIP:    "192.168.11.5",
		Inspector: dc2,
	})
	listener2.ReadExistingContainers()
	listener2.ReadInServices()
	require.Len(t, allInstances(st), 3)
}