Example 1
func TestRotationNewSigMissing(t *testing.T) {
	logrus.SetLevel(logrus.DebugLevel)
	kdb := keys.NewDB()
	signer := signed.NewEd25519()
	repo := tuf.NewRepo(kdb, signer)
	remote := store.NewMemoryStore(nil, nil)
	cache := store.NewMemoryStore(nil, nil)

	// Generate initial root key and role and add to key DB
	rootKey, err := signer.Create("root", data.ED25519Key)
	assert.NoError(t, err, "Error creating root key")
	rootRole, err := data.NewRole("root", 1, []string{rootKey.ID()}, nil, nil)
	assert.NoError(t, err, "Error creating root role")

	kdb.AddKey(rootKey)
	err = kdb.AddRole(rootRole)
	assert.NoError(t, err, "Error adding root role to db")

	// Generate new key and role. These will appear in the root.json
	// but will not be added to the keyDB.
	replacementKey, err := signer.Create("root", data.ED25519Key)
	assert.NoError(t, err, "Error creating replacement root key")
	replacementRole, err := data.NewRole("root", 1, []string{replacementKey.ID()}, nil, nil)
	assert.NoError(t, err, "Error creating replacement root role")

	assert.NotEqual(t, rootKey.ID(), replacementKey.ID(), "Key IDs are the same")

	// Generate a new root with the replacement key and role
	testRoot, err := data.NewRoot(
		map[string]data.PublicKey{replacementKey.ID(): replacementKey},
		map[string]*data.RootRole{"root": &replacementRole.RootRole},
		false,
	)
	assert.NoError(t, err, "Failed to create new root")

	_, ok := testRoot.Signed.Keys[rootKey.ID()]
	assert.False(t, ok, "Old root key appeared in test root")

	// Sign testRoot with only the old key; the replacement key's signature
	// is deliberately left missing
	signedRoot, err := testRoot.ToSigned()
	assert.NoError(t, err, "Failed to convert root to Signed")
	err = signed.Sign(signer, signedRoot, rootKey)
	assert.NoError(t, err, "Failed to sign root")
	var origKeySig bool
	var replKeySig bool
	for _, sig := range signedRoot.Signatures {
		if sig.KeyID == rootKey.ID() {
			origKeySig = true
		} else if sig.KeyID == replacementKey.ID() {
			replKeySig = true
		}
	}
	assert.True(t, origKeySig, "Original root key signature not present")
	assert.False(t, replKeySig, "Replacement root key signature was present and shouldn't be")

	client := NewClient(repo, remote, kdb, cache)

	err = client.verifyRoot("root", signedRoot, 0)
	assert.Error(t, err, "Should have errored on verify as replacement signature was missing.")

}
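The signature-presence loop above reappears verbatim in the rotation test that follows; a minimal helper sketch that factors it out, assuming only the KeyID field on data.Signature that both loops already read:

// hasSigFrom reports whether sigs contains a signature made by keyID.
func hasSigFrom(sigs []data.Signature, keyID string) bool {
	for _, sig := range sigs {
		if sig.KeyID == keyID {
			return true
		}
	}
	return false
}

The two checks then collapse to assert.True(t, hasSigFrom(signedRoot.Signatures, rootKey.ID()), ...) and assert.False(t, hasSigFrom(signedRoot.Signatures, replacementKey.ID()), ...).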
Example 2
func TestRotation(t *testing.T) {
	signer := signed.NewEd25519()
	repo := tuf.NewRepo(signer)
	remote := store.NewMemoryStore(nil)
	cache := store.NewMemoryStore(nil)

	// Generate initial root key and role and add to key DB
	rootKey, err := signer.Create("root", data.ED25519Key)
	assert.NoError(t, err, "Error creating root key")
	rootRole, err := data.NewRole("root", 1, []string{rootKey.ID()}, nil)
	assert.NoError(t, err, "Error creating root role")

	originalRoot, err := data.NewRoot(
		map[string]data.PublicKey{rootKey.ID(): rootKey},
		map[string]*data.RootRole{"root": &rootRole.RootRole},
		false,
	)
	assert.NoError(t, err, "Failed to create original root")

	repo.Root = originalRoot

	// Generate new key and role.
	replacementKey, err := signer.Create("root", data.ED25519Key)
	assert.NoError(t, err, "Error creating replacement root key")
	replacementRole, err := data.NewRole("root", 1, []string{replacementKey.ID()}, nil)
	assert.NoError(t, err, "Error creating replacement root role")

	// Generate a new root with the replacement key and role
	testRoot, err := data.NewRoot(
		map[string]data.PublicKey{replacementKey.ID(): replacementKey},
		map[string]*data.RootRole{
			data.CanonicalRootRole:      &replacementRole.RootRole,
			data.CanonicalSnapshotRole:  &replacementRole.RootRole,
			data.CanonicalTargetsRole:   &replacementRole.RootRole,
			data.CanonicalTimestampRole: &replacementRole.RootRole,
		},
		false,
	)
	assert.NoError(t, err, "Failed to create new root")

	// Sign testRoot with both old and new keys
	signedRoot, err := testRoot.ToSigned()
	assert.NoError(t, err, "Failed to convert root to Signed")
	err = signed.Sign(signer, signedRoot, rootKey, replacementKey)
	assert.NoError(t, err, "Failed to sign root")
	var origKeySig bool
	var replKeySig bool
	for _, sig := range signedRoot.Signatures {
		if sig.KeyID == rootKey.ID() {
			origKeySig = true
		} else if sig.KeyID == replacementKey.ID() {
			replKeySig = true
		}
	}
	assert.True(t, origKeySig, "Original root key signature not present")
	assert.True(t, replKeySig, "Replacement root key signature not present")

	client := NewClient(repo, remote, cache)

	err = client.verifyRoot("root", signedRoot, 0)
	assert.NoError(t, err, "Failed to verify key rotated root")
}
Example 3
func (r *NotaryRepository) bootstrapClient() (*tufclient.Client, error) {
	var rootJSON []byte
	remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
	if err == nil {
		// if remote store successfully set up, try and get root from remote
		rootJSON, err = remote.GetMeta("root", maxSize)
	}

	// if the remote store couldn't be set up, or we failed to get a root from
	// it, load the root from cache (offline operation)
	if err != nil {
		if err, ok := err.(store.ErrMetaNotFound); ok {
			// if the error was MetaNotFound then we successfully contacted
			// the store and it doesn't know about the repo.
			return nil, err
		}
		result, cacheErr := r.fileStore.GetMeta("root", maxSize)
		if cacheErr != nil {
			// if cache didn't return a root, we cannot proceed - just return
			// the original error.
			return nil, err
		}
		rootJSON = result
		logrus.Debugf(
			"Using local cache instead of remote due to failure: %s", err.Error())
	}
	// can't just unmarshal into SignedRoot because validate root
	// needs the root.Signed field to still be []byte for signature
	// validation
	root := &data.Signed{}
	err = json.Unmarshal(rootJSON, root)
	if err != nil {
		return nil, err
	}

	err = r.CertManager.ValidateRoot(root, r.gun)
	if err != nil {
		return nil, err
	}

	kdb := keys.NewDB()
	r.tufRepo = tuf.NewRepo(kdb, r.CryptoService)

	signedRoot, err := data.RootFromSigned(root)
	if err != nil {
		return nil, err
	}
	err = r.tufRepo.SetRoot(signedRoot)
	if err != nil {
		return nil, err
	}

	return tufclient.NewClient(
		r.tufRepo,
		remote,
		kdb,
		r.fileStore,
	), nil
}
Example 4
// DeleteTrustData removes the trust data stored for this repo in the TUF cache on the client side
func (r *NotaryRepository) DeleteTrustData() error {
	// Clear TUF files and cache
	if err := r.fileStore.RemoveAll(); err != nil {
		return fmt.Errorf("error clearing TUF repo data: %v", err)
	}
	r.tufRepo = tuf.NewRepo(nil)
	return nil
}
Example 5
func TestValidateTargetsRoleNotInParent(t *testing.T) {
	baseRepo, cs, err := testutils.EmptyRepo("docker.com/notary")
	assert.NoError(t, err)
	store := storage.NewMemStorage()

	level1Key, err := cs.Create("targets/level1", data.ED25519Key)
	assert.NoError(t, err)
	r, err := data.NewRole("targets/level1", 1, []string{level1Key.ID()}, []string{""})
	assert.NoError(t, err)

	baseRepo.Targets[data.CanonicalTargetsRole].Signed.Delegations.Roles = []*data.Role{r}
	baseRepo.Targets[data.CanonicalTargetsRole].Signed.Delegations.Keys = data.Keys{
		level1Key.ID(): level1Key,
	}

	baseRepo.InitTargets("targets/level1")

	del, err := baseRepo.SignTargets("targets/level1", data.DefaultExpires(data.CanonicalTargetsRole))
	assert.NoError(t, err)
	delJSON, err := json.Marshal(del)
	assert.NoError(t, err)

	delUpdate := storage.MetaUpdate{
		Role:    "targets/level1",
		Version: 1,
		Data:    delJSON,
	}

	// set back to empty so stored targets doesn't have reference to level1
	baseRepo.Targets[data.CanonicalTargetsRole].Signed.Delegations.Roles = nil
	baseRepo.Targets[data.CanonicalTargetsRole].Signed.Delegations.Keys = nil
	targets, err := baseRepo.SignTargets(data.CanonicalTargetsRole, data.DefaultExpires(data.CanonicalTargetsRole))
	assert.NoError(t, err)

	tgtsJSON, err := json.Marshal(targets)
	assert.NoError(t, err)
	update := storage.MetaUpdate{
		Role:    data.CanonicalTargetsRole,
		Version: 1,
		Data:    tgtsJSON,
	}
	store.UpdateCurrent("gun", update)

	roles := map[string]storage.MetaUpdate{
		"targets/level1":          delUpdate,
		data.CanonicalTargetsRole: update,
	}

	valRepo := tuf.NewRepo(nil)
	valRepo.SetRoot(baseRepo.Root)

	// "targets/level1" is not in the stored parent's delegations, so its
	// update is dropped and only the base "targets" update is returned
	updates, err := loadAndValidateTargets("gun", valRepo, roles, store)
	assert.NoError(t, err)
	assert.Len(t, updates, 1)
	assert.Equal(t, data.CanonicalTargetsRole, updates[0].Role)
	assert.Equal(t, tgtsJSON, updates[0].Data)
}
Example 6
func TestValidateTargetsParentInUpdate(t *testing.T) {
	_, baseRepo, cs, err := testutils.EmptyRepo("docker.com/notary")
	assert.NoError(t, err)
	store := storage.NewMemStorage()

	k, err := cs.Create("targets/level1", data.ED25519Key)
	assert.NoError(t, err)
	r, err := data.NewRole("targets/level1", 1, []string{k.ID()}, []string{""}, nil)
	assert.NoError(t, err)

	err = baseRepo.UpdateDelegations(r, []data.PublicKey{k})
	assert.NoError(t, err)

	// no targets file is created for the new delegations, so force one
	baseRepo.InitTargets("targets/level1")

	targets, err := baseRepo.SignTargets("targets", data.DefaultExpires(data.CanonicalTargetsRole))
	assert.NoError(t, err)

	tgtsJSON, err := json.Marshal(targets)
	assert.NoError(t, err)
	update := storage.MetaUpdate{
		Role:    data.CanonicalTargetsRole,
		Version: 1,
		Data:    tgtsJSON,
	}
	store.UpdateCurrent("gun", update)

	del, err := baseRepo.SignTargets("targets/level1", data.DefaultExpires(data.CanonicalTargetsRole))
	assert.NoError(t, err)
	delJSON, err := json.Marshal(del)
	assert.NoError(t, err)

	delUpdate := storage.MetaUpdate{
		Role:    "targets/level1",
		Version: 1,
		Data:    delJSON,
	}

	roles := map[string]storage.MetaUpdate{
		"targets/level1": delUpdate,
		"targets":        update,
	}

	kdb := keys.NewDB()
	valRepo := tuf.NewRepo(kdb, nil)
	valRepo.SetRoot(baseRepo.Root)

	// because we sort the roles, the list of returned updates
	// will contain shallower roles first, in this case "targets",
	// and then "targets/level1"
	updates, err := loadAndValidateTargets("gun", valRepo, roles, kdb, store)
	assert.NoError(t, err)
	assert.Len(t, updates, 2)
	assert.Equal(t, "targets", updates[0].Role)
	assert.Equal(t, tgtsJSON, updates[0].Data)
	assert.Equal(t, "targets/level1", updates[1].Role)
	assert.Equal(t, delJSON, updates[1].Data)
}
Example 7
func TestValidateTargetsRoleNotInParent(t *testing.T) {
	kdb, baseRepo, cs := testutils.EmptyRepo()
	store := storage.NewMemStorage()

	k, err := cs.Create("targets/level1", data.ED25519Key)
	assert.NoError(t, err)
	r, err := data.NewRole("targets/level1", 1, []string{k.ID()}, []string{""}, nil)
	assert.NoError(t, err)

	kdb.AddKey(k)
	err = kdb.AddRole(r)
	assert.NoError(t, err)

	baseRepo.InitTargets("targets/level1")

	targets, err := baseRepo.SignTargets("targets", data.DefaultExpires(data.CanonicalTargetsRole))
	assert.NoError(t, err)

	tgtsJSON, err := json.MarshalCanonical(targets)
	assert.NoError(t, err)
	update := storage.MetaUpdate{
		Role:    data.CanonicalTargetsRole,
		Version: 1,
		Data:    tgtsJSON,
	}
	store.UpdateCurrent("gun", update)

	del, err := baseRepo.SignTargets("targets/level1", data.DefaultExpires(data.CanonicalTargetsRole))
	assert.NoError(t, err)
	delJSON, err := json.MarshalCanonical(del)
	assert.NoError(t, err)

	delUpdate := storage.MetaUpdate{
		Role:    "targets/level1",
		Version: 1,
		Data:    delJSON,
	}

	roles := map[string]storage.MetaUpdate{
		"targets/level1": delUpdate,
		"targets":        update,
	}

	kdb = keys.NewDB()
	valRepo := tuf.NewRepo(kdb, nil)
	valRepo.SetRoot(baseRepo.Root)

	// the fresh key DB and the stored parent targets file have no record of
	// "targets/level1", so its update is dropped and only the base "targets"
	// update is returned
	updates, err := loadAndValidateTargets("gun", valRepo, roles, kdb, store)
	assert.NoError(t, err)
	assert.Len(t, updates, 1)
	assert.Equal(t, "targets", updates[0].Role)
	assert.Equal(t, tgtsJSON, updates[0].Data)
}
Example 8
func TestValidateTargetsLoadParent(t *testing.T) {
	_, baseRepo, cs, err := testutils.EmptyRepo("docker.com/notary")
	assert.NoError(t, err)
	store := storage.NewMemStorage()

	k, err := cs.Create("targets/level1", data.ED25519Key)
	assert.NoError(t, err)
	r, err := data.NewRole("targets/level1", 1, []string{k.ID()}, []string{""}, nil)
	assert.NoError(t, err)

	err = baseRepo.UpdateDelegations(r, []data.PublicKey{k})
	assert.NoError(t, err)

	// no targets file is created for the new delegations, so force one
	baseRepo.InitTargets("targets/level1")

	// we're not going to validate things loaded from storage, so no need
	// to sign the base targets, just Marshal it and set it into storage
	tgtsJSON, err := json.Marshal(baseRepo.Targets["targets"])
	assert.NoError(t, err)
	update := storage.MetaUpdate{
		Role:    data.CanonicalTargetsRole,
		Version: 1,
		Data:    tgtsJSON,
	}
	store.UpdateCurrent("gun", update)

	// generate the update object we're going to use to call loadAndValidateTargets
	del, err := baseRepo.SignTargets("targets/level1", data.DefaultExpires(data.CanonicalTargetsRole))
	assert.NoError(t, err)
	delJSON, err := json.Marshal(del)
	assert.NoError(t, err)

	delUpdate := storage.MetaUpdate{
		Role:    "targets/level1",
		Version: 1,
		Data:    delJSON,
	}

	roles := map[string]storage.MetaUpdate{"targets/level1": delUpdate}

	kdb := keys.NewDB()
	valRepo := tuf.NewRepo(kdb, nil)
	valRepo.SetRoot(baseRepo.Root)

	updates, err := loadAndValidateTargets("gun", valRepo, roles, kdb, store)
	assert.NoError(t, err)
	assert.Len(t, updates, 1)
	assert.Equal(t, "targets/level1", updates[0].Role)
	assert.Equal(t, delJSON, updates[0].Data)
}
Example 9
// This changes the root key
func TestSwizzlerChangeRootKey(t *testing.T) {
	f, origMeta := createNewSwizzler(t)

	f.ChangeRootKey()

	tufRepo := tuf.NewRepo(f.CryptoService)

	// we want to test these in a specific order
	roles := []string{data.CanonicalRootRole, data.CanonicalTargetsRole, data.CanonicalSnapshotRole,
		data.CanonicalTimestampRole, "targets/a", "targets/a/b"}

	for _, role := range roles {
		origMeta := origMeta[role]
		newMeta, err := f.MetadataCache.GetMeta(role, -1)
		require.NoError(t, err)

		// the threshold for base roles is set in root
		switch role {
		case data.CanonicalRootRole:
			require.False(t, bytes.Equal(origMeta, newMeta))
			origRoot, newRoot := &data.SignedRoot{}, &data.SignedRoot{}
			require.NoError(t, json.Unmarshal(origMeta, origRoot))
			require.NoError(t, json.Unmarshal(newMeta, newRoot))

			require.NotEqual(t, len(origRoot.Signed.Keys), len(newRoot.Signed.Keys))

			var rootRole data.Role
			for r, origRole := range origRoot.Signed.Roles {
				newRole := newRoot.Signed.Roles[r]
				require.Len(t, origRole.KeyIDs, 1)
				require.Len(t, newRole.KeyIDs, 1)
				if r == data.CanonicalRootRole {
					require.NotEqual(t, origRole.KeyIDs[0], newRole.KeyIDs[0])
					rootRole = data.Role{RootRole: *newRole, Name: data.CanonicalRootRole}
				} else {
					require.Equal(t, origRole.KeyIDs[0], newRole.KeyIDs[0])
				}
			}

			require.NoError(t, tufRepo.SetRoot(newRoot))
			signedThing, err := newRoot.ToSigned()
			require.NoError(t, err)
			newKey := newRoot.Signed.Keys[rootRole.KeyIDs[0]]
			require.NoError(t, signed.Verify(signedThing,
				data.BaseRole{Name: data.CanonicalRootRole, Keys: map[string]data.PublicKey{newKey.ID(): newKey}, Threshold: 1}, 1))
		default:
			require.True(t, bytes.Equal(origMeta, newMeta), "bytes have changed for role %s", role)
		}
	}
}
Example 10
// EmptyRepo creates an in-memory crypto service
// and initializes a repo with no targets. Delegations are only created
// if delegation roles are passed in.
func EmptyRepo(gun string, delegationRoles ...string) (*tuf.Repo, signed.CryptoService, error) {
	cs := cryptoservice.NewCryptoService(
		gun, trustmanager.NewKeyMemoryStore(passphrase.ConstantRetriever("")))
	r := tuf.NewRepo(cs)

	baseRoles := map[string]data.BaseRole{}
	for _, role := range data.BaseRoles {
		key, err := createKey(cs, gun, role)
		if err != nil {
			return nil, nil, err
		}
		baseRoles[role] = data.NewBaseRole(
			role,
			1,
			key,
		)
	}

	r.InitRoot(
		baseRoles[data.CanonicalRootRole],
		baseRoles[data.CanonicalTimestampRole],
		baseRoles[data.CanonicalSnapshotRole],
		baseRoles[data.CanonicalTargetsRole],
		false,
	)
	r.InitTargets(data.CanonicalTargetsRole)
	r.InitSnapshot()
	r.InitTimestamp()

	// sort the delegation roles so that we make sure to create the parents
	// first
	sort.Strings(delegationRoles)
	for _, delgName := range delegationRoles {
		// create a delegations key and a delegation in the tuf repo
		delgKey, err := createKey(cs, gun, delgName)
		if err != nil {
			return nil, nil, err
		}
		role, err := data.NewRole(delgName, 1, []string{}, []string{""})
		if err != nil {
			return nil, nil, err
		}
		if err := r.UpdateDelegations(role, []data.PublicKey{delgKey}); err != nil {
			return nil, nil, err
		}
	}

	return r, cs, nil
}
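A usage sketch (in a test body) for this three-value variant of EmptyRepo; the gun and delegation names are illustrative, and the two-argument cs.Create matches the call shape used by the tests in this collection:

	// build a repo with one delegation and sign its (empty) base targets
	repo, cs, err := testutils.EmptyRepo("docker.com/notary", "targets/level1")
	assert.NoError(t, err)

	_, err = repo.SignTargets(data.CanonicalTargetsRole, data.DefaultExpires(data.CanonicalTargetsRole))
	assert.NoError(t, err)

	// further delegation keys come from the returned crypto service
	_, err = cs.Create("targets/level2", data.ED25519Key)
	assert.NoError(t, err)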
Example 11
// EmptyRepo creates an in-memory key database and crypto service,
// and initializes a repo with no targets or delegations.
func EmptyRepo() (*keys.KeyDB, *tuf.Repo, signed.CryptoService) {
	c := signed.NewEd25519()
	kdb := keys.NewDB()
	r := tuf.NewRepo(kdb, c)

	for _, role := range []string{"root", "targets", "snapshot", "timestamp"} {
		key, _ := c.Create(role, data.ED25519Key)
		role, _ := data.NewRole(role, 1, []string{key.ID()}, nil, nil)
		kdb.AddKey(key)
		kdb.AddRole(role)
	}

	r.InitRepo(false)
	return kdb, r, c
}
Example 12
func TestChecksumMatch(t *testing.T) {
	repo := tuf.NewRepo(nil, nil)
	localStorage := store.NewMemoryStore(nil, nil)
	remoteStorage := store.NewMemoryStore(nil, nil)
	client := NewClient(repo, remoteStorage, nil, localStorage)

	sampleTargets := data.NewTargets()
	orig, err := json.Marshal(sampleTargets)
	origSha256 := sha256.Sum256(orig)
	assert.NoError(t, err)

	remoteStorage.SetMeta("targets", orig)

	_, _, err = client.downloadSigned("targets", int64(len(orig)), origSha256[:])
	assert.NoError(t, err)
}
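A mismatch counterpart, sketched under the same setup: serving tampered bytes while still expecting the original digest should surface ErrChecksumMismatch, the same error type the short-read test later in this collection asserts.

	// corrupt the stored bytes but keep the original checksum expectation
	tampered := append(orig, ' ')
	remoteStorage.SetMeta("targets", tampered)

	_, _, err = client.downloadSigned("targets", int64(len(tampered)), origSha256[:])
	assert.IsType(t, ErrChecksumMismatch{}, err)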
Example 13
func TestApplyChangelist(t *testing.T) {
	kdb := keys.NewDB()
	role, err := data.NewRole("targets", 1, nil, nil, nil)
	assert.NoError(t, err)
	kdb.AddRole(role)

	repo := tuf.NewRepo(kdb, nil)
	err = repo.InitTargets()
	assert.NoError(t, err)
	hash := sha256.Sum256([]byte{})
	f := &data.FileMeta{
		Length: 1,
		Hashes: map[string][]byte{
			"sha256": hash[:],
		},
	}
	fjson, err := json.Marshal(f)
	assert.NoError(t, err)

	cl := changelist.NewMemChangelist()
	addChange := &changelist.TufChange{
		Actn:       changelist.ActionCreate,
		Role:       changelist.ScopeTargets,
		ChangeType: "target",
		ChangePath: "latest",
		Data:       fjson,
	}
	cl.Add(addChange)
	err = applyChangelist(repo, cl)
	assert.NoError(t, err)
	assert.NotNil(t, repo.Targets["targets"].Signed.Targets["latest"])

	cl.Clear("")

	removeChange := &changelist.TufChange{
		Actn:       changelist.ActionDelete,
		Role:       changelist.ScopeTargets,
		ChangeType: "target",
		ChangePath: "latest",
		Data:       nil,
	}
	cl.Add(removeChange)
	err = applyChangelist(repo, cl)
	assert.NoError(t, err)
	_, ok := repo.Targets["targets"].Signed.Targets["latest"]
	assert.False(t, ok)
}
Example 14
// bootstrapRepo loads the repository from the local file system.  This attempts
// to load metadata for all roles.  Since server snapshots are supported,
// if the snapshot metadata fails to load, that's ok.
// This can also be unified with some cache reading tools from tuf/client.
// This assumes that bootstrapRepo is only used by Publish()
func (r *NotaryRepository) bootstrapRepo() error {
	kdb := keys.NewDB()
	tufRepo := tuf.NewRepo(kdb, r.CryptoService)

	logrus.Debugf("Loading trusted collection.")
	rootJSON, err := r.fileStore.GetMeta("root", 0)
	if err != nil {
		return err
	}
	root := &data.SignedRoot{}
	err = json.Unmarshal(rootJSON, root)
	if err != nil {
		return err
	}
	err = tufRepo.SetRoot(root)
	if err != nil {
		return err
	}
	targetsJSON, err := r.fileStore.GetMeta("targets", 0)
	if err != nil {
		return err
	}
	targets := &data.SignedTargets{}
	err = json.Unmarshal(targetsJSON, targets)
	if err != nil {
		return err
	}
	tufRepo.SetTargets("targets", targets)

	snapshotJSON, err := r.fileStore.GetMeta("snapshot", 0)
	if err == nil {
		snapshot := &data.SignedSnapshot{}
		err = json.Unmarshal(snapshotJSON, snapshot)
		if err != nil {
			return err
		}
		tufRepo.SetSnapshot(snapshot)
	} else if _, ok := err.(store.ErrMetaNotFound); !ok {
		return err
	}

	r.tufRepo = tufRepo

	return nil
}
Example 15
// DeleteTrustData removes the trust data stored for this repo in the TUF cache on the client side
// Note that we will not delete any private key material from local storage
func (r *NotaryRepository) DeleteTrustData(deleteRemote bool) error {
	// Clear local TUF files and cache
	if err := r.fileStore.RemoveAll(); err != nil {
		return fmt.Errorf("error clearing TUF repo data: %v", err)
	}
	r.tufRepo = tuf.NewRepo(nil)

	// Note that this will require admin permission in this NotaryRepository's roundtripper
	if deleteRemote {
		remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
		if err != nil {
			return err
		}
		if err := remote.RemoveAll(); err != nil {
			return err
		}
	}
	return nil
}
Example 16
func TestSizeMismatchShort(t *testing.T) {
	repo := tuf.NewRepo(nil)
	localStorage := store.NewMemoryStore(nil)
	remoteStorage := testutils.NewShortMemoryStore(nil)
	client := NewClient(repo, remoteStorage, localStorage)

	sampleTargets := data.NewTargets()
	orig, err := json.Marshal(sampleTargets)
	origSha256 := sha256.Sum256(orig)
	assert.NoError(t, err)
	l := int64(len(orig))

	remoteStorage.SetMeta("targets", orig)

	_, _, err = client.downloadSigned("targets", l, origSha256[:])
	// size just limits the data received, the error is caught
	// either during checksum verification or during json deserialization
	assert.IsType(t, ErrChecksumMismatch{}, err)
}
Example 17
// DeleteTrustData removes the trust data stored for this repo in the TUF cache and certificate store on the client side
func (r *NotaryRepository) DeleteTrustData() error {
	// Clear TUF files and cache
	if err := r.fileStore.RemoveAll(); err != nil {
		return fmt.Errorf("error clearing TUF repo data: %v", err)
	}
	r.tufRepo = tuf.NewRepo(nil)
	// Clear certificates
	certificates, err := r.CertStore.GetCertificatesByCN(r.gun)
	if err != nil {
		// If there were no certificates to delete, we're done
		if _, ok := err.(*trustmanager.ErrNoCertificatesFound); ok {
			return nil
		}
		return fmt.Errorf("error retrieving certificates for %s: %v", r.gun, err)
	}
	for _, cert := range certificates {
		if err := r.CertStore.RemoveCert(cert); err != nil {
			return fmt.Errorf("error removing certificate: %v: %v", cert, err)
		}
	}
	return nil
}
Example 18
// EmptyRepo creates an in-memory key database and crypto service,
// and initializes a repo with no targets. Delegations are only created
// if delegation roles are passed in.
func EmptyRepo(gun string, delegationRoles ...string) (*keys.KeyDB, *tuf.Repo, signed.CryptoService, error) {
	cs := cryptoservice.NewCryptoService(
		gun, trustmanager.NewKeyMemoryStore(passphrase.ConstantRetriever("")))
	kdb := keys.NewDB()
	r := tuf.NewRepo(kdb, cs)

	for _, role := range data.BaseRoles {
		key, err := createKey(cs, gun, role)
		if err != nil {
			return nil, nil, nil, err
		}
		role, _ := data.NewRole(role, 1, []string{key.ID()}, nil, nil)
		kdb.AddKey(key)
		kdb.AddRole(role)
	}

	r.InitRepo(false)

	// sort the delegation roles so that we make sure to create the parents
	// first
	sort.Strings(delegationRoles)
	for _, delgName := range delegationRoles {
		// create a delegations key and a delegation in the tuf repo
		delgKey, err := createKey(cs, gun, delgName)
		if err != nil {
			return nil, nil, nil, err
		}
		role, err := data.NewRole(delgName, 1, []string{}, []string{""}, []string{})
		if err != nil {
			return nil, nil, nil, err
		}
		if err := r.UpdateDelegations(role, []data.PublicKey{delgKey}); err != nil {
			return nil, nil, nil, err
		}
	}

	return kdb, r, cs, nil
}
Example 19
func TestValidateTargetsParentNotFound(t *testing.T) {
	_, baseRepo, cs, err := testutils.EmptyRepo("docker.com/notary")
	assert.NoError(t, err)
	store := storage.NewMemStorage()

	k, err := cs.Create("targets/level1", data.ED25519Key)
	assert.NoError(t, err)
	r, err := data.NewRole("targets/level1", 1, []string{k.ID()}, []string{""}, nil)
	assert.NoError(t, err)

	err = baseRepo.UpdateDelegations(r, []data.PublicKey{k})
	assert.NoError(t, err)

	// no targets file is created for the new delegations, so force one
	baseRepo.InitTargets("targets/level1")

	// generate the update object we're going to use to call loadAndValidateTargets
	del, err := baseRepo.SignTargets("targets/level1", data.DefaultExpires(data.CanonicalTargetsRole))
	assert.NoError(t, err)
	delJSON, err := json.Marshal(del)
	assert.NoError(t, err)

	delUpdate := storage.MetaUpdate{
		Role:    "targets/level1",
		Version: 1,
		Data:    delJSON,
	}

	roles := map[string]storage.MetaUpdate{"targets/level1": delUpdate}

	kdb := keys.NewDB()
	valRepo := tuf.NewRepo(kdb, nil)
	valRepo.SetRoot(baseRepo.Root)

	_, err = loadAndValidateTargets("gun", valRepo, roles, kdb, store)
	assert.Error(t, err)
	assert.IsType(t, storage.ErrNotFound{}, err)
}
Example 20
func TestCheckRootExpired(t *testing.T) {
	repo := tuf.NewRepo(nil, nil)
	storage := store.NewMemoryStore(nil, nil)
	client := NewClient(repo, storage, nil, storage)

	root := &data.SignedRoot{}
	root.Signed.Expires = time.Now().AddDate(-1, 0, 0)

	signedRoot, err := root.ToSigned()
	assert.NoError(t, err)
	rootJSON, err := json.Marshal(signedRoot)
	assert.NoError(t, err)

	rootHash := sha256.Sum256(rootJSON)

	testSnap := &data.SignedSnapshot{
		Signed: data.Snapshot{
			Meta: map[string]data.FileMeta{
				"root": {
					Length: int64(len(rootJSON)),
					Hashes: map[string][]byte{
						"sha256": rootHash[:],
					},
				},
			},
		},
	}
	repo.SetRoot(root)
	repo.SetSnapshot(testSnap)

	storage.SetMeta("root", rootJSON)

	err = client.checkRoot()
	assert.Error(t, err)
	assert.IsType(t, tuf.ErrLocalRootExpired{}, err)
}
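The mirrored happy path, as a sketch under the same setup (assuming checkRoot validates only the expiry and the snapshot's record of root): push the expiry into the future, rebuild the hash bookkeeping, and the same check should pass.

	// a root expiring a year from now, with a matching snapshot entry
	root.Signed.Expires = time.Now().AddDate(1, 0, 0)
	signedRoot, err = root.ToSigned()
	assert.NoError(t, err)
	rootJSON, err = json.Marshal(signedRoot)
	assert.NoError(t, err)
	rootHash = sha256.Sum256(rootJSON)
	testSnap.Signed.Meta["root"] = data.FileMeta{
		Length: int64(len(rootJSON)),
		Hashes: map[string][]byte{"sha256": rootHash[:]},
	}
	repo.SetRoot(root)
	repo.SetSnapshot(testSnap)
	storage.SetMeta("root", rootJSON)

	assert.NoError(t, client.checkRoot())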
Example 21
// ### generateSnapshot tests ###
func TestGenerateSnapshotNoRole(t *testing.T) {
	repo := tuf.NewRepo(nil)
	_, err := generateSnapshot("gun", repo, nil)
	assert.Error(t, err)
	assert.IsType(t, validation.ErrBadRoot{}, err)
}
Example 22
// validateUpdate checks that the updates being pushed
// are semantically correct and correctly signed
func validateUpdate(gun string, updates []storage.MetaUpdate, store storage.MetaStore) error {
	kdb := keys.NewDB()
	repo := tuf.NewRepo(kdb, nil)
	rootRole := data.RoleName(data.CanonicalRootRole)
	targetsRole := data.RoleName(data.CanonicalTargetsRole)
	snapshotRole := data.RoleName(data.CanonicalSnapshotRole)

	// check that the necessary roles are present:
	roles := make(map[string]storage.MetaUpdate)
	for _, v := range updates {
		roles[v.Role] = v
	}
	if err := hierarchyOK(roles); err != nil {
		logrus.Error("ErrBadHierarchy: ", err.Error())
		return ErrBadHierarchy{msg: err.Error()}
	}
	logrus.Debug("Successfully validated hierarchy")

	var root *data.SignedRoot
	oldRootJSON, err := store.GetCurrent(gun, rootRole)
	if _, ok := err.(*storage.ErrNotFound); err != nil && !ok {
		// problem with storage. No expectation we can
		// write if we can't read so bail.
		logrus.Error("error reading previous root: ", err.Error())
		return err
	}
	if rootUpdate, ok := roles[rootRole]; ok {
		// if root is present, validate its integrity, possibly
		// against a previous root
		if root, err = validateRoot(gun, oldRootJSON, rootUpdate.Data); err != nil {
			logrus.Error("ErrBadRoot: ", err.Error())
			return ErrBadRoot{msg: err.Error()}
		}
		// setting root will update keys db
		if err = repo.SetRoot(root); err != nil {
			logrus.Error("ErrValidation: ", err.Error())
			return ErrValidation{msg: err.Error()}
		}
		logrus.Debug("Successfully validated root")
	} else {
		if oldRootJSON == nil {
			return ErrValidation{msg: "no pre-existing root and no root provided in update."}
		}
		parsedOldRoot := &data.SignedRoot{}
		if err := json.Unmarshal(oldRootJSON, parsedOldRoot); err != nil {
			return ErrValidation{msg: "pre-existing root is corrupted and no root provided in update."}
		}
		if err = repo.SetRoot(parsedOldRoot); err != nil {
			logrus.Error("ErrValidation: ", err.Error())
			return ErrValidation{msg: err.Error()}
		}
	}

	// TODO: validate delegated targets roles.
	var t *data.SignedTargets
	if _, ok := roles[targetsRole]; ok {
		if t, err = validateTargets(targetsRole, roles, kdb); err != nil {
			logrus.Error("ErrBadTargets: ", err.Error())
			return ErrBadTargets{msg: err.Error()}
		}
		repo.SetTargets(targetsRole, t)
	}
	logrus.Debug("Successfully validated targets")

	var oldSnap *data.SignedSnapshot
	oldSnapJSON, err := store.GetCurrent(gun, snapshotRole)
	if _, ok := err.(*storage.ErrNotFound); err != nil && !ok {
		// problem with storage. No expectation we can
		// write if we can't read so bail.
		logrus.Error("error reading previous snapshot: ", err.Error())
		return err
	} else if err == nil {
		oldSnap = &data.SignedSnapshot{}
		if err := json.Unmarshal(oldSnapJSON, oldSnap); err != nil {
			oldSnap = nil
		}
	}

	if err := validateSnapshot(snapshotRole, oldSnap, roles[snapshotRole], roles, kdb); err != nil {
		logrus.Error("ErrBadSnapshot: ", err.Error())
		return ErrBadSnapshot{msg: err.Error()}
	}
	logrus.Debug("Successfully validated snapshot")
	return nil
}
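A minimal call sketch for this server-side entry point, assuming a handler has already decoded the client's push into storage.MetaUpdate values; the gun and the rootJSON/targetsJSON/snapshotJSON variables are hypothetical:

	updates := []storage.MetaUpdate{
		{Role: "root", Version: 2, Data: rootJSON},
		{Role: "targets", Version: 2, Data: targetsJSON},
		{Role: "snapshot", Version: 2, Data: snapshotJSON},
	}
	if err := validateUpdate("docker.com/notary", updates, store); err != nil {
		// reject the push; the concrete error type (ErrBadRoot, ErrBadTargets,
		// ErrBadSnapshot, ErrBadHierarchy) indicates which stage failed
		return err
	}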
Example 23
// bootstrapClient attempts to bootstrap a root.json to be used as the trust
// anchor for a repository. The checkInitialized argument indicates whether
// we should always attempt to contact the server to determine if the repository
// is initialized or not. If set to true, we will always attempt to download
// and return an error if the remote repository errors.
func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Client, error) {
	var (
		rootJSON   []byte
		err        error
		signedRoot *data.SignedRoot
	)
	// try to read root from cache first. We will trust this root
	// until we detect a problem during update which will cause
	// us to download a new root and perform a rotation.
	rootJSON, cachedRootErr := r.fileStore.GetMeta("root", maxSize)

	if cachedRootErr == nil {
		signedRoot, cachedRootErr = r.validateRoot(rootJSON)
	}

	remote, remoteErr := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
	if remoteErr != nil {
		logrus.Error(remoteErr)
	} else if cachedRootErr != nil || checkInitialized {
		// remoteErr was nil and we had a cachedRootErr (or are specifically
		// checking for initialization of the repo).

		// if remote store successfully set up, try and get root from remote
		tmpJSON, err := remote.GetMeta("root", maxSize)
		if err != nil {
			// we didn't have a root in cache and were unable to load one from
			// the server. Nothing we can do but error.
			return nil, err
		}
		if cachedRootErr != nil {
			// we always want to use the downloaded root if there was a cache
			// error.
			signedRoot, err = r.validateRoot(tmpJSON)
			if err != nil {
				return nil, err
			}

			err = r.fileStore.SetMeta("root", tmpJSON)
			if err != nil {
				// if we can't write cache we should still continue, just log error
				logrus.Errorf("could not save root to cache: %s", err.Error())
			}
		}
	}

	kdb := keys.NewDB()
	r.tufRepo = tuf.NewRepo(kdb, r.CryptoService)

	if signedRoot == nil {
		return nil, ErrRepoNotInitialized{}
	}

	err = r.tufRepo.SetRoot(signedRoot)
	if err != nil {
		return nil, err
	}

	return tufclient.NewClient(
		r.tufRepo,
		remote,
		kdb,
		r.fileStore,
	), nil
}
Example 24
// validateUpdate checks that the updates being pushed
// are semantically correct and correctly signed.
// A list of possibly modified updates is returned if all
// validation was successful. This allows the snapshot to be
// created and added if snapshotting has been delegated to the
// server.
func validateUpdate(cs signed.CryptoService, gun string, updates []storage.MetaUpdate, store storage.MetaStore) ([]storage.MetaUpdate, error) {
	repo := tuf.NewRepo(cs)
	rootRole := data.CanonicalRootRole
	snapshotRole := data.CanonicalSnapshotRole

	// some delegated targets role may be invalid based on other updates
	// that have been made by other clients. We'll rebuild the slice of
	// updates with only the things we should actually update
	updatesToApply := make([]storage.MetaUpdate, 0, len(updates))

	roles := make(map[string]storage.MetaUpdate)
	for _, v := range updates {
		roles[v.Role] = v
	}

	var root *data.SignedRoot
	oldRootJSON, err := store.GetCurrent(gun, rootRole)
	if _, ok := err.(storage.ErrNotFound); err != nil && !ok {
		// problem with storage. No expectation we can
		// write if we can't read so bail.
		logrus.Error("error reading previous root: ", err.Error())
		return nil, err
	}
	if rootUpdate, ok := roles[rootRole]; ok {
		// if root is present, validate its integrity, possibly
		// against a previous root
		if root, err = validateRoot(gun, oldRootJSON, rootUpdate.Data, store); err != nil {
			logrus.Error("ErrBadRoot: ", err.Error())
			return nil, validation.ErrBadRoot{Msg: err.Error()}
		}

		// setting root will update keys db
		if err = repo.SetRoot(root); err != nil {
			logrus.Error("ErrValidation: ", err.Error())
			return nil, validation.ErrValidation{Msg: err.Error()}
		}
		logrus.Debug("Successfully validated root")
		updatesToApply = append(updatesToApply, rootUpdate)
	} else {
		if oldRootJSON == nil {
			return nil, validation.ErrValidation{Msg: "no pre-existing root and no root provided in update."}
		}
		parsedOldRoot := &data.SignedRoot{}
		if err := json.Unmarshal(oldRootJSON, parsedOldRoot); err != nil {
			return nil, validation.ErrValidation{Msg: "pre-existing root is corrupted and no root provided in update."}
		}
		if err = repo.SetRoot(parsedOldRoot); err != nil {
			logrus.Error("ErrValidation: ", err.Error())
			return nil, validation.ErrValidation{Msg: err.Error()}
		}
	}

	targetsToUpdate, err := loadAndValidateTargets(gun, repo, roles, store)
	if err != nil {
		return nil, err
	}
	updatesToApply = append(updatesToApply, targetsToUpdate...)

	// there's no need to load files from the database if no targets etc...
	// were uploaded because that means they haven't been updated and
	// the snapshot will already contain the correct hashes and sizes for
	// those targets (incl. delegated targets)
	logrus.Debug("Successfully validated targets")

	// At this point, root and targets must have been loaded into the repo
	if _, ok := roles[snapshotRole]; ok {
		var oldSnap *data.SignedSnapshot
		oldSnapJSON, err := store.GetCurrent(gun, snapshotRole)
		if _, ok := err.(storage.ErrNotFound); err != nil && !ok {
			// problem with storage. No expectation we can
			// write if we can't read so bail.
			logrus.Error("error reading previous snapshot: ", err.Error())
			return nil, err
		} else if err == nil {
			oldSnap = &data.SignedSnapshot{}
			if err := json.Unmarshal(oldSnapJSON, oldSnap); err != nil {
				oldSnap = nil
			}
		}

		if err := validateSnapshot(snapshotRole, oldSnap, roles[snapshotRole], roles, repo); err != nil {
			logrus.Error("ErrBadSnapshot: ", err.Error())
			return nil, validation.ErrBadSnapshot{Msg: err.Error()}
		}
		logrus.Debug("Successfully validated snapshot")
		updatesToApply = append(updatesToApply, roles[snapshotRole])
	} else {
		// Check:
		//   - we have a snapshot key
		//   - it matches a snapshot key signed into the root.json
		// Then:
		//   - generate a new snapshot
		//   - add it to the updates
		update, err := generateSnapshot(gun, repo, store)
		if err != nil {
			return nil, err
		}
		updatesToApply = append(updatesToApply, *update)
	}
	return updatesToApply, nil
}
Example 25
// Initialize creates a new repository, using the private key identified by
// rootKeyID as the root key for the TUF repository.
func (r *NotaryRepository) Initialize(rootKeyID string, serverManagedRoles ...string) error {
	privKey, _, err := r.CryptoService.GetPrivateKey(rootKeyID)
	if err != nil {
		return err
	}

	// currently we only support server managing timestamps and snapshots, and
	// nothing else - timestamps are always managed by the server, and implicit
	// (do not have to be passed in as part of `serverManagedRoles`, so that
	// the API of Initialize doesn't change).
	var serverManagesSnapshot bool
	locallyManagedKeys := []string{
		data.CanonicalTargetsRole,
		data.CanonicalSnapshotRole,
		// root is also locally managed, but that should have been created
		// already
	}
	remotelyManagedKeys := []string{data.CanonicalTimestampRole}
	for _, role := range serverManagedRoles {
		switch role {
		case data.CanonicalTimestampRole:
			continue // timestamp is already in the right place
		case data.CanonicalSnapshotRole:
			// because we put Snapshot last
			locallyManagedKeys = []string{data.CanonicalTargetsRole}
			remotelyManagedKeys = append(
				remotelyManagedKeys, data.CanonicalSnapshotRole)
			serverManagesSnapshot = true
		default:
			return ErrInvalidRemoteRole{Role: role}
		}
	}

	// Hard-coded policy: the generated certificate expires in 10 years.
	startTime := time.Now()
	rootCert, err := cryptoservice.GenerateCertificate(
		privKey, r.gun, startTime, startTime.AddDate(10, 0, 0))

	if err != nil {
		return err
	}
	r.CertManager.AddTrustedCert(rootCert)

	// The root key gets stored in the TUF metadata X509 encoded, linking
	// the tuf root.json to our X509 PKI.
	// If the key is RSA, we store it as type RSAx509, if it is ECDSA we store it
	// as ECDSAx509 to allow the gotuf verifiers to correctly decode the
	// key on verification of signatures.
	var rootKey data.PublicKey
	switch privKey.Algorithm() {
	case data.RSAKey:
		rootKey = data.NewRSAx509PublicKey(trustmanager.CertToPEM(rootCert))
	case data.ECDSAKey:
		rootKey = data.NewECDSAx509PublicKey(trustmanager.CertToPEM(rootCert))
	default:
		return fmt.Errorf("invalid format for root key: %s", privKey.Algorithm())
	}

	kdb := keys.NewDB()
	err = addKeyForRole(kdb, data.CanonicalRootRole, rootKey)
	if err != nil {
		return err
	}

	// we want to create all the local keys first so we don't have to
	// make unnecessary network calls
	for _, role := range locallyManagedKeys {
		// This is currently hardcoding the keys to ECDSA.
		key, err := r.CryptoService.Create(role, data.ECDSAKey)
		if err != nil {
			return err
		}
		if err := addKeyForRole(kdb, role, key); err != nil {
			return err
		}
	}
	for _, role := range remotelyManagedKeys {
		// This key is generated by the remote server.
		key, err := getRemoteKey(r.baseURL, r.gun, role, r.roundTrip)
		if err != nil {
			return err
		}
		logrus.Debugf("got remote %s %s key with keyID: %s",
			role, key.Algorithm(), key.ID())
		if err := addKeyForRole(kdb, role, key); err != nil {
			return err
		}
	}

	r.tufRepo = tuf.NewRepo(kdb, r.CryptoService)

	err = r.tufRepo.InitRoot(false)
	if err != nil {
		logrus.Debug("Error on InitRoot: ", err.Error())
		return err
	}
	_, err = r.tufRepo.InitTargets(data.CanonicalTargetsRole)
	if err != nil {
		logrus.Debug("Error on InitTargets: ", err.Error())
		return err
	}
	err = r.tufRepo.InitSnapshot()
	if err != nil {
		logrus.Debug("Error on InitSnapshot: ", err.Error())
		return err
	}

	return r.saveMetadata(serverManagesSnapshot)
}
Example 26
// Initialize creates a new repository, using the private key identified by
// rootKeyID as the root key for the TUF repository.
func (r *NotaryRepository) Initialize(rootKeyID string) error {
	privKey, _, err := r.CryptoService.GetPrivateKey(rootKeyID)
	if err != nil {
		return err
	}

	rootCert, err := cryptoservice.GenerateCertificate(privKey, r.gun)

	if err != nil {
		return err
	}
	r.KeyStoreManager.AddTrustedCert(rootCert)

	// The root key gets stored in the TUF metadata X509 encoded, linking
	// the tuf root.json to our X509 PKI.
	// If the key is RSA, we store it as type RSAx509, if it is ECDSA we store it
	// as ECDSAx509 to allow the gotuf verifiers to correctly decode the
	// key on verification of signatures.
	var rootKey data.PublicKey
	switch privKey.Algorithm() {
	case data.RSAKey:
		rootKey = data.NewRSAx509PublicKey(trustmanager.CertToPEM(rootCert))
	case data.ECDSAKey:
		rootKey = data.NewECDSAx509PublicKey(trustmanager.CertToPEM(rootCert))
	default:
		return fmt.Errorf("invalid format for root key: %s", privKey.Algorithm())
	}

	// All the timestamp keys are generated by the remote server.
	remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
	if err != nil {
		return err
	}
	rawTSKey, err := remote.GetKey("timestamp")
	if err != nil {
		return err
	}

	timestampKey, err := data.UnmarshalPublicKey(rawTSKey)
	if err != nil {
		return err
	}

	logrus.Debugf("got remote %s timestamp key with keyID: %s", timestampKey.Algorithm(), timestampKey.ID())

	// This is currently hardcoding the targets and snapshots keys to ECDSA
	// Targets and snapshot keys are always generated locally.
	targetsKey, err := r.CryptoService.Create("targets", data.ECDSAKey)
	if err != nil {
		return err
	}
	snapshotKey, err := r.CryptoService.Create("snapshot", data.ECDSAKey)
	if err != nil {
		return err
	}

	kdb := keys.NewDB()

	kdb.AddKey(rootKey)
	kdb.AddKey(targetsKey)
	kdb.AddKey(snapshotKey)
	kdb.AddKey(timestampKey)

	err = initRoles(kdb, rootKey, targetsKey, snapshotKey, timestampKey)
	if err != nil {
		return err
	}

	r.tufRepo = tuf.NewRepo(kdb, r.CryptoService)

	err = r.tufRepo.InitRoot(false)
	if err != nil {
		logrus.Debug("Error on InitRoot: ", err.Error())
		switch err.(type) {
		case tuferrors.ErrInsufficientSignatures, trustmanager.ErrPasswordInvalid:
		default:
			return err
		}
	}
	err = r.tufRepo.InitTargets()
	if err != nil {
		logrus.Debug("Error on InitTargets: ", err.Error())
		return err
	}
	err = r.tufRepo.InitSnapshot()
	if err != nil {
		logrus.Debug("Error on InitSnapshot: ", err.Error())
		return err
	}

	return r.saveMetadata()
}
Example 27
// Initialize creates a new repository, using the private key identified by
// rootKeyID as the root key for the TUF repository. The server must be
// reachable (and is asked to generate a
// timestamp key and possibly other serverManagedRoles), but the created repository
// result is only stored on local disk, not published to the server. To do that,
// use r.Publish() eventually.
func (r *NotaryRepository) Initialize(rootKeyID string, serverManagedRoles ...string) error {
	privKey, _, err := r.CryptoService.GetPrivateKey(rootKeyID)
	if err != nil {
		return err
	}

	// currently we only support server managing timestamps and snapshots, and
	// nothing else - timestamps are always managed by the server, and implicit
	// (do not have to be passed in as part of `serverManagedRoles`, so that
	// the API of Initialize doesn't change).
	var serverManagesSnapshot bool
	locallyManagedKeys := []string{
		data.CanonicalTargetsRole,
		data.CanonicalSnapshotRole,
		// root is also locally managed, but that should have been created
		// already
	}
	remotelyManagedKeys := []string{data.CanonicalTimestampRole}
	for _, role := range serverManagedRoles {
		switch role {
		case data.CanonicalTimestampRole:
			continue // timestamp is already in the right place
		case data.CanonicalSnapshotRole:
			// because we put Snapshot last
			locallyManagedKeys = []string{data.CanonicalTargetsRole}
			remotelyManagedKeys = append(
				remotelyManagedKeys, data.CanonicalSnapshotRole)
			serverManagesSnapshot = true
		default:
			return ErrInvalidRemoteRole{Role: role}
		}
	}

	rootKey, err := rootCertKey(r.gun, privKey)
	if err != nil {
		return err
	}

	var (
		rootRole = data.NewBaseRole(
			data.CanonicalRootRole,
			notary.MinThreshold,
			rootKey,
		)
		timestampRole data.BaseRole
		snapshotRole  data.BaseRole
		targetsRole   data.BaseRole
	)

	// we want to create all the local keys first so we don't have to
	// make unnecessary network calls
	for _, role := range locallyManagedKeys {
		// This is currently hardcoding the keys to ECDSA.
		key, err := r.CryptoService.Create(role, r.gun, data.ECDSAKey)
		if err != nil {
			return err
		}
		switch role {
		case data.CanonicalSnapshotRole:
			snapshotRole = data.NewBaseRole(
				role,
				notary.MinThreshold,
				key,
			)
		case data.CanonicalTargetsRole:
			targetsRole = data.NewBaseRole(
				role,
				notary.MinThreshold,
				key,
			)
		}
	}
	for _, role := range remotelyManagedKeys {
		// This key is generated by the remote server.
		key, err := getRemoteKey(r.baseURL, r.gun, role, r.roundTrip)
		if err != nil {
			return err
		}
		logrus.Debugf("got remote %s %s key with keyID: %s",
			role, key.Algorithm(), key.ID())
		switch role {
		case data.CanonicalSnapshotRole:
			snapshotRole = data.NewBaseRole(
				role,
				notary.MinThreshold,
				key,
			)
		case data.CanonicalTimestampRole:
			timestampRole = data.NewBaseRole(
				role,
				notary.MinThreshold,
				key,
			)
		}
	}

	r.tufRepo = tuf.NewRepo(r.CryptoService)

	err = r.tufRepo.InitRoot(
		rootRole,
		timestampRole,
		snapshotRole,
		targetsRole,
		false,
	)
	if err != nil {
		logrus.Debug("Error on InitRoot: ", err.Error())
		return err
	}
	_, err = r.tufRepo.InitTargets(data.CanonicalTargetsRole)
	if err != nil {
		logrus.Debug("Error on InitTargets: ", err.Error())
		return err
	}
	err = r.tufRepo.InitSnapshot()
	if err != nil {
		logrus.Debug("Error on InitSnapshot: ", err.Error())
		return err
	}

	return r.saveMetadata(serverManagesSnapshot)
}
Example 28
// Initialize creates a new repository, using the private keys identified by
// rootKeyIDs as the root keys for the TUF repository. The server must be
// reachable (and is asked to generate a
// timestamp key and possibly other serverManagedRoles), but the created repository
// result is only stored on local disk, not published to the server. To do that,
// use r.Publish() eventually.
func (r *NotaryRepository) Initialize(rootKeyIDs []string, serverManagedRoles ...string) error {
	privKeys := make([]data.PrivateKey, 0, len(rootKeyIDs))
	for _, keyID := range rootKeyIDs {
		privKey, _, err := r.CryptoService.GetPrivateKey(keyID)
		if err != nil {
			return err
		}
		privKeys = append(privKeys, privKey)
	}

	// currently we only support server managing timestamps and snapshots, and
	// nothing else - timestamps are always managed by the server, and implicit
	// (do not have to be passed in as part of `serverManagedRoles`, so that
	// the API of Initialize doesn't change).
	var serverManagesSnapshot bool
	locallyManagedKeys := []string{
		data.CanonicalTargetsRole,
		data.CanonicalSnapshotRole,
		// root is also locally managed, but that should have been created
		// already
	}
	remotelyManagedKeys := []string{data.CanonicalTimestampRole}
	for _, role := range serverManagedRoles {
		switch role {
		case data.CanonicalTimestampRole:
			continue // timestamp is already in the right place
		case data.CanonicalSnapshotRole:
			// because we put Snapshot last
			locallyManagedKeys = []string{data.CanonicalTargetsRole}
			remotelyManagedKeys = append(
				remotelyManagedKeys, data.CanonicalSnapshotRole)
			serverManagesSnapshot = true
		default:
			return ErrInvalidRemoteRole{Role: role}
		}
	}

	rootKeys := make([]data.PublicKey, 0, len(privKeys))
	for _, privKey := range privKeys {
		rootKey, err := rootCertKey(r.gun, privKey)
		if err != nil {
			return err
		}
		rootKeys = append(rootKeys, rootKey)
	}

	rootRole, targetsRole, snapshotRole, timestampRole, err := r.initializeRoles(
		rootKeys,
		locallyManagedKeys,
		remotelyManagedKeys,
	)
	if err != nil {
		return err
	}

	r.tufRepo = tuf.NewRepo(r.CryptoService)

	if err := r.tufRepo.InitRoot(
		rootRole,
		timestampRole,
		snapshotRole,
		targetsRole,
		false,
	); err != nil {
		logrus.Debug("Error on InitRoot: ", err.Error())
		return err
	}
	if _, err := r.tufRepo.InitTargets(data.CanonicalTargetsRole); err != nil {
		logrus.Debug("Error on InitTargets: ", err.Error())
		return err
	}
	if err := r.tufRepo.InitSnapshot(); err != nil {
		logrus.Debug("Error on InitSnapshot: ", err.Error())
		return err
	}

	return r.saveMetadata(serverManagesSnapshot)
}
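A call sketch for this latest variant, assuming an already-constructed *NotaryRepository named r and the three-argument CryptoService.Create shown in the previous example; the gun is illustrative and should match the repository's own:

	// mint a root key in the repository's own crypto service, then
	// initialize with the server managing snapshot as well as timestamp
	rootPub, err := r.CryptoService.Create(data.CanonicalRootRole, "docker.com/notary", data.ECDSAKey)
	if err != nil {
		return err
	}
	if err := r.Initialize([]string{rootPub.ID()}, data.CanonicalSnapshotRole); err != nil {
		return err
	}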