// GetBaseRole gets a base role from this repo's metadata func (tr *Repo) GetBaseRole(name string) (data.BaseRole, error) { if !data.ValidRole(name) { return data.BaseRole{}, data.ErrInvalidRole{Role: name, Reason: "invalid base role name"} } if tr.Root == nil { return data.BaseRole{}, ErrNotLoaded{data.CanonicalRootRole} } roleData, ok := tr.Root.Signed.Roles[name] if !ok { return data.BaseRole{}, data.ErrInvalidRole{Role: name, Reason: "role not found in root file"} } // Get all public keys for the base role from TUF metadata keyIDs := roleData.KeyIDs pubKeys := make(map[string]data.PublicKey) for _, keyID := range keyIDs { pubKey, ok := tr.Root.Signed.Keys[keyID] if !ok { return data.BaseRole{}, data.ErrInvalidRole{ Role: name, Reason: fmt.Sprintf("key with ID %s was not found in root metadata", keyID), } } pubKeys[keyID] = pubKey } return data.BaseRole{ Name: name, Keys: pubKeys, Threshold: roleData.Threshold, }, nil }
// get all the saved previous roles <= the current root version
func (tr *Repo) getOldRootRoles() versionedRootRoles {
	oldRootRoles := make(versionedRootRoles, 0, len(tr.Root.Signed.Roles))

	// now go through the old roles
	for roleName := range tr.Root.Signed.Roles {
		// ensure that the rolename matches our format and that the version is
		// not too high
		//
		// NOTE: the check is deliberately inverted — a name that passes
		// ValidRole is a current canonical role (e.g. "root"), not a saved
		// versioned role like "root.3", so it is skipped here.
		if data.ValidRole(roleName) {
			continue
		}
		// versioned role names have exactly the form "<canonical root>.<N>"
		nameTokens := strings.Split(roleName, ".")
		if len(nameTokens) != 2 || nameTokens[0] != data.CanonicalRootRole {
			continue
		}
		// the numeric suffix must parse and must not exceed the version of
		// the root file we currently have loaded
		version, err := strconv.Atoi(nameTokens[1])
		if err != nil || version > tr.Root.Signed.Version {
			continue
		}

		// ignore invalid roles, which shouldn't happen
		oldRole, err := tr.Root.BuildBaseRole(roleName)
		if err != nil {
			continue
		}

		oldRootRoles = append(oldRootRoles, versionedRootRole{BaseRole: oldRole, version: version})
	}

	return oldRootRoles
}
func (rb *repoBuilder) Load(roleName string, content []byte, minVersion int, allowExpired bool) error { if !data.ValidRole(roleName) { return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s is an invalid role", roleName)} } if rb.IsLoaded(roleName) { return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s has already been loaded", roleName)} } var err error switch roleName { case data.CanonicalRootRole: break case data.CanonicalTimestampRole, data.CanonicalSnapshotRole, data.CanonicalTargetsRole: err = rb.checkPrereqsLoaded([]string{data.CanonicalRootRole}) default: // delegations err = rb.checkPrereqsLoaded([]string{data.CanonicalRootRole, data.CanonicalTargetsRole}) } if err != nil { return err } switch roleName { case data.CanonicalRootRole: return rb.loadRoot(content, minVersion, allowExpired) case data.CanonicalSnapshotRole: return rb.loadSnapshot(content, minVersion, allowExpired) case data.CanonicalTimestampRole: return rb.loadTimestamp(content, minVersion, allowExpired) case data.CanonicalTargetsRole: return rb.loadTargets(content, minVersion, allowExpired) default: return rb.loadDelegation(roleName, content, minVersion, allowExpired) } }
func atomicUpdateHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { gun := vars["imageName"] s := ctx.Value("metaStore") store, ok := s.(storage.MetaStore) if !ok { return errors.ErrNoStorage.WithDetail(nil) } cryptoServiceVal := ctx.Value("cryptoService") cryptoService, ok := cryptoServiceVal.(signed.CryptoService) if !ok { return errors.ErrNoCryptoService.WithDetail(nil) } reader, err := r.MultipartReader() if err != nil { return errors.ErrMalformedUpload.WithDetail(nil) } var updates []storage.MetaUpdate for { part, err := reader.NextPart() if err == io.EOF { break } role := strings.TrimSuffix(part.FileName(), ".json") if role == "" { return errors.ErrNoFilename.WithDetail(nil) } else if !data.ValidRole(role) { return errors.ErrInvalidRole.WithDetail(role) } meta := &data.SignedMeta{} var input []byte inBuf := bytes.NewBuffer(input) dec := json.NewDecoder(io.TeeReader(part, inBuf)) err = dec.Decode(meta) if err != nil { return errors.ErrMalformedJSON.WithDetail(nil) } version := meta.Signed.Version updates = append(updates, storage.MetaUpdate{ Role: role, Version: version, Data: inBuf.Bytes(), }) } updates, err = validateUpdate(cryptoService, gun, updates, store) if err != nil { serializable, serializableError := validation.NewSerializableError(err) if serializableError != nil { return errors.ErrInvalidUpdate.WithDetail(nil) } return errors.ErrInvalidUpdate.WithDetail(serializable) } err = store.UpdateMany(gun, updates) if err != nil { return errors.ErrUpdating.WithDetail(nil) } return nil }
// RotateKey removes all existing keys associated with the role, and either // creates and adds one new key or delegates managing the key to the server. // These changes are staged in a changelist until publish is called. func (r *NotaryRepository) RotateKey(role string, serverManagesKey bool) error { // We currently support remotely managing timestamp and snapshot keys canBeRemoteKey := role == data.CanonicalTimestampRole || role == data.CanonicalSnapshotRole // And locally managing root, targets, and snapshot keys canBeLocalKey := (role == data.CanonicalSnapshotRole || role == data.CanonicalTargetsRole || role == data.CanonicalRootRole) switch { case !data.ValidRole(role) || data.IsDelegation(role): return fmt.Errorf("notary does not currently permit rotating the %s key", role) case serverManagesKey && !canBeRemoteKey: return ErrInvalidRemoteRole{Role: role} case !serverManagesKey && !canBeLocalKey: return ErrInvalidLocalRole{Role: role} } var ( pubKey data.PublicKey err error errFmtMsg string ) switch serverManagesKey { case true: pubKey, err = getRemoteKey(r.baseURL, r.gun, role, r.roundTrip) errFmtMsg = "unable to rotate remote key: %s" default: pubKey, err = r.CryptoService.Create(role, r.gun, data.ECDSAKey) errFmtMsg = "unable to generate key: %s" } if err != nil { return fmt.Errorf(errFmtMsg, err) } // if this is a root role, generate a root cert for the public key if role == data.CanonicalRootRole { privKey, _, err := r.CryptoService.GetPrivateKey(pubKey.ID()) if err != nil { return err } pubKey, err = rootCertKey(r.gun, privKey) if err != nil { return err } } cl := changelist.NewMemChangelist() if err := r.rootFileKeyChange(cl, role, changelist.ActionCreate, pubKey); err != nil { return err } return r.publish(cl) }
// AddKey stores the contents of a PEM-encoded private key as a PEM block func (s *KeyMemoryStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error { s.Lock() defer s.Unlock() if keyInfo.Role == data.CanonicalRootRole || data.IsDelegation(keyInfo.Role) || !data.ValidRole(keyInfo.Role) { keyInfo.Gun = "" } err := addKey(s, s.PassRetriever, s.cachedKeys, filepath.Join(keyInfo.Gun, privKey.ID()), keyInfo.Role, privKey) if err != nil { return err } s.keyInfoMap[privKey.ID()] = keyInfo return nil }
// AtomicUpdateHandler will accept multiple TUF files and ensure that the storage // backend is atomically updated with all the new records. func AtomicUpdateHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error { defer r.Body.Close() s := ctx.Value("metaStore") store, ok := s.(storage.MetaStore) if !ok { return errors.ErrNoStorage.WithDetail(nil) } vars := mux.Vars(r) gun := vars["imageName"] reader, err := r.MultipartReader() if err != nil { return errors.ErrMalformedUpload.WithDetail(nil) } var updates []storage.MetaUpdate for { part, err := reader.NextPart() if err == io.EOF { break } role := strings.TrimSuffix(part.FileName(), ".json") if role == "" { return errors.ErrNoFilename.WithDetail(nil) } else if !data.ValidRole(role) { return errors.ErrInvalidRole.WithDetail(role) } meta := &data.SignedMeta{} var input []byte inBuf := bytes.NewBuffer(input) dec := json.NewDecoder(io.TeeReader(part, inBuf)) err = dec.Decode(meta) if err != nil { return errors.ErrMalformedJSON.WithDetail(nil) } version := meta.Signed.Version updates = append(updates, storage.MetaUpdate{ Role: role, Version: version, Data: inBuf.Bytes(), }) } if err = validateUpdate(gun, updates, store); err != nil { return errors.ErrMalformedUpload.WithDetail(err) } err = store.UpdateMany(gun, updates) if err != nil { return errors.ErrUpdating.WithDetail(err) } return nil }
// GetBaseRole gets a base role from this repo's metadata func (tr *Repo) GetBaseRole(name string) (data.BaseRole, error) { if !data.ValidRole(name) { return data.BaseRole{}, data.ErrInvalidRole{Role: name, Reason: "invalid base role name"} } if tr.Root == nil { return data.BaseRole{}, ErrNotLoaded{data.CanonicalRootRole} } // Find the role data public keys for the base role from TUF metadata baseRole, err := tr.Root.BuildBaseRole(name) if err != nil { return data.BaseRole{}, err } return baseRole, nil }
func checkRotationInput(role string, serverManaged bool) error { // We currently support remotely managing timestamp and snapshot keys canBeRemoteKey := role == data.CanonicalTimestampRole || role == data.CanonicalSnapshotRole // And locally managing root, targets, and snapshot keys canBeLocalKey := role == data.CanonicalSnapshotRole || role == data.CanonicalTargetsRole || role == data.CanonicalRootRole switch { case !data.ValidRole(role) || data.IsDelegation(role): return fmt.Errorf("notary does not currently permit rotating the %s key", role) case serverManaged && !canBeRemoteKey: return ErrInvalidRemoteRole{Role: role} case !serverManaged && !canBeLocalKey: return ErrInvalidLocalRole{Role: role} } return nil }
// AddRole adds a role to the database. Any keys associated with the // role must have already been added. func (db *KeyDB) AddRole(r *data.Role) error { if !data.ValidRole(r.Name) { return data.ErrInvalidRole{Role: r.Name} } if r.Threshold < 1 { return ErrInvalidThreshold } // validate all key ids are in the keys maps for _, id := range r.KeyIDs { if _, ok := db.keys[id]; !ok { return ErrInvalidKeyID } } db.roles[r.Name] = r return nil }
// AddKey stores the contents of a PEM-encoded private key as a PEM block func (s *GenericKeyStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error { var ( chosenPassphrase string giveup bool err error pemPrivKey []byte ) s.Lock() defer s.Unlock() if keyInfo.Role == data.CanonicalRootRole || data.IsDelegation(keyInfo.Role) || !data.ValidRole(keyInfo.Role) { keyInfo.Gun = "" } keyID := privKey.ID() for attempts := 0; ; attempts++ { chosenPassphrase, giveup, err = s.PassRetriever(keyID, keyInfo.Role, true, attempts) if err == nil { break } if giveup || attempts > 10 { return ErrAttemptsExceeded{} } } if chosenPassphrase != "" { pemPrivKey, err = utils.EncryptPrivateKey(privKey, keyInfo.Role, keyInfo.Gun, chosenPassphrase) } else { pemPrivKey, err = utils.KeyToPEM(privKey, keyInfo.Role, keyInfo.Gun) } if err != nil { return err } s.cachedKeys[keyID] = &cachedKey{alias: keyInfo.Role, key: privKey} err = s.store.Set(keyID, pemPrivKey) if err != nil { return err } s.keyInfoMap[privKey.ID()] = keyInfo return nil }
// testAddKeyWithRole is a shared test helper: it adds a key with the given
// role to a fresh KeyFileStore and verifies both that the key file lands in
// expectedSubdir and that the store's keyInfoMap records the expected
// role/GUN pairing for the key ID.
func testAddKeyWithRole(t *testing.T, role, expectedSubdir string) {
	gun := "docker.com/notary"
	testExt := "key"

	// Temporary directory where test files will be created
	tempBaseDir, err := ioutil.TempDir("", "notary-test-")
	require.NoError(t, err, "failed to create a temporary directory")
	defer os.RemoveAll(tempBaseDir)

	// Create our store
	store, err := NewKeyFileStore(tempBaseDir, passphraseRetriever)
	require.NoError(t, err, "failed to create new key filestore")

	privKey, err := GenerateECDSAKey(rand.Reader)
	require.NoError(t, err, "could not generate private key")

	// Since we're generating this manually we need to add the extension '.'
	expectedFilePath := filepath.Join(tempBaseDir, notary.PrivDir, expectedSubdir, privKey.ID()+"."+testExt)

	// Call the AddKey function
	err = store.AddKey(KeyInfo{Role: role, Gun: gun}, privKey)
	require.NoError(t, err, "failed to add key to store")

	// Check to see if file exists
	b, err := ioutil.ReadFile(expectedFilePath)
	require.NoError(t, err, "expected file not found")
	require.Contains(t, string(b), "-----BEGIN EC PRIVATE KEY-----")

	// Check that we have the role and gun info for this key's ID
	keyInfo, ok := store.keyInfoMap[privKey.ID()]
	require.True(t, ok)
	require.Equal(t, role, keyInfo.Role)
	// Root, delegation, and unrecognized roles are expected to have had
	// their GUN cleared by AddKey; other canonical roles keep it.
	if role == data.CanonicalRootRole || data.IsDelegation(role) || !data.ValidRole(role) {
		require.Empty(t, keyInfo.Gun)
	} else {
		require.Equal(t, gun, keyInfo.Gun)
	}
}
// yubiListKeys enumerates certificate objects on the YubiKey via PKCS#11 and
// returns a map from public-key ID to the slot holding the corresponding
// private key. Certificates whose subject CommonName is not a valid TUF role
// are skipped.
func yubiListKeys(ctx IPKCS11Ctx, session pkcs11.SessionHandle) (keys map[string]yubiSlot, err error) {
	keys = make(map[string]yubiSlot)
	// Search filter: token-resident certificate objects only.
	findTemplate := []*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
		//pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID),
		pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_CERTIFICATE),
	}

	// Attributes to fetch per object: the slot ID and the raw certificate.
	attrTemplate := []*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_ID, []byte{0}),
		pkcs11.NewAttribute(pkcs11.CKA_VALUE, []byte{0}),
	}

	if err = ctx.FindObjectsInit(session, findTemplate); err != nil {
		logrus.Debugf("Failed to init: %s", err.Error())
		return
	}
	// Page through matching objects, numSlots at a time, until a page comes
	// back empty or an error occurs.
	objs, b, err := ctx.FindObjects(session, numSlots)
	for err == nil {
		var o []pkcs11.ObjectHandle
		o, b, err = ctx.FindObjects(session, numSlots)
		if err != nil {
			continue
		}
		if len(o) == 0 {
			break
		}
		objs = append(objs, o...)
	}
	if err != nil {
		logrus.Debugf("Failed to find: %s %v", err.Error(), b)
		// A partial listing is still usable; only fail if nothing was found.
		if len(objs) == 0 {
			return nil, err
		}
	}
	if err = ctx.FindObjectsFinal(session); err != nil {
		logrus.Debugf("Failed to finalize: %s", err.Error())
		return
	}
	if len(objs) == 0 {
		return nil, errors.New("No keys found in yubikey.")
	}
	logrus.Debugf("Found %d objects matching list filters", len(objs))
	for _, obj := range objs {
		var (
			cert *x509.Certificate
			slot []byte
		)
		// Retrieve the public-key material to be able to create a new ECDSA
		attr, err := ctx.GetAttributeValue(session, obj, attrTemplate)
		if err != nil {
			logrus.Debugf("Failed to get Attribute for: %v", obj)
			continue
		}

		// Iterate through all the attributes of this key and saves CKA_PUBLIC_EXPONENT and CKA_MODULUS. Removes ordering specific issues.
		for _, a := range attr {
			if a.Type == pkcs11.CKA_ID {
				slot = a.Value
			}
			if a.Type == pkcs11.CKA_VALUE {
				cert, err = x509.ParseCertificate(a.Value)
				if err != nil {
					continue
				}
				// skip certificates whose CN is not a valid TUF role name
				if !data.ValidRole(cert.Subject.CommonName) {
					continue
				}
			}
		}

		// we found nothing
		if cert == nil {
			continue
		}

		// Only ECDSA public keys are supported on the yubikey.
		var ecdsaPubKey *ecdsa.PublicKey
		switch cert.PublicKeyAlgorithm {
		case x509.ECDSA:
			ecdsaPubKey = cert.PublicKey.(*ecdsa.PublicKey)
		default:
			logrus.Infof("Unsupported x509 PublicKeyAlgorithm: %d", cert.PublicKeyAlgorithm)
			continue
		}

		pubBytes, err := x509.MarshalPKIXPublicKey(ecdsaPubKey)
		if err != nil {
			logrus.Debugf("Failed to Marshal public key")
			continue
		}

		// Key ID is derived from the marshaled public key; the cert's CN is
		// the role the key serves.
		keys[data.NewECDSAPublicKey(pubBytes).ID()] = yubiSlot{
			role:   cert.Subject.CommonName,
			slotID: slot,
		}
	}
	return
}
func atomicUpdateHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { gun := vars["imageName"] s := ctx.Value("metaStore") logger := ctxu.GetLoggerWithField(ctx, gun, "gun") store, ok := s.(storage.MetaStore) if !ok { logger.Error("500 POST unable to retrieve storage") return errors.ErrNoStorage.WithDetail(nil) } cryptoServiceVal := ctx.Value("cryptoService") cryptoService, ok := cryptoServiceVal.(signed.CryptoService) if !ok { logger.Error("500 POST unable to retrieve signing service") return errors.ErrNoCryptoService.WithDetail(nil) } reader, err := r.MultipartReader() if err != nil { return errors.ErrMalformedUpload.WithDetail(nil) } var updates []storage.MetaUpdate for { part, err := reader.NextPart() if err == io.EOF { break } role := strings.TrimSuffix(part.FileName(), ".json") if role == "" { return errors.ErrNoFilename.WithDetail(nil) } else if !data.ValidRole(role) { return errors.ErrInvalidRole.WithDetail(role) } meta := &data.SignedMeta{} var input []byte inBuf := bytes.NewBuffer(input) dec := json.NewDecoder(io.TeeReader(part, inBuf)) err = dec.Decode(meta) if err != nil { return errors.ErrMalformedJSON.WithDetail(nil) } version := meta.Signed.Version updates = append(updates, storage.MetaUpdate{ Role: role, Version: version, Data: inBuf.Bytes(), }) } updates, err = validateUpdate(cryptoService, gun, updates, store) if err != nil { serializable, serializableError := validation.NewSerializableError(err) if serializableError != nil { return errors.ErrInvalidUpdate.WithDetail(nil) } return errors.ErrInvalidUpdate.WithDetail(serializable) } err = store.UpdateMany(gun, updates) if err != nil { // If we have an old version error, surface to user with error code if _, ok := err.(storage.ErrOldVersion); ok { return errors.ErrOldVersion.WithDetail(err) } // More generic storage update error, possibly due to attempted rollback logger.Errorf("500 POST error applying update request: %v", err) return 
errors.ErrUpdating.WithDetail(nil) } return nil }
// TestKeyStoreInternalState verifies that a KeyFileStore built over an
// existing directory of key files populates its keyInfoMap correctly, and
// that RemoveKey/AddKey keep both the on-disk files and the map in sync.
func TestKeyStoreInternalState(t *testing.T) {
	// Temporary directory where test files will be created
	tempBaseDir, err := ioutil.TempDir("", "notary-test-")
	require.NoError(t, err, "failed to create a temporary directory")
	defer os.RemoveAll(tempBaseDir)
	gun := "docker.com/notary"

	// Mimic a notary repo setup, and test that bringing up a keyfilestore creates the correct keyInfoMap
	roles := []string{data.CanonicalRootRole, data.CanonicalTargetsRole, data.CanonicalSnapshotRole, "targets/delegation"}
	// Keep track of the key IDs for each role, so we can validate later against the keystore state
	roleToID := make(map[string]string)
	for _, role := range roles {
		// generate a key for the role
		privKey, err := utils.GenerateECDSAKey(rand.Reader)
		require.NoError(t, err, "could not generate private key")

		var privKeyPEM []byte
		// generate the correct PEM role header: root and delegation keys
		// (and invalid roles) carry no GUN, other canonical roles do
		if role == data.CanonicalRootRole || data.IsDelegation(role) || !data.ValidRole(role) {
			privKeyPEM, err = utils.KeyToPEM(privKey, role, "")
		} else {
			privKeyPEM, err = utils.KeyToPEM(privKey, role, gun)
		}

		require.NoError(t, err, "could not generate PEM")

		// write the key file to the correct location
		keyPath := filepath.Join(tempBaseDir, notary.PrivDir)
		keyPath = filepath.Join(keyPath, privKey.ID())
		require.NoError(t, os.MkdirAll(filepath.Dir(keyPath), 0755))
		require.NoError(t, ioutil.WriteFile(keyPath+".key", privKeyPEM, 0755))

		roleToID[role] = privKey.ID()
	}

	store, err := NewKeyFileStore(tempBaseDir, passphraseRetriever)
	require.NoError(t, err)
	// one keyInfoMap entry per key file written above
	require.Len(t, store.keyInfoMap, 4)
	for _, role := range roles {
		keyID, _ := roleToID[role]
		// make sure this keyID is the right length
		require.Len(t, keyID, notary.SHA256HexSize)
		require.Equal(t, role, store.keyInfoMap[keyID].Role)
		// targets and snapshot keys should have a gun set, root and delegation keys should not
		if role == data.CanonicalTargetsRole || role == data.CanonicalSnapshotRole {
			require.Equal(t, gun, store.keyInfoMap[keyID].Gun)
		} else {
			require.Empty(t, store.keyInfoMap[keyID].Gun)
		}
	}

	// Try removing the targets key only by ID (no gun provided)
	require.NoError(t, store.RemoveKey(roleToID[data.CanonicalTargetsRole]))
	// The key file itself should have been removed
	_, err = os.Stat(filepath.Join(tempBaseDir, notary.PrivDir, roleToID[data.CanonicalTargetsRole]+".key"))
	require.Error(t, err)
	// The keyInfoMap should have also updated by deleting the key
	_, ok := store.keyInfoMap[roleToID[data.CanonicalTargetsRole]]
	require.False(t, ok)

	// Try removing the delegation key only by ID (no gun provided)
	require.NoError(t, store.RemoveKey(roleToID["targets/delegation"]))
	// The key file itself should have been removed
	_, err = os.Stat(filepath.Join(tempBaseDir, notary.PrivDir, roleToID["targets/delegation"]+".key"))
	require.Error(t, err)
	// The keyInfoMap should have also updated
	_, ok = store.keyInfoMap[roleToID["targets/delegation"]]
	require.False(t, ok)

	// Try removing the root key only by ID (no gun provided)
	require.NoError(t, store.RemoveKey(roleToID[data.CanonicalRootRole]))
	// The key file itself should have been removed
	_, err = os.Stat(filepath.Join(tempBaseDir, notary.PrivDir, roleToID[data.CanonicalRootRole]+".key"))
	require.Error(t, err)
	// The keyInfoMap should have also updated
	_, ok = store.keyInfoMap[roleToID[data.CanonicalRootRole]]
	require.False(t, ok)

	// Generate a new targets key and add it with its gun, check that the map gets updated back
	privKey, err := utils.GenerateECDSAKey(rand.Reader)
	require.NoError(t, err, "could not generate private key")
	require.NoError(t, store.AddKey(KeyInfo{Role: data.CanonicalTargetsRole, Gun: gun}, privKey))
	require.Equal(t, gun, store.keyInfoMap[privKey.ID()].Gun)
	require.Equal(t, data.CanonicalTargetsRole, store.keyInfoMap[privKey.ID()].Role)
}