Example #1
File: deks.go Project: harche/docker
// GetKeys returns the current set of DEKs. If NeedsRotation is true and there
// is no existing PendingDEK, it tries to create one. If any errors occur while
// doing so, it just returns the original keys.
func (r *RaftDEKManager) GetKeys() raft.EncryptionKeys {
	var newKeys, originalKeys raft.EncryptionKeys
	err := r.kw.ViewAndUpdateHeaders(func(h ca.PEMKeyHeaders) (ca.PEMKeyHeaders, error) {
		data, ok := h.(RaftDEKData)
		if !ok {
			return nil, errNotUsingRaftDEKData
		}
		originalKeys = data.EncryptionKeys
		if !data.NeedsRotation || data.PendingDEK != nil {
			return nil, errNoUpdateNeeded
		}
		newKeys = raft.EncryptionKeys{
			CurrentDEK: data.CurrentDEK,
			PendingDEK: encryption.GenerateSecretKey(),
		}
		return RaftDEKData{EncryptionKeys: newKeys}, nil
	})
	if err != nil {
		return originalKeys
	}
	return newKeys
}
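
As a usage illustration, here is a minimal hedged sketch of how a caller might consume GetKeys. It assumes the same package scope as the snippet above (so RaftDEKManager is in scope) plus the standard library log package; the helper name logKeyState is hypothetical, not from the source.

// logKeyState is a hypothetical helper showing one way to consume GetKeys:
// read the current key set and report whether a rotation (a pending DEK) is
// in flight. Since GetKeys falls back to the original keys on any error, the
// returned value is always usable.
func logKeyState(m *RaftDEKManager) {
	keys := m.GetKeys()
	if keys.PendingDEK != nil {
		log.Println("DEK rotation in progress: pending key present")
	} else {
		log.Println("no pending DEK; no rotation in flight")
	}
}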
Example #2
File: deks.go Project: harche/docker
// NewRaftDEKManager returns a RaftDEKManager that uses the current key writer
// and header manager.
func NewRaftDEKManager(kw ca.KeyWriter) (*RaftDEKManager, error) {
	// If there is no current DEK, generate one and write it to disk
	err := kw.ViewAndUpdateHeaders(func(h ca.PEMKeyHeaders) (ca.PEMKeyHeaders, error) {
		dekData, ok := h.(RaftDEKData)
		// It wasn't a raft DEK manager before - just replace it.
		if !ok || dekData.CurrentDEK == nil {
			return RaftDEKData{
				EncryptionKeys: raft.EncryptionKeys{
					CurrentDEK: encryption.GenerateSecretKey(),
				},
			}, nil
		}
		return nil, errNoUpdateNeeded
	})
	if err != nil && err != errNoUpdateNeeded {
		return nil, err
	}
	return &RaftDEKManager{
		kw:         kw,
		rotationCh: make(chan struct{}, 1),
	}, nil
}
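
A short hedged construction sketch follows. NewRaftDEKManager needs only a ca.KeyWriter, which the ca.KeyReadWriter built in Examples #3 and #4 satisfies; the wrapper name newDEKManagerOrDie and the use of log.Fatalf are illustrative assumptions.

// newDEKManagerOrDie is a hypothetical convenience wrapper: it builds a
// RaftDEKManager from an existing key writer and aborts on failure. Note
// that errNoUpdateNeeded is already swallowed inside NewRaftDEKManager, so
// any error surfacing here is a real one.
func newDEKManagerOrDie(krw ca.KeyWriter) *RaftDEKManager {
	m, err := NewRaftDEKManager(krw)
	if err != nil {
		log.Fatalf("initializing raft DEK manager: %v", err)
	}
	return m
}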
Example #3
File: node.go Project: docker/swarmkit
func (n *Node) loadSecurityConfig(ctx context.Context) (*ca.SecurityConfig, error) {
	paths := ca.NewConfigPaths(filepath.Join(n.config.StateDir, certDirectory))
	var securityConfig *ca.SecurityConfig

	krw := ca.NewKeyReadWriter(paths.Node, n.unlockKey, &manager.RaftDEKData{})
	if err := krw.Migrate(); err != nil {
		return nil, err
	}

	// Check if we already have valid certificates on disk.
	rootCA, err := ca.GetLocalRootCA(paths.RootCA)
	if err != nil && err != ca.ErrNoLocalRootCA {
		return nil, err
	}
	if err == nil {
		securityConfig, err = ca.LoadSecurityConfig(ctx, rootCA, krw)
		if err != nil {
			_, isInvalidKEK := errors.Cause(err).(ca.ErrInvalidKEK)
			if isInvalidKEK {
				return nil, ErrInvalidUnlockKey
			} else if !os.IsNotExist(err) {
				return nil, errors.Wrapf(err, "error while loading TLS certificate in %s", paths.Node.Cert)
			}
		}
	}

	if securityConfig == nil {
		if n.config.JoinAddr == "" {
			// If we're not joining a cluster, bootstrap a new one; we also have to set the unlock key.
			n.unlockKey = nil
			if n.config.AutoLockManagers {
				n.unlockKey = encryption.GenerateSecretKey()
			}
			krw = ca.NewKeyReadWriter(paths.Node, n.unlockKey, &manager.RaftDEKData{})
			rootCA, err = ca.CreateRootCA(ca.DefaultRootCN, paths.RootCA)
			if err != nil {
				return nil, err
			}
			log.G(ctx).Debug("generated CA key and certificate")
		} else if err == ca.ErrNoLocalRootCA { // from previous error loading the root CA from disk
			rootCA, err = ca.DownloadRootCA(ctx, paths.RootCA, n.config.JoinToken, n.remotes)
			if err != nil {
				return nil, err
			}
			log.G(ctx).Debug("downloaded CA certificate")
		}

		// Obtain new certs and set up TLS certificate renewal for this node:
		// - If certificates weren't present on disk, we call CreateSecurityConfig, which blocks
		//   until a valid certificate has been issued.
		// - We wait for CreateSecurityConfig to finish since we need a certificate to operate.

		// Attempt to load certificate from disk
		securityConfig, err = ca.LoadSecurityConfig(ctx, rootCA, krw)
		if err == nil {
			log.G(ctx).WithFields(logrus.Fields{
				"node.id": securityConfig.ClientTLSCreds.NodeID(),
			}).Debugf("loaded TLS certificate")
		} else {
			if _, ok := errors.Cause(err).(ca.ErrInvalidKEK); ok {
				return nil, ErrInvalidUnlockKey
			}
			log.G(ctx).WithError(err).Debugf("no node credentials found in: %s", krw.Target())

			securityConfig, err = rootCA.CreateSecurityConfig(ctx, krw, ca.CertificateRequestConfig{
				Token:        n.config.JoinToken,
				Availability: n.config.Availability,
				Remotes:      n.remotes,
			})

			if err != nil {
				return nil, err
			}
		}
	}

	n.Lock()
	n.role = securityConfig.ClientTLSCreds.Role()
	n.nodeID = securityConfig.ClientTLSCreds.NodeID()
	n.roleCond.Broadcast()
	n.Unlock()

	return securityConfig, nil
}
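
The error handling above repeatedly unwraps with errors.Cause (from github.com/pkg/errors) before type-asserting against ca.ErrInvalidKEK. Isolated as a hedged sketch, with a hypothetical helper name, the pattern looks like this:

// isInvalidKEK is a hypothetical helper capturing the unwrap-and-assert
// pattern used in loadSecurityConfig: peel off any wrapping added by
// errors.Wrapf, then check the concrete error type.
func isInvalidKEK(err error) bool {
	_, ok := errors.Cause(err).(ca.ErrInvalidKEK)
	return ok
}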
Example #4
File: node.go Project: ollie314/docker
func (n *Node) loadSecurityConfig(ctx context.Context) (*ca.SecurityConfig, error) {
	paths := ca.NewConfigPaths(filepath.Join(n.config.StateDir, certDirectory))
	var securityConfig *ca.SecurityConfig

	krw := ca.NewKeyReadWriter(paths.Node, n.unlockKey, &manager.RaftDEKData{})
	if err := krw.Migrate(); err != nil {
		return nil, err
	}

	// Check if we already have valid certificates on disk.
	rootCA, err := ca.GetLocalRootCA(paths.RootCA)
	if err != nil && err != ca.ErrNoLocalRootCA {
		return nil, err
	}
	if err == nil {
		clientTLSCreds, serverTLSCreds, err := ca.LoadTLSCreds(rootCA, krw)
		_, ok := errors.Cause(err).(ca.ErrInvalidKEK)
		switch {
		case err == nil:
			securityConfig = ca.NewSecurityConfig(&rootCA, krw, clientTLSCreds, serverTLSCreds)
			log.G(ctx).Debug("loaded CA and TLS certificates")
		case ok:
			return nil, ErrInvalidUnlockKey
		case os.IsNotExist(err):
			break
		default:
			return nil, errors.Wrapf(err, "error while loading TLS certificate in %s", paths.Node.Cert)
		}
	}

	if securityConfig == nil {
		if n.config.JoinAddr == "" {
			// If we're not joining a cluster, bootstrap a new one; we also have to set the unlock key.
			n.unlockKey = nil
			if n.config.AutoLockManagers {
				n.unlockKey = encryption.GenerateSecretKey()
			}
			krw = ca.NewKeyReadWriter(paths.Node, n.unlockKey, &manager.RaftDEKData{})
			rootCA, err = ca.CreateRootCA(ca.DefaultRootCN, paths.RootCA)
			if err != nil {
				return nil, err
			}
			log.G(ctx).Debug("generated CA key and certificate")
		} else if err == ca.ErrNoLocalRootCA { // from previous error loading the root CA from disk
			rootCA, err = ca.DownloadRootCA(ctx, paths.RootCA, n.config.JoinToken, n.remotes)
			if err != nil {
				return nil, err
			}
			log.G(ctx).Debug("downloaded CA certificate")
		}

		// Obtain new certs and set up TLS certificate renewal for this node:
		// - We call LoadOrCreateSecurityConfig, which blocks until a valid certificate has been issued.
		// - We retrieve the nodeID from LoadOrCreateSecurityConfig through the info channel. This allows
		// us to display the ID before the certificate gets issued (for potential approval).
		// - We wait for LoadOrCreateSecurityConfig to finish since we need a certificate to operate.
		// - Given a valid certificate, spin up a renewal goroutine that will ensure that certificates stay
		// up to date.
		issueResponseChan := make(chan api.IssueNodeCertificateResponse, 1)
		go func() {
			select {
			case <-ctx.Done():
			case resp := <-issueResponseChan:
				log.G(log.WithModule(ctx, "tls")).WithFields(logrus.Fields{
					"node.id": resp.NodeID,
				}).Debugf("loaded TLS certificate")
				n.Lock()
				n.nodeID = resp.NodeID
				n.nodeMembership = resp.NodeMembership
				n.Unlock()
				close(n.certificateRequested)
			}
		}()

		// LoadOrCreateSecurityConfig is the point at which a new node joining a cluster will retrieve TLS
		// certificates and write them to disk
		securityConfig, err = ca.LoadOrCreateSecurityConfig(
			ctx, rootCA, n.config.JoinToken, ca.ManagerRole, n.remotes, issueResponseChan, krw)
		if err != nil {
			if _, ok := errors.Cause(err).(ca.ErrInvalidKEK); ok {
				return nil, ErrInvalidUnlockKey
			}
			return nil, err
		}
	}

	n.Lock()
	n.role = securityConfig.ClientTLSCreds.Role()
	n.nodeID = securityConfig.ClientTLSCreds.NodeID()
	n.nodeMembership = api.NodeMembershipAccepted
	n.roleCond.Broadcast()
	n.Unlock()

	return securityConfig, nil
}
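
The goroutine around issueResponseChan is a one-shot, context-cancellable receive. Stripped of the swarmkit types, its shape reduces to the following hedged sketch; all names here are illustrative, not from the source.

// awaitFirst distills the pattern above: block until either the context is
// cancelled or a single value arrives on the channel, then run the callback
// exactly once. The sender's buffered channel guarantees the producer never
// blocks even if this consumer has already given up.
func awaitFirst(ctx context.Context, ch <-chan string, onValue func(string)) {
	select {
	case <-ctx.Done():
		// Caller gave up; any pending value is simply dropped.
	case v := <-ch:
		onValue(v)
	}
}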
Example #5
// UpdateCluster updates a Cluster referenced by ClusterID with the given ClusterSpec.
// - Returns `NotFound` if the Cluster is not found.
// - Returns `InvalidArgument` if the ClusterSpec is malformed.
// - Returns `Unimplemented` if the ClusterSpec references unimplemented features.
// - Returns an error if the update fails.
func (s *Server) UpdateCluster(ctx context.Context, request *api.UpdateClusterRequest) (*api.UpdateClusterResponse, error) {
	if request.ClusterID == "" || request.ClusterVersion == nil {
		return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
	}
	if err := validateClusterSpec(request.Spec); err != nil {
		return nil, err
	}

	var cluster *api.Cluster
	err := s.store.Update(func(tx store.Tx) error {
		cluster = store.GetCluster(tx, request.ClusterID)
		if cluster == nil {
			return nil
		}
		cluster.Meta.Version = *request.ClusterVersion
		cluster.Spec = *request.Spec.Copy()

		expireBlacklistedCerts(cluster)

		if request.Rotation.WorkerJoinToken {
			cluster.RootCA.JoinTokens.Worker = ca.GenerateJoinToken(s.rootCA)
		}
		if request.Rotation.ManagerJoinToken {
			cluster.RootCA.JoinTokens.Manager = ca.GenerateJoinToken(s.rootCA)
		}

		var unlockKeys []*api.EncryptionKey
		var managerKey *api.EncryptionKey
		for _, eKey := range cluster.UnlockKeys {
			if eKey.Subsystem == ca.ManagerRole {
				if !cluster.Spec.EncryptionConfig.AutoLockManagers {
					continue
				}
				managerKey = eKey
			}
			unlockKeys = append(unlockKeys, eKey)
		}

		switch {
		case !cluster.Spec.EncryptionConfig.AutoLockManagers:
			break
		case managerKey == nil:
			unlockKeys = append(unlockKeys, &api.EncryptionKey{
				Subsystem: ca.ManagerRole,
				Key:       encryption.GenerateSecretKey(),
			})
		case request.Rotation.ManagerUnlockKey:
			managerKey.Key = encryption.GenerateSecretKey()
		}
		cluster.UnlockKeys = unlockKeys

		return store.UpdateCluster(tx, cluster)
	})
	if err != nil {
		return nil, err
	}
	if cluster == nil {
		return nil, grpc.Errorf(codes.NotFound, "cluster %s not found", request.ClusterID)
	}

	redactedClusters := redactClusters([]*api.Cluster{cluster})

	// WARN: we should never return cluster here. We need to redact the private fields first.
	return &api.UpdateClusterResponse{
		Cluster: redactedClusters[0],
	}, nil
}