func (mc *managersCluster) addAgents(count int) error { var addrs []api.Peer for _, m := range mc.ms { addrs = append(addrs, api.Peer{Addr: m.addr}) } for i := 0; i < count; i++ { asConfig, err := mc.tc.NewNodeConfig(ca.AgentRole) if err != nil { return err } managers := remotes.NewRemotes(addrs...) id := strconv.Itoa(rand.Int()) a, err := agent.New(&agent.Config{ Hostname: "hostname_" + id, Managers: managers, Executor: &NoopExecutor{}, Credentials: asConfig.ClientTLSCreds, }) if err != nil { return err } if err := a.Start(context.Background()); err != nil { return err } mc.agents = append(mc.agents, a) } return nil }
func TestAgentStartStop(t *testing.T) { tc := testutils.NewTestCA(t) defer tc.Stop() agentSecurityConfig, err := tc.NewNodeConfig(ca.AgentRole) assert.NoError(t, err) addr := "localhost:4949" remotes := remotes.NewRemotes(api.Peer{Addr: addr}) db, cleanup := storageTestEnv(t) defer cleanup() agent, err := New(&Config{ Executor: &NoopExecutor{}, Managers: remotes, Credentials: agentSecurityConfig.ClientTLSCreds, DB: db, }) assert.NoError(t, err) assert.NotNil(t, agent) ctx, _ := context.WithTimeout(context.Background(), 5000*time.Millisecond) assert.Equal(t, errAgentNotStarted, agent.Stop(ctx)) assert.NoError(t, agent.Start(ctx)) if err := agent.Start(ctx); err != errAgentStarted { t.Fatalf("expected agent started error: %v", err) } assert.NoError(t, agent.Stop(ctx)) }
func agentTestEnv(t *testing.T) (*Agent, func()) { var cleanup []func() tc := testutils.NewTestCA(t) cleanup = append(cleanup, func() { tc.Stop() }) agentSecurityConfig, err := tc.NewNodeConfig(ca.AgentRole) assert.NoError(t, err) addr := "localhost:4949" remotes := remotes.NewRemotes(api.Peer{Addr: addr}) db, cleanupStorage := storageTestEnv(t) cleanup = append(cleanup, func() { cleanupStorage() }) agent, err := New(&Config{ Executor: &NoopExecutor{}, Managers: remotes, Credentials: agentSecurityConfig.ClientTLSCreds, DB: db, }) return agent, func() { for i := len(cleanup) - 1; i >= 0; i-- { cleanup[i]() } } }
func newPersistentRemotes(f string, peers ...api.Peer) *persistentRemotes { pr := &persistentRemotes{ storePath: f, Remotes: remotes.NewRemotes(peers...), } pr.c = sync.NewCond(pr.RLocker()) return pr }
func (m *Manager) updateKEK(ctx context.Context, cluster *api.Cluster) error { securityConfig := m.config.SecurityConfig nodeID := m.config.SecurityConfig.ClientTLSCreds.NodeID() logger := log.G(ctx).WithFields(logrus.Fields{ "node.id": nodeID, "node.role": ca.ManagerRole, }) kekData := ca.KEKData{Version: cluster.Meta.Version.Index} for _, encryptionKey := range cluster.UnlockKeys { if encryptionKey.Subsystem == ca.ManagerRole { kekData.KEK = encryptionKey.Key break } } updated, unlockedToLocked, err := m.dekRotator.MaybeUpdateKEK(kekData) if err != nil { logger.WithError(err).Errorf("failed to re-encrypt TLS key with a new KEK") return err } if updated { logger.Debug("successfully rotated KEK") } if unlockedToLocked { // a best effort attempt to update the TLS certificate - if it fails, it'll be updated the next time it renews; // don't wait because it might take a bit go func() { insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true}) conn, err := grpc.Dial( m.config.ControlAPI, grpc.WithTransportCredentials(insecureCreds), grpc.WithDialer( func(addr string, timeout time.Duration) (net.Conn, error) { return xnet.DialTimeoutLocal(addr, timeout) }), ) if err != nil { logger.WithError(err).Error("failed to connect to local manager socket after locking the cluster") return } defer conn.Close() connBroker := connectionbroker.New(remotes.NewRemotes()) connBroker.SetLocalConn(conn) if err := ca.RenewTLSConfigNow(ctx, securityConfig, connBroker); err != nil { logger.WithError(err).Error("failed to download new TLS certificate after locking the cluster") } }() } return nil }
func (m *Manager) updateKEK(ctx context.Context, cluster *api.Cluster) error { securityConfig := m.config.SecurityConfig nodeID := m.config.SecurityConfig.ClientTLSCreds.NodeID() logger := log.G(ctx).WithFields(logrus.Fields{ "node.id": nodeID, "node.role": ca.ManagerRole, }) // we are our own peer from which we get certs - try to connect over the local socket r := remotes.NewRemotes(api.Peer{Addr: m.Addr(), NodeID: nodeID}) kekData := ca.KEKData{Version: cluster.Meta.Version.Index} for _, encryptionKey := range cluster.UnlockKeys { if encryptionKey.Subsystem == ca.ManagerRole { kekData.KEK = encryptionKey.Key break } } updated, unlockedToLocked, err := m.dekRotator.MaybeUpdateKEK(kekData) if err != nil { logger.WithError(err).Errorf("failed to re-encrypt TLS key with a new KEK") return err } if updated { logger.Debug("successfully rotated KEK") } if unlockedToLocked { // a best effort attempt to update the TLS certificate - if it fails, it'll be updated the next time it renews; // don't wait because it might take a bit go func() { if err := ca.RenewTLSConfigNow(ctx, securityConfig, r); err != nil { logger.WithError(err).Errorf("failed to download new TLS certificate after locking the cluster") } }() } return nil }
// NewTestCA is a helper method that creates a TestCA and a bunch of default
// connections and security configs.
//
// It stands up an in-memory store, a root CA persisted under a fresh temp
// directory, security configs for a manager, a manager in a different
// organization, and an agent, plus a running gRPC CA server with four
// pre-dialed client connections (insecure, agent, manager, foreign-org
// manager). Callers are expected to call TestCA.Stop to tear it down.
func NewTestCA(t *testing.T) *TestCA {
	tempBaseDir, err := ioutil.TempDir("", "swarm-ca-test-")
	assert.NoError(t, err)

	s := store.NewMemoryStore(nil)

	paths := ca.NewConfigPaths(tempBaseDir)
	organization := identity.NewID()

	rootCA, err := createAndWriteRootCA("swarm-test-CA", paths.RootCA, ca.DefaultNodeCertExpiration)
	assert.NoError(t, err)

	var (
		externalSigningServer *ExternalSigningServer
		externalCAs           []*api.ExternalCA
	)

	// External is a package-level switch: when set, signing goes through a
	// CFSSL-protocol external CA server rather than the local root CA.
	if External {
		// Start the CA API server.
		externalSigningServer, err = NewExternalSigningServer(rootCA, tempBaseDir)
		assert.NoError(t, err)
		externalCAs = []*api.ExternalCA{
			{
				Protocol: api.ExternalCA_CAProtocolCFSSL,
				URL:      externalSigningServer.URL,
			},
		}
	}

	managerConfig, err := genSecurityConfig(s, rootCA, ca.ManagerRole, organization, "", External)
	assert.NoError(t, err)

	// Second manager config in a different org, for cross-org auth tests.
	managerDiffOrgConfig, err := genSecurityConfig(s, rootCA, ca.ManagerRole, "swarm-test-org-2", "", External)
	assert.NoError(t, err)

	agentConfig, err := genSecurityConfig(s, rootCA, ca.AgentRole, organization, "", External)
	assert.NoError(t, err)

	// Ephemeral port on loopback; the CA gRPC server serves on this listener.
	l, err := net.Listen("tcp", "127.0.0.1:0")
	assert.NoError(t, err)

	baseOpts := []grpc.DialOption{grpc.WithTimeout(10 * time.Second)}
	// Four credential flavors: TLS-without-verification, agent, manager,
	// and manager-from-another-org.
	insecureClientOpts := append(baseOpts, grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})))
	clientOpts := append(baseOpts, grpc.WithTransportCredentials(agentConfig.ClientTLSCreds))
	managerOpts := append(baseOpts, grpc.WithTransportCredentials(managerConfig.ClientTLSCreds))
	managerDiffOrgOpts := append(baseOpts, grpc.WithTransportCredentials(managerDiffOrgConfig.ClientTLSCreds))

	conn1, err := grpc.Dial(l.Addr().String(), insecureClientOpts...)
	assert.NoError(t, err)

	conn2, err := grpc.Dial(l.Addr().String(), clientOpts...)
	assert.NoError(t, err)

	conn3, err := grpc.Dial(l.Addr().String(), managerOpts...)
	assert.NoError(t, err)

	conn4, err := grpc.Dial(l.Addr().String(), managerDiffOrgOpts...)
	assert.NoError(t, err)

	serverOpts := []grpc.ServerOption{grpc.Creds(managerConfig.ServerTLSCreds)}
	grpcServer := grpc.NewServer(serverOpts...)

	managerToken := ca.GenerateJoinToken(&rootCA)
	workerToken := ca.GenerateJoinToken(&rootCA)

	createClusterObject(t, s, organization, workerToken, managerToken, externalCAs...)
	caServer := ca.NewServer(s, managerConfig)
	api.RegisterCAServer(grpcServer, caServer)
	api.RegisterNodeCAServer(grpcServer, caServer)

	ctx := context.Background()

	// NOTE(review): Serve/Run errors are intentionally dropped — this is
	// test scaffolding torn down by TestCA.Stop.
	go grpcServer.Serve(l)
	go caServer.Run(ctx)

	// Wait for caServer to be ready to serve
	<-caServer.Ready()

	remotes := remotes.NewRemotes(api.Peer{Addr: l.Addr().String()})
	caClients := []api.CAClient{api.NewCAClient(conn1), api.NewCAClient(conn2), api.NewCAClient(conn3)}
	nodeCAClients := []api.NodeCAClient{api.NewNodeCAClient(conn1), api.NewNodeCAClient(conn2), api.NewNodeCAClient(conn3), api.NewNodeCAClient(conn4)}
	conns := []*grpc.ClientConn{conn1, conn2, conn3, conn4}

	return &TestCA{
		RootCA:                rootCA,
		ExternalSigningServer: externalSigningServer,
		MemoryStore:           s,
		TempDir:               tempBaseDir,
		Organization:          organization,
		Paths:                 paths,
		Context:               ctx,
		CAClients:             caClients,
		NodeCAClients:         nodeCAClients,
		Conns:                 conns,
		Server:                grpcServer,
		CAServer:              caServer,
		WorkerToken:           workerToken,
		ManagerToken:          managerToken,
		Remotes:               remotes,
	}
}