Example No. 1
1
// Add adds a new node and returns it; it replaces an existing node without notification.
func (s *nodeStore) Add(n *api.Node, expireFunc func()) *registeredNode {
	s.mu.Lock()
	defer s.mu.Unlock()
	var attempts int
	var registered time.Time
	if existRn, ok := s.nodes[n.ID]; ok {
		attempts = existRn.Attempts
		registered = existRn.Registered
		existRn.Heartbeat.Stop()
		delete(s.nodes, n.ID)
	}
	if registered.IsZero() {
		registered = time.Now()
	}
	rn := &registeredNode{
		SessionID:  identity.NewID(), // session ID is local to the dispatcher.
		Node:       n,
		Registered: registered,
		Attempts:   attempts,
		Disconnect: make(chan struct{}),
	}
	s.nodes[n.ID] = rn
	rn.Heartbeat = heartbeat.New(s.periodChooser.Choose()*s.gracePeriodMultiplierNormal, expireFunc)
	return rn
}
Example No. 2
0
// CreateSecurityConfig creates a new key and cert for this node, either locally
// or via a remote CA.
func (rootCA RootCA) CreateSecurityConfig(ctx context.Context, krw *KeyReadWriter, config CertificateRequestConfig) (*SecurityConfig, error) {
	ctx = log.WithModule(ctx, "tls")

	var (
		tlsKeyPair *tls.Certificate
		err        error
	)

	if rootCA.CanSign() {
		// Create a new random ID for this certificate
		cn := identity.NewID()
		org := identity.NewID()

		proposedRole := ManagerRole
		tlsKeyPair, err = rootCA.IssueAndSaveNewCertificates(krw, cn, proposedRole, org)
		if err != nil {
			log.G(ctx).WithFields(logrus.Fields{
				"node.id":   cn,
				"node.role": proposedRole,
			}).WithError(err).Errorf("failed to issue and save new certificate")
			return nil, err
		}

		log.G(ctx).WithFields(logrus.Fields{
			"node.id":   cn,
			"node.role": proposedRole,
		}).Debug("issued new TLS certificate")
	} else {
		// Request certificate issuance from a remote CA.
		tlsKeyPair, err = rootCA.RequestAndSaveNewCertificates(ctx, krw, config)
		if err != nil {
			log.G(ctx).WithError(err).Error("failed to request save new certificate")
			return nil, err
		}
	}
	// Create the Server TLS Credentials for this node. These will not be used by workers.
	serverTLSCreds, err := rootCA.NewServerTLSCredentials(tlsKeyPair)
	if err != nil {
		return nil, err
	}

	// Create a TLSConfig to be used when this node connects as a client to another remote node.
	// We're using ManagerRole as remote serverName for TLS host verification
	clientTLSCreds, err := rootCA.NewClientTLSCredentials(tlsKeyPair, ManagerRole)
	if err != nil {
		return nil, err
	}
	log.G(ctx).WithFields(logrus.Fields{
		"node.id":   clientTLSCreds.NodeID(),
		"node.role": clientTLSCreds.Role(),
	}).Debugf("new node credentials generated: %s", krw.Target())

	return NewSecurityConfig(&rootCA, krw, clientTLSCreds, serverTLSCreds), nil
}
Example No. 3
0
// If there are CAs and TLS certs on disk, it tries to load and fails if there
// are any errors, even if a join token is provided.
func TestLoadSecurityConfigLoadFromDisk(t *testing.T) {
	tempdir, err := ioutil.TempDir("", "test-load-node-tls")
	require.NoError(t, err)
	defer os.RemoveAll(tempdir)

	paths := ca.NewConfigPaths(filepath.Join(tempdir, "certificates"))

	tc := cautils.NewTestCA(t)
	defer tc.Stop()
	peer, err := tc.ConnBroker.Remotes().Select()
	require.NoError(t, err)

	// Load successfully with valid passphrase
	rootCA, err := ca.CreateRootCA(ca.DefaultRootCN, paths.RootCA)
	require.NoError(t, err)
	krw := ca.NewKeyReadWriter(paths.Node, []byte("passphrase"), nil)
	_, err = rootCA.IssueAndSaveNewCertificates(krw, identity.NewID(), ca.WorkerRole, identity.NewID())
	require.NoError(t, err)

	node, err := New(&Config{
		StateDir:  tempdir,
		JoinAddr:  peer.Addr,
		JoinToken: tc.ManagerToken,
		UnlockKey: []byte("passphrase"),
	})
	require.NoError(t, err)
	securityConfig, err := node.loadSecurityConfig(context.Background())
	require.NoError(t, err)
	require.NotNil(t, securityConfig)

	// Invalid passphrase
	node, err = New(&Config{
		StateDir:  tempdir,
		JoinAddr:  peer.Addr,
		JoinToken: tc.ManagerToken,
	})
	require.NoError(t, err)
	_, err = node.loadSecurityConfig(context.Background())
	require.Equal(t, ErrInvalidUnlockKey, err)

	// Invalid CA
	rootCA, err = ca.CreateRootCA(ca.DefaultRootCN, paths.RootCA)
	require.NoError(t, err)
	node, err = New(&Config{
		StateDir:  tempdir,
		JoinAddr:  peer.Addr,
		JoinToken: tc.ManagerToken,
		UnlockKey: []byte("passphrase"),
	})
	require.NoError(t, err)
	_, err = node.loadSecurityConfig(context.Background())
	require.IsType(t, x509.UnknownAuthorityError{}, errors.Cause(err))
}
Example No. 4
0
// BootstrapCluster receives a directory and creates both new Root CA key material
// and a ManagerRole key/certificate pair to be used by the initial cluster manager
func BootstrapCluster(baseCertDir string) error {
	paths := NewConfigPaths(baseCertDir)

	rootCA, err := CreateAndWriteRootCA(rootCN, paths.RootCA)
	if err != nil {
		return err
	}

	nodeID := identity.NewID()
	newOrg := identity.NewID()
	_, err = GenerateAndSignNewTLSCert(rootCA, nodeID, ManagerRole, newOrg, paths.Node)

	return err
}
Example No. 5
0
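// genTask returns a task with freshly generated IDs, a generated status, and a minimal container spec.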
func genTask() *api.Task {
	return &api.Task{
		ID:        identity.NewID(),
		ServiceID: identity.NewID(),
		Status:    *genTaskStatus(),
		Spec: api.TaskSpec{
			Runtime: &api.TaskSpec_Container{
				Container: &api.ContainerSpec{
					Image:   "foo",
					Command: []string{"this", "-w", "works"},
				},
			},
		},
	}
}
Example No. 6
0
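// newTask creates a new task for the given service and slot, using the task's log driver if set or else the cluster default.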
func newTask(cluster *api.Cluster, service *api.Service, instance uint64) *api.Task {
	var logDriver *api.Driver
	if service.Spec.Task.LogDriver != nil {
		// use the log driver specific to the task, if we have it.
		logDriver = service.Spec.Task.LogDriver
	} else if cluster != nil {
		// pick up the cluster default, if available.
		logDriver = cluster.Spec.TaskDefaults.LogDriver // nil is okay here.
	}

	// NOTE(stevvooe): For now, we don't override the container naming and
	// labeling scheme in the agent. If we decide to do this in the future,
	// they should be overridden here.
	return &api.Task{
		ID:                 identity.NewID(),
		ServiceAnnotations: service.Spec.Annotations,
		Spec:               service.Spec.Task,
		ServiceID:          service.ID,
		Slot:               instance,
		Status: api.TaskStatus{
			State:     api.TaskStateNew,
			Timestamp: ptypes.MustTimestampProto(time.Now()),
			Message:   "created",
		},
		Endpoint: &api.Endpoint{
			Spec: service.Spec.Endpoint.Copy(),
		},
		DesiredState: api.TaskStateRunning,
		LogDriver:    logDriver,
	}
}
Example No. 7
0
// CreateNetwork creates and returns a Network based on the provided NetworkSpec.
// - Returns `InvalidArgument` if the NetworkSpec is malformed.
// - Returns an error if the creation fails.
func (s *Server) CreateNetwork(ctx context.Context, request *api.CreateNetworkRequest) (*api.CreateNetworkResponse, error) {
	// if you change this function, you have to change createInternalNetwork in
	// the tests to match it (except the part where we check the label).
	if err := validateNetworkSpec(request.Spec, s.pg); err != nil {
		return nil, err
	}

	if _, ok := request.Spec.Annotations.Labels["com.docker.swarm.internal"]; ok {
		return nil, grpc.Errorf(codes.PermissionDenied, "label com.docker.swarm.internal is for predefined internal networks and cannot be applied by users")
	}

	// TODO(mrjana): Consider using `Name` as a primary key to handle
	// duplicate creations. See #65
	n := &api.Network{
		ID:   identity.NewID(),
		Spec: *request.Spec,
	}

	err := s.store.Update(func(tx store.Tx) error {
		return store.CreateNetwork(tx, n)
	})
	if err != nil {
		return nil, err
	}

	return &api.CreateNetworkResponse{
		Network: n,
	}, nil
}
Example No. 8
0
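// setupNodes populates a fresh in-memory store with n benchmark nodes and returns the store along with the generated node IDs.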
func setupNodes(b *testing.B, n int) (*MemoryStore, []string) {
	s := NewMemoryStore(nil)

	nodeIDs := make([]string, n)

	for i := 0; i < n; i++ {
		nodeIDs[i] = identity.NewID()
	}

	b.ResetTimer()

	_ = s.Update(func(tx1 Tx) error {
		for i := 0; i < n; i++ {
			_ = CreateNode(tx1, &api.Node{
				ID: nodeIDs[i],
				Spec: api.NodeSpec{
					Annotations: api.Annotations{
						Name: "name" + strconv.Itoa(i),
					},
				},
			})
		}
		return nil
	})

	return s, nodeIDs
}
Example No. 9
0
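// newTask creates a new task for the given service and slot, assigning an annotation name of the form <ServiceName>.<Slot>.<TaskID>.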
func newTask(cluster *api.Cluster, service *api.Service, slot uint64) *api.Task {
	var logDriver *api.Driver
	if service.Spec.Task.LogDriver != nil {
		// use the log driver specific to the task, if we have it.
		logDriver = service.Spec.Task.LogDriver
	} else if cluster != nil {
		// pick up the cluster default, if available.
		logDriver = cluster.Spec.TaskDefaults.LogDriver // nil is okay here.
	}

	taskID := identity.NewID()
	// We use the following scheme to assign Task names to Annotations:
	// Annotations.Name := <ServiceAnnotations.Name>.<Slot>.<TaskID>
	name := fmt.Sprintf("%v.%v.%v", service.Spec.Annotations.Name, slot, taskID)

	return &api.Task{
		ID:                 taskID,
		Annotations:        api.Annotations{Name: name},
		ServiceAnnotations: service.Spec.Annotations,
		Spec:               service.Spec.Task,
		ServiceID:          service.ID,
		Slot:               slot,
		Status: api.TaskStatus{
			State:     api.TaskStateNew,
			Timestamp: ptypes.MustTimestampProto(time.Now()),
			Message:   "created",
		},
		Endpoint: &api.Endpoint{
			Spec: service.Spec.Endpoint.Copy(),
		},
		DesiredState: api.TaskStateRunning,
		LogDriver:    logDriver,
	}
}
Example No. 10
0
// CreateService creates and returns a Service based on the provided ServiceSpec.
// - Returns `InvalidArgument` if the ServiceSpec is malformed.
// - Returns `Unimplemented` if the ServiceSpec references unimplemented features.
// - Returns `AlreadyExists` if the ServiceID conflicts.
// - Returns an error if the creation fails.
func (s *Server) CreateService(ctx context.Context, request *api.CreateServiceRequest) (*api.CreateServiceResponse, error) {
	if err := validateServiceSpec(request.Spec); err != nil {
		return nil, err
	}

	if err := s.checkPortConflicts(request.Spec, ""); err != nil {
		return nil, err
	}

	// TODO(aluzzardi): Consider using `Name` as a primary key to handle
	// duplicate creations. See #65
	service := &api.Service{
		ID:   identity.NewID(),
		Spec: *request.Spec,
	}

	err := s.store.Update(func(tx store.Tx) error {
		return store.CreateService(tx, service)
	})
	if err != nil {
		return nil, err
	}

	return &api.CreateServiceResponse{
		Service: service,
	}, nil
}
Example No. 11
0
// NewInitNode creates a new raft node initiating the cluster
// for other members to join
func NewInitNode(t *testing.T, tc *cautils.TestCA, raftConfig *api.RaftConfig, opts ...raft.NewNodeOptions) (*TestNode, *fakeclock.FakeClock) {
	ctx := context.Background()
	clockSource := fakeclock.NewFakeClock(time.Now())
	n := NewNode(t, clockSource, tc, opts...)

	err := n.Node.JoinAndStart()
	require.NoError(t, err, "can't join cluster")

	leadershipCh, cancel := n.SubscribeLeadership()
	defer cancel()

	go n.Run(ctx)

	// Wait for the node to become the leader.
	<-leadershipCh

	if raftConfig != nil {
		assert.NoError(t, n.MemoryStore().Update(func(tx store.Tx) error {
			return store.CreateCluster(tx, &api.Cluster{
				ID: identity.NewID(),
				Spec: api.ClusterSpec{
					Annotations: api.Annotations{
						Name: store.DefaultClusterName,
					},
					Raft: *raftConfig,
				},
			})
		}))
	}

	return n, clockSource
}
Example No. 12
0
// newTestNode creates a new node with a specific role (manager or agent) and joins it to
// an existing cluster. If joinAddr is an empty string, a new cluster will be initialized.
// It uses TestExecutor as the executor. If lateBind is set, the remote API port is not
// bound. If rootCA is set, this root is used to bootstrap the node's TLS certs.
func newTestNode(joinAddr, joinToken string, lateBind bool, rootCA *ca.RootCA) (*testNode, error) {
	tmpDir, err := ioutil.TempDir("", "swarmkit-integration-")
	if err != nil {
		return nil, err
	}

	cAddr := filepath.Join(tmpDir, "control.sock")
	cfg := &node.Config{
		ListenControlAPI: cAddr,
		JoinAddr:         joinAddr,
		StateDir:         tmpDir,
		Executor:         &TestExecutor{},
		JoinToken:        joinToken,
	}
	if !lateBind {
		cfg.ListenRemoteAPI = "127.0.0.1:0"
	}
	if rootCA != nil {
		certDir := filepath.Join(tmpDir, "certificates")
		if err := os.MkdirAll(certDir, 0700); err != nil {
			return nil, err
		}
		certPaths := ca.NewConfigPaths(certDir)
		if err := ioutil.WriteFile(certPaths.RootCA.Cert, rootCA.Cert, 0644); err != nil {
			return nil, err
		}
		if err := ioutil.WriteFile(certPaths.RootCA.Key, rootCA.Key, 0600); err != nil {
			return nil, err
		}
		// generate TLS certs for this manager for bootstrapping, else the node will generate its own CA
		_, err := rootCA.IssueAndSaveNewCertificates(ca.NewKeyReadWriter(certPaths.Node, nil, nil),
			identity.NewID(), ca.ManagerRole, identity.NewID())
		if err != nil {
			return nil, err
		}
	}

	node, err := node.New(cfg)
	if err != nil {
		return nil, err
	}
	return &testNode{
		config:   cfg,
		node:     node,
		stateDir: tmpDir,
	}, nil
}
Example No. 13
0
// assumes spec is not nil
func secretFromSecretSpec(spec *api.SecretSpec) *api.Secret {
	return &api.Secret{
		ID:         identity.NewID(),
		Spec:       *spec,
		SecretSize: int64(len(spec.Data)),
		Digest:     digest.FromBytes(spec.Data).String(),
	}
}
Example No. 14
0
// AttachNetwork allows the node to request the resources
// allocation needed for a network attachment on the specific node.
// - Returns `InvalidArgument` if the Spec is malformed.
// - Returns `NotFound` if the Network is not found.
// - Returns `PermissionDenied` if the Network is not manually attachable.
// - Returns an error if the creation fails.
func (ra *ResourceAllocator) AttachNetwork(ctx context.Context, request *api.AttachNetworkRequest) (*api.AttachNetworkResponse, error) {
	nodeInfo, err := ca.RemoteNode(ctx)
	if err != nil {
		return nil, err
	}

	var network *api.Network
	ra.store.View(func(tx store.ReadTx) {
		network = store.GetNetwork(tx, request.Config.Target)
		if network == nil {
			if networks, err := store.FindNetworks(tx, store.ByName(request.Config.Target)); err == nil && len(networks) == 1 {
				network = networks[0]
			}
		}
	})
	if network == nil {
		return nil, grpc.Errorf(codes.NotFound, "network %s not found", request.Config.Target)
	}

	if !network.Spec.Attachable {
		return nil, grpc.Errorf(codes.PermissionDenied, "network %s not manually attachable", request.Config.Target)
	}

	t := &api.Task{
		ID:     identity.NewID(),
		NodeID: nodeInfo.NodeID,
		Spec: api.TaskSpec{
			Runtime: &api.TaskSpec_Attachment{
				Attachment: &api.NetworkAttachmentSpec{
					ContainerID: request.ContainerID,
				},
			},
			Networks: []*api.NetworkAttachmentConfig{
				{
					Target:    network.ID,
					Addresses: request.Config.Addresses,
				},
			},
		},
		Status: api.TaskStatus{
			State:     api.TaskStateNew,
			Timestamp: ptypes.MustTimestampProto(time.Now()),
			Message:   "created",
		},
		DesiredState: api.TaskStateRunning,
		// TODO: Add Network attachment.
	}

	if err := ra.store.Update(func(tx store.Tx) error {
		return store.CreateTask(tx, t)
	}); err != nil {
		return nil, err
	}

	return &api.AttachNetworkResponse{AttachmentID: t.ID}, nil
}
Example No. 15
0
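// This example program creates Root CA material and initial manager credentials, starts an
// external CFSSL signing server, and runs until it receives SIGTERM or SIGINT.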
func main() {
	// Create root material within the current directory.
	rootPaths := ca.CertPaths{
		Cert: filepath.Join("ca", "root.crt"),
		Key:  filepath.Join("ca", "root.key"),
	}

	// Initialize the Root CA.
	rootCA, err := ca.CreateRootCA("external-ca-example", rootPaths)
	if err != nil {
		logrus.Fatalf("unable to initialize Root CA: %s", err)
	}

	// Create the initial manager node credentials.
	nodeConfigPaths := ca.NewConfigPaths("certificates")

	clusterID := identity.NewID()
	nodeID := identity.NewID()

	kw := ca.NewKeyReadWriter(nodeConfigPaths.Node, nil, nil)
	if _, err := rootCA.IssueAndSaveNewCertificates(kw, nodeID, ca.ManagerRole, clusterID); err != nil {
		logrus.Fatalf("unable to create initial manager node credentials: %s", err)
	}

	// And copy the Root CA certificate into the node config path for its
	// CA.
	if err := ioutil.WriteFile(nodeConfigPaths.RootCA.Cert, rootCA.Cert, os.FileMode(0644)); err != nil {
		logrus.Fatalf("unable to write Root CA certificate: %s", err)
	}

	server, err := testutils.NewExternalSigningServer(rootCA, "ca")
	if err != nil {
		logrus.Fatalf("unable to start server: %s", err)
	}

	defer server.Stop()

	logrus.Infof("Now run: swarmd -d . --listen-control-api ./swarmd.sock --external-ca protocol=cfssl,url=%s", server.URL)

	sigC := make(chan os.Signal, 1)
	signal.Notify(sigC, syscall.SIGTERM, syscall.SIGINT)

	<-sigC
}
Example No. 16
0
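// createTask stores a new task with a random ID and the given desired state, asserting that the update succeeds.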
func createTask(t *testing.T, ts *testServer, desiredState api.TaskState) *api.Task {
	task := &api.Task{
		ID:           identity.NewID(),
		DesiredState: desiredState,
	}
	err := ts.Store.Update(func(tx store.Tx) error {
		return store.CreateTask(tx, task)
	})
	assert.NoError(t, err)
	return task
}
Example No. 17
0
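// newSubscription builds a subscription for the given selector and options under a freshly generated subscription ID.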
func (lb *LogBroker) newSubscription(selector *api.LogSelector, options *api.LogSubscriptionOptions) *subscription {
	lb.mu.RLock()
	defer lb.mu.RUnlock()

	subscription := newSubscription(lb.store, &api.SubscriptionMessage{
		ID:       identity.NewID(),
		Selector: selector,
		Options:  options,
	}, lb.subscriptionQueue)

	return subscription
}
Example No. 18
0
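// createClusterObject stores a default-named cluster carrying the provided acceptance policy.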
func createClusterObject(t *testing.T, s *store.MemoryStore, acceptancePolicy api.AcceptancePolicy) {
	assert.NoError(t, s.Update(func(tx store.Tx) error {
		store.CreateCluster(tx, &api.Cluster{
			ID: identity.NewID(),
			Spec: api.ClusterSpec{
				Annotations: api.Annotations{
					Name: store.DefaultClusterName,
				},
				AcceptancePolicy: acceptancePolicy,
			},
		})
		return nil
	}))
}
Example No. 19
0
// Add adds a new node and returns it; it replaces an existing node without notification.
func (s *nodeStore) Add(n *api.Node, expireFunc func()) *registeredNode {
	s.mu.Lock()
	defer s.mu.Unlock()
	if existRn, ok := s.nodes[n.ID]; ok {
		existRn.Heartbeat.Stop()
		delete(s.nodes, n.ID)
	}
	rn := &registeredNode{
		SessionID:  identity.NewID(), // session ID is local to the dispatcher.
		Node:       n,
		Disconnect: make(chan struct{}),
	}
	s.nodes[n.ID] = rn
	rn.Heartbeat = heartbeat.New(s.periodChooser.Choose()*s.gracePeriodMultiplier, expireFunc)
	return rn
}
Example No. 20
0
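// newTask creates a new task for the given service and slot with a freshly generated ID.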
func newTask(service *api.Service, instance uint64) *api.Task {
	// NOTE(stevvooe): For now, we don't override the container naming and
	// labeling scheme in the agent. If we decide to do this in the future,
	// they should be overridden here.
	return &api.Task{
		ID:                 identity.NewID(),
		ServiceAnnotations: service.Spec.Annotations,
		Spec:               service.Spec.Task,
		ServiceID:          service.ID,
		Slot:               instance,
		Status: api.TaskStatus{
			State:     api.TaskStateNew,
			Timestamp: ptypes.MustTimestampProto(time.Now()),
			Message:   "created",
		},
		DesiredState: api.TaskStateRunning,
	}
}
Example No. 21
0
// SubscribeLogs creates a log subscription and streams back logs
func (lb *LogBroker) SubscribeLogs(request *api.SubscribeLogsRequest, stream api.Logs_SubscribeLogsServer) error {
	ctx := stream.Context()

	if err := validateSelector(request.Selector); err != nil {
		return err
	}

	subscription := &api.SubscriptionMessage{
		ID:       identity.NewID(),
		Selector: request.Selector,
		Options:  request.Options,
	}

	log := log.G(ctx).WithFields(
		logrus.Fields{
			"method":          "(*LogBroker).SubscribeLogs",
			"subscription.id": subscription.ID,
		},
	)

	log.Debug("subscribed")

	publishCh, publishCancel := lb.subscribe(subscription.ID)
	defer publishCancel()

	lb.registerSubscription(subscription)
	defer lb.unregisterSubscription(subscription)

	for {
		select {
		case event := <-publishCh:
			publish := event.(*api.PublishLogsMessage)
			if err := stream.Send(&api.SubscribeLogsMessage{
				Messages: publish.Messages,
			}); err != nil {
				return err
			}
		case <-ctx.Done():
			return ctx.Err()
		case <-lb.pctx.Done():
			return nil
		}
	}
}
Example No. 22
0
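// genTask returns a container task with fixed node and service IDs for the dockerexec tests.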
func genTask(t *testing.T) *api.Task {
	const (
		nodeID    = "dockerexec-test-node-id"
		serviceID = "dockerexec-test-service"
		reference = "stevvooe/foo:latest"
	)

	return &api.Task{
		ID:        identity.NewID(),
		ServiceID: serviceID,
		NodeID:    nodeID,
		Spec: api.TaskSpec{
			Runtime: &api.TaskSpec_Container{
				Container: &api.ContainerSpec{
					Image:           reference,
					StopGracePeriod: ptypes.DurationProto(10 * time.Second),
				},
			},
		},
	}
}
Example No. 23
0
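// newTask creates a new task for the given service and slot, setting the node ID when provided
// (global mode) and naming the task via the store's task name schema.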
func newTask(cluster *api.Cluster, service *api.Service, slot uint64, nodeID string) *api.Task {
	var logDriver *api.Driver
	if service.Spec.Task.LogDriver != nil {
		// use the log driver specific to the task, if we have it.
		logDriver = service.Spec.Task.LogDriver
	} else if cluster != nil {
		// pick up the cluster default, if available.
		logDriver = cluster.Spec.TaskDefaults.LogDriver // nil is okay here.
	}

	taskID := identity.NewID()
	task := api.Task{
		ID:                 taskID,
		ServiceAnnotations: service.Spec.Annotations,
		Spec:               service.Spec.Task,
		ServiceID:          service.ID,
		Slot:               slot,
		Status: api.TaskStatus{
			State:     api.TaskStateNew,
			Timestamp: ptypes.MustTimestampProto(time.Now()),
			Message:   "created",
		},
		Endpoint: &api.Endpoint{
			Spec: service.Spec.Endpoint.Copy(),
		},
		DesiredState: api.TaskStateRunning,
		LogDriver:    logDriver,
	}

	// In global mode we also set the NodeID
	if nodeID != "" {
		task.NodeID = nodeID
	}

	// Assign name based on task name schema
	name := store.TaskName(&task)
	task.Annotations = api.Annotations{Name: name}

	return &task
}
Example No. 24
0
// CreateService creates and returns a Service based on the provided ServiceSpec.
// - Returns `InvalidArgument` if the ServiceSpec is malformed.
// - Returns `Unimplemented` if the ServiceSpec references unimplemented features.
// - Returns `AlreadyExists` if the ServiceID conflicts.
// - Returns an error if the creation fails.
func (s *Server) CreateService(ctx context.Context, request *api.CreateServiceRequest) (*api.CreateServiceResponse, error) {
	if err := validateServiceSpec(request.Spec); err != nil {
		return nil, err
	}

	if err := s.validateNetworks(request.Spec.Networks); err != nil {
		return nil, err
	}

	if err := s.checkPortConflicts(request.Spec, ""); err != nil {
		return nil, err
	}

	// TODO(aluzzardi): Consider using `Name` as a primary key to handle
	// duplicate creations. See #65
	service := &api.Service{
		ID:   identity.NewID(),
		Spec: *request.Spec,
	}

	err := s.store.Update(func(tx store.Tx) error {
		// Check to see if all the secrets being added exist as objects
		// in our datastore
		err := s.checkSecretExistence(tx, request.Spec)
		if err != nil {
			return err
		}

		return store.CreateService(tx, service)
	})
	if err != nil {
		return nil, err
	}

	return &api.CreateServiceResponse{
		Service: service,
	}, nil
}
Example No. 25
0
// createInternalNetwork creates an internal network for testing. It is the same
// as Server.CreateNetwork except without the label check.
func (s *Server) createInternalNetwork(ctx context.Context, request *api.CreateNetworkRequest) (*api.CreateNetworkResponse, error) {
	if err := validateNetworkSpec(request.Spec); err != nil {
		return nil, err
	}

	// TODO(mrjana): Consider using `Name` as a primary key to handle
	// duplicate creations. See #65
	n := &api.Network{
		ID:   identity.NewID(),
		Spec: *request.Spec,
	}

	err := s.store.Update(func(tx store.Tx) error {
		return store.CreateNetwork(tx, n)
	})
	if err != nil {
		return nil, err
	}

	return &api.CreateNetworkResponse{
		Network: n,
	}, nil
}
Example No. 26
0
// assumes spec is not nil
func secretFromSecretSpec(spec *api.SecretSpec) *api.Secret {
	return &api.Secret{
		ID:   identity.NewID(),
		Spec: *spec,
	}
}
Example No. 27
0
// LoadOrCreateSecurityConfig encapsulates the security logic behind joining a cluster.
// Every node requires at least a set of TLS certificates with which to join the cluster.
// In the case of a manager, these certificates will be used both for client and server credentials.
func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, token, proposedRole string, remotes remotes.Remotes, nodeInfo chan<- api.IssueNodeCertificateResponse) (*SecurityConfig, error) {
	ctx = log.WithModule(ctx, "tls")
	paths := NewConfigPaths(baseCertDir)

	var (
		rootCA                         RootCA
		serverTLSCreds, clientTLSCreds *MutableTLSCreds
		err                            error
	)

	// Check if we already have a CA certificate on disk. We need a CA to have a valid SecurityConfig
	rootCA, err = GetLocalRootCA(baseCertDir)
	switch err {
	case nil:
		log.G(ctx).Debug("loaded CA certificate")
	case ErrNoLocalRootCA:
		log.G(ctx).WithError(err).Debugf("failed to load local CA certificate")

		// Get a digest for the optional CA hash string that we've been provided
		// If we were provided a non-empty string, and it is an invalid hash, return
		// otherwise, allow the invalid digest through.
		var d digest.Digest
		if token != "" {
			d, err = getCAHashFromToken(token)
			if err != nil {
				return nil, err
			}
		}

		// Get the remote CA certificate, verify integrity with the
		// hash provided. Retry up to 5 times, in case the manager we
		// first try to contact is not responding properly (it may have
		// just been demoted, for example).

		for i := 0; i != 5; i++ {
			rootCA, err = GetRemoteCA(ctx, d, remotes)
			if err == nil {
				break
			}
			log.G(ctx).WithError(err).Errorf("failed to retrieve remote root CA certificate")
		}
		if err != nil {
			return nil, err
		}

		// Save root CA certificate to disk
		if err = saveRootCA(rootCA, paths.RootCA); err != nil {
			return nil, err
		}

		log.G(ctx).Debugf("retrieved remote CA certificate: %s", paths.RootCA.Cert)
	default:
		return nil, err
	}

	// At this point we've successfully loaded the CA details from disk, or
	// successfully downloaded them remotely. The next step is to try to
	// load our certificates.
	clientTLSCreds, serverTLSCreds, err = LoadTLSCreds(rootCA, paths.Node)
	if err != nil {
		log.G(ctx).WithError(err).Debugf("no node credentials found in: %s", paths.Node.Cert)

		var (
			tlsKeyPair *tls.Certificate
			err        error
		)

		if rootCA.CanSign() {
			// Create a new random ID for this certificate
			cn := identity.NewID()
			org := identity.NewID()

			if nodeInfo != nil {
				nodeInfo <- api.IssueNodeCertificateResponse{
					NodeID:         cn,
					NodeMembership: api.NodeMembershipAccepted,
				}
			}
			tlsKeyPair, err = rootCA.IssueAndSaveNewCertificates(paths.Node, cn, proposedRole, org)
			if err != nil {
				log.G(ctx).WithFields(logrus.Fields{
					"node.id":   cn,
					"node.role": proposedRole,
				}).WithError(err).Errorf("failed to issue and save new certificate")
				return nil, err
			}

			log.G(ctx).WithFields(logrus.Fields{
				"node.id":   cn,
				"node.role": proposedRole,
			}).Debug("issued new TLS certificate")
		} else {
			// There was an error loading our Credentials, let's get a new certificate issued
			// Last argument is nil because at this point we don't have any valid TLS creds
			tlsKeyPair, err = rootCA.RequestAndSaveNewCertificates(ctx, paths.Node, token, remotes, nil, nodeInfo)
			if err != nil {
				log.G(ctx).WithError(err).Error("failed to request save new certificate")
				return nil, err
			}
		}
		// Create the Server TLS Credentials for this node. These will not be used by workers.
		serverTLSCreds, err = rootCA.NewServerTLSCredentials(tlsKeyPair)
		if err != nil {
			return nil, err
		}

		// Create a TLSConfig to be used when this node connects as a client to another remote node.
		// We're using ManagerRole as remote serverName for TLS host verification
		clientTLSCreds, err = rootCA.NewClientTLSCredentials(tlsKeyPair, ManagerRole)
		if err != nil {
			return nil, err
		}
		log.G(ctx).WithFields(logrus.Fields{
			"node.id":   clientTLSCreds.NodeID(),
			"node.role": clientTLSCreds.Role(),
		}).Debugf("new node credentials generated: %s", paths.Node.Cert)
	} else {
		if nodeInfo != nil {
			nodeInfo <- api.IssueNodeCertificateResponse{
				NodeID:         clientTLSCreds.NodeID(),
				NodeMembership: api.NodeMembershipAccepted,
			}
		}
		log.G(ctx).WithFields(logrus.Fields{
			"node.id":   clientTLSCreds.NodeID(),
			"node.role": clientTLSCreds.Role(),
		}).Debug("loaded node credentials")
	}

	return NewSecurityConfig(&rootCA, clientTLSCreds, serverTLSCreds), nil
}
Example No. 28
0
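// genTaskStatus returns a task status with a randomly chosen state and a generated message.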
func genTaskStatus() *api.TaskStatus {
	return &api.TaskStatus{
		State:   taskStates[rand.Intn(len(taskStates))],
		Message: identity.NewID(), // just put some garbage here.
	}
}
Example No. 29
0
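// doNetworkInit creates the network allocator context, ensures the ingress network exists, and
// allocates any networks, nodes, services, and tasks already present in the store.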
func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
	na, err := networkallocator.New()
	if err != nil {
		return err
	}

	nc := &networkContext{
		nwkAllocator:        na,
		unallocatedTasks:    make(map[string]*api.Task),
		unallocatedServices: make(map[string]*api.Service),
		unallocatedNetworks: make(map[string]*api.Network),
		ingressNetwork:      newIngressNetwork(),
	}
	a.netCtx = nc
	defer func() {
		// Clear a.netCtx if initialization was unsuccessful.
		if err != nil {
			a.netCtx = nil
		}
	}()

	// Check if we have the ingress network. If not found create
	// it before reading all network objects for allocation.
	var networks []*api.Network
	a.store.View(func(tx store.ReadTx) {
		networks, err = store.FindNetworks(tx, store.ByName(ingressNetworkName))
		if len(networks) > 0 {
			nc.ingressNetwork = networks[0]
		}
	})
	if err != nil {
		return errors.Wrap(err, "failed to find ingress network during init")
	}

	// If ingress network is not found, create one right away
	// using the predefined template.
	if len(networks) == 0 {
		if err := a.store.Update(func(tx store.Tx) error {
			nc.ingressNetwork.ID = identity.NewID()
			if err := store.CreateNetwork(tx, nc.ingressNetwork); err != nil {
				return err
			}

			return nil
		}); err != nil {
			return errors.Wrap(err, "failed to create ingress network")
		}

		a.store.View(func(tx store.ReadTx) {
			networks, err = store.FindNetworks(tx, store.ByName(ingressNetworkName))
			if len(networks) > 0 {
				nc.ingressNetwork = networks[0]
			}
		})
		if err != nil {
			return errors.Wrap(err, "failed to find ingress network after creating it")
		}

	}

	// Try to complete ingress network allocation before anything else so
	// that we can get the preferred subnet for the ingress network.
	if !na.IsAllocated(nc.ingressNetwork) {
		if err := a.allocateNetwork(ctx, nc.ingressNetwork); err != nil {
			log.G(ctx).WithError(err).Error("failed allocating ingress network during init")
		} else if _, err := a.store.Batch(func(batch *store.Batch) error {
			if err := a.commitAllocatedNetwork(ctx, batch, nc.ingressNetwork); err != nil {
				log.G(ctx).WithError(err).Error("failed committing allocation of ingress network during init")
			}
			return nil
		}); err != nil {
			log.G(ctx).WithError(err).Error("failed committing allocation of ingress network during init")
		}
	}

	// Allocate networks already in the store before we start watching.
	a.store.View(func(tx store.ReadTx) {
		networks, err = store.FindNetworks(tx, store.All)
	})
	if err != nil {
		return errors.Wrap(err, "error listing all networks in store while trying to allocate during init")
	}

	var allocatedNetworks []*api.Network
	for _, n := range networks {
		if na.IsAllocated(n) {
			continue
		}

		if err := a.allocateNetwork(ctx, n); err != nil {
			log.G(ctx).WithError(err).Errorf("failed allocating network %s during init", n.ID)
			continue
		}
		allocatedNetworks = append(allocatedNetworks, n)
	}

	if _, err := a.store.Batch(func(batch *store.Batch) error {
		for _, n := range allocatedNetworks {
			if err := a.commitAllocatedNetwork(ctx, batch, n); err != nil {
				log.G(ctx).WithError(err).Errorf("failed committing allocation of network %s during init", n.ID)
			}
		}
		return nil
	}); err != nil {
		log.G(ctx).WithError(err).Error("failed committing allocation of networks during init")
	}

	// Allocate nodes already in the store before we process watched events.
	var nodes []*api.Node
	a.store.View(func(tx store.ReadTx) {
		nodes, err = store.FindNodes(tx, store.All)
	})
	if err != nil {
		return errors.Wrap(err, "error listing all nodes in store while trying to allocate during init")
	}

	var allocatedNodes []*api.Node
	for _, node := range nodes {
		if na.IsNodeAllocated(node) {
			continue
		}

		if node.Attachment == nil {
			node.Attachment = &api.NetworkAttachment{}
		}

		node.Attachment.Network = nc.ingressNetwork.Copy()
		if err := a.allocateNode(ctx, node); err != nil {
			log.G(ctx).WithError(err).Errorf("Failed to allocate network resources for node %s during init", node.ID)
			continue
		}

		allocatedNodes = append(allocatedNodes, node)
	}

	if _, err := a.store.Batch(func(batch *store.Batch) error {
		for _, node := range allocatedNodes {
			if err := a.commitAllocatedNode(ctx, batch, node); err != nil {
				log.G(ctx).WithError(err).Errorf("Failed to commit allocation of network resources for node %s during init", node.ID)
			}
		}
		return nil
	}); err != nil {
		log.G(ctx).WithError(err).Error("Failed to commit allocation of network resources for nodes during init")
	}

	// Allocate services already in the store before we process watched events.
	var services []*api.Service
	a.store.View(func(tx store.ReadTx) {
		services, err = store.FindServices(tx, store.All)
	})
	if err != nil {
		return errors.Wrap(err, "error listing all services in store while trying to allocate during init")
	}

	var allocatedServices []*api.Service
	for _, s := range services {
		if nc.nwkAllocator.IsServiceAllocated(s) {
			continue
		}

		if err := a.allocateService(ctx, s); err != nil {
			log.G(ctx).WithError(err).Errorf("failed allocating service %s during init", s.ID)
			continue
		}
		allocatedServices = append(allocatedServices, s)
	}

	if _, err := a.store.Batch(func(batch *store.Batch) error {
		for _, s := range allocatedServices {
			if err := a.commitAllocatedService(ctx, batch, s); err != nil {
				log.G(ctx).WithError(err).Errorf("failed committing allocation of service %s during init", s.ID)
			}
		}
		return nil
	}); err != nil {
		log.G(ctx).WithError(err).Error("failed committing allocation of services during init")
	}

	// Allocate tasks already in the store before we start watching.
	var (
		tasks          []*api.Task
		allocatedTasks []*api.Task
	)
	a.store.View(func(tx store.ReadTx) {
		tasks, err = store.FindTasks(tx, store.All)
	})
	if err != nil {
		return errors.Wrap(err, "error listing all tasks in store while trying to allocate during init")
	}

	for _, t := range tasks {
		if taskDead(t) {
			continue
		}

		var s *api.Service
		if t.ServiceID != "" {
			a.store.View(func(tx store.ReadTx) {
				s = store.GetService(tx, t.ServiceID)
			})
		}

		// Populate network attachments in the task
		// based on service spec.
		a.taskCreateNetworkAttachments(t, s)

		if taskReadyForNetworkVote(t, s, nc) {
			if t.Status.State >= api.TaskStatePending {
				continue
			}

			if a.taskAllocateVote(networkVoter, t.ID) {
				// If the task is not attached to any network, network
				// allocators job is done. Immediately cast a vote so
				// that the task can be moved to ALLOCATED state as
				// soon as possible.
				allocatedTasks = append(allocatedTasks, t)
			}
			continue
		}

		err := a.allocateTask(ctx, t)
		if err == nil {
			allocatedTasks = append(allocatedTasks, t)
		} else if err != errNoChanges {
			log.G(ctx).WithError(err).Errorf("failed allocating task %s during init", t.ID)
			nc.unallocatedTasks[t.ID] = t
		}
	}

	if _, err := a.store.Batch(func(batch *store.Batch) error {
		for _, t := range allocatedTasks {
			if err := a.commitAllocatedTask(ctx, batch, t); err != nil {
				log.G(ctx).WithError(err).Errorf("failed committing allocation of task %s during init", t.ID)
			}
		}

		return nil
	}); err != nil {
		log.G(ctx).WithError(err).Error("failed committing allocation of tasks during init")
	}

	return nil
}
Example No. 30
0
// LoadOrCreateSecurityConfig encapsulates the security logic behind joining a cluster.
// Every node requires at least a set of TLS certificates with which to join the cluster.
// In the case of a manager, these certificates will be used both for client and server credentials.
func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, caHash, secret, proposedRole string, picker *picker.Picker, nodeInfo chan<- api.IssueNodeCertificateResponse) (*SecurityConfig, error) {
	paths := NewConfigPaths(baseCertDir)

	var (
		rootCA                         RootCA
		serverTLSCreds, clientTLSCreds *MutableTLSCreds
		err                            error
	)

	// Check if we already have a CA certificate on disk. We need a CA to have a valid SecurityConfig
	rootCA, err = GetLocalRootCA(baseCertDir)
	switch err {
	case nil:
		log.Debugf("loaded local CA certificate: %s.", paths.RootCA.Cert)
	case ErrNoLocalRootCA:
		log.Debugf("no valid local CA certificate found: %v", err)

		// Get a digest for the optional CA hash string that we've been provided
		// If we were provided a non-empty string, and it is an invalid hash, return
		// otherwise, allow the invalid digest through.
		d, err := digest.ParseDigest(caHash)
		if err != nil && caHash != "" {
			return nil, err
		}

		// Get the remote CA certificate, verify integrity with the hash provided
		rootCA, err = GetRemoteCA(ctx, d, picker)
		if err != nil {
			return nil, err
		}

		// Save root CA certificate to disk
		if err = saveRootCA(rootCA, paths.RootCA); err != nil {
			return nil, err
		}

		log.Debugf("downloaded remote CA certificate.")
	default:
		return nil, err
	}

	// At this point we've successfully loaded the CA details from disk, or successfully
	// downloaded them remotely.
	// The next step is to try to load our certificates.
	clientTLSCreds, serverTLSCreds, err = LoadTLSCreds(rootCA, paths.Node)
	if err != nil {
		log.Debugf("no valid local TLS credentials found: %v", err)

		var (
			tlsKeyPair *tls.Certificate
			err        error
		)

		if rootCA.CanSign() {
			// Create a new random ID for this certificate
			cn := identity.NewID()
			org := identity.NewID()

			if nodeInfo != nil {
				nodeInfo <- api.IssueNodeCertificateResponse{
					NodeID:         cn,
					NodeMembership: api.NodeMembershipAccepted,
				}
			}
			tlsKeyPair, err = rootCA.IssueAndSaveNewCertificates(paths.Node, cn, proposedRole, org)
			if err != nil {
				return nil, err
			}
		} else {
			// There was an error loading our Credentials, let's get a new certificate issued
			// Last argument is nil because at this point we don't have any valid TLS creds
			tlsKeyPair, err = rootCA.RequestAndSaveNewCertificates(ctx, paths.Node, proposedRole, secret, picker, nil, nodeInfo)
			if err != nil {
				return nil, err
			}

		}
		// Create the Server TLS Credentials for this node. These will not be used by agents.
		serverTLSCreds, err = rootCA.NewServerTLSCredentials(tlsKeyPair)
		if err != nil {
			return nil, err
		}

		// Create a TLSConfig to be used when this node connects as a client to another remote node.
		// We're using ManagerRole as remote serverName for TLS host verification
		clientTLSCreds, err = rootCA.NewClientTLSCredentials(tlsKeyPair, ManagerRole)
		if err != nil {
			return nil, err
		}
		log.Debugf("new TLS credentials generated: %s.", paths.Node.Cert)
	} else {
		if nodeInfo != nil {
			nodeInfo <- api.IssueNodeCertificateResponse{
				NodeID:         clientTLSCreds.NodeID(),
				NodeMembership: api.NodeMembershipAccepted,
			}
		}
		log.Debugf("loaded local TLS credentials: %s.", paths.Node.Cert)
	}

	return &SecurityConfig{
		rootCA: &rootCA,

		ServerTLSCreds: serverTLSCreds,
		ClientTLSCreds: clientTLSCreds,
	}, nil
}