Example #1
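// TestHeartbeatTimeout verifies that a node whose agent stops sending
// heartbeats is marked DOWN in the store and deregistered, so a late
// heartbeat fails with ErrNodeNotRegistered.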
func TestHeartbeatTimeout(t *testing.T) {
	t.Parallel()

	cfg := DefaultConfig()
	cfg.HeartbeatPeriod = 100 * time.Millisecond
	cfg.HeartbeatEpsilon = 0
	gd, err := startDispatcher(cfg)
	assert.NoError(t, err)
	defer gd.Close()

	var expectedSessionID string
	{
		stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{})
		assert.NoError(t, err)
		resp, err := stream.Recv()
		assert.NoError(t, err)
		assert.NotEmpty(t, resp.SessionID)
		expectedSessionID = resp.SessionID
	}
	time.Sleep(500 * time.Millisecond)

	gd.Store.View(func(readTx store.ReadTx) {
		storeNodes, err := store.FindNodes(readTx, store.ByIDPrefix(gd.SecurityConfigs[0].ClientTLSCreds.NodeID()))
		assert.NoError(t, err)
		assert.NotEmpty(t, storeNodes)
		assert.Equal(t, api.NodeStatus_DOWN, storeNodes[0].Status.State)
	})

	// check that node is deregistered
	resp, err := gd.Clients[0].Heartbeat(context.Background(), &api.HeartbeatRequest{SessionID: expectedSessionID})
	assert.Nil(t, resp)
	assert.Error(t, err)
	assert.Equal(t, grpc.ErrorDesc(err), ErrNodeNotRegistered.Error())
}
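The test above deliberately lets the session lapse. For contrast, here is a minimal sketch of the keep-alive loop an agent would run against the same API to stay registered; keepAlive is a hypothetical helper, and a real agent would honor the period the dispatcher returns in each heartbeat response rather than a fixed ticker.

// keepAlive heartbeats at half the configured period so the dispatcher
// never observes a missed window (hypothetical helper, for illustration).
func keepAlive(ctx context.Context, client api.DispatcherClient, sessionID string, period time.Duration) error {
	ticker := time.NewTicker(period / 2)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			// Once the dispatcher deregisters the node, this returns an
			// error wrapping ErrNodeNotRegistered, as asserted above.
			if _, err := client.Heartbeat(ctx, &api.HeartbeatRequest{SessionID: sessionID}); err != nil {
				return err
			}
		}
	}
}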
Example #2
File: secret.go Project: harche/docker
// ListSecrets returns a `ListSecretsResponse` with a list of all non-internal `Secret`s being
// managed, or all secrets matching any name in `ListSecretsRequest.Filters.Names`, any
// name prefix in `ListSecretsRequest.Filters.NamePrefixes`, or any id prefix in
// `ListSecretsRequest.Filters.IDPrefixes`.
// - Returns an error if listing fails.
func (s *Server) ListSecrets(ctx context.Context, request *api.ListSecretsRequest) (*api.ListSecretsResponse, error) {
	var (
		secrets     []*api.Secret
		respSecrets []*api.Secret
		err         error
		byFilters   []store.By
		by          store.By
		labels      map[string]string
	)

	// return all secrets that match any of the names, name prefixes, or ID prefixes
	if request.Filters != nil {
		for _, name := range request.Filters.Names {
			byFilters = append(byFilters, store.ByName(name))
		}
		for _, prefix := range request.Filters.NamePrefixes {
			byFilters = append(byFilters, store.ByNamePrefix(prefix))
		}
		for _, prefix := range request.Filters.IDPrefixes {
			byFilters = append(byFilters, store.ByIDPrefix(prefix))
		}
		labels = request.Filters.Labels
	}

	// Combine the collected selectors: no filters means all secrets;
	// multiple selectors are OR'd together.
	switch len(byFilters) {
	case 0:
		by = store.All
	case 1:
		by = byFilters[0]
	default:
		by = store.Or(byFilters...)
	}

	s.store.View(func(tx store.ReadTx) {
		secrets, err = store.FindSecrets(tx, by)
	})
	if err != nil {
		return nil, err
	}

	// strip secret data from the secret, filter by label, and filter out all internal secrets
	for _, secret := range secrets {
		if secret.Internal || !filterMatchLabels(secret.Spec.Annotations.Labels, labels) {
			continue
		}
		secret.Spec.Data = nil // clean the actual secret data so it's never returned
		respSecrets = append(respSecrets, secret)
	}

	return &api.ListSecretsResponse{Secrets: respSecrets}, nil
}
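A hedged usage sketch of the filter semantics above; client and ctx are assumed to be an api.ControlClient and a context.Context, and ListSecretsRequest_Filters is the usual protobuf-generated name for the nested filter message:

// List every secret named "db-password" or whose ID starts with "abc1"
// (both values are illustrative). The different filter kinds are OR'd
// together; label matching and the internal-secret check are then
// applied server-side.
resp, err := client.ListSecrets(ctx, &api.ListSecretsRequest{
	Filters: &api.ListSecretsRequest_Filters{
		Names:      []string{"db-password"},
		IDPrefixes: []string{"abc1"},
	},
})
if err != nil {
	return err
}
for _, secret := range resp.Secrets {
	fmt.Println(secret.ID, secret.Spec.Annotations.Name) // Spec.Data is stripped by the server
}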
Example #3
// Assignments is a stream of assignments for a node. Each message contains
// either the full list of tasks and secrets for the node or an incremental update.
func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatcher_AssignmentsServer) error {
	nodeInfo, err := ca.RemoteNode(stream.Context())
	if err != nil {
		return err
	}
	nodeID := nodeInfo.NodeID

	dctx, err := d.isRunningLocked()
	if err != nil {
		return err
	}

	fields := logrus.Fields{
		"node.id":      nodeID,
		"node.session": r.SessionID,
		"method":       "(*Dispatcher).Assignments",
	}
	if nodeInfo.ForwardedBy != nil {
		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
	}
	log := log.G(stream.Context()).WithFields(fields)
	log.Debugf("")

	if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
		return err
	}

	var (
		sequence  int64
		appliesTo string
		initial   api.AssignmentsMessage
	)
	tasksMap := make(map[string]*api.Task)
	tasksUsingSecret := make(map[string]map[string]struct{})

	// sendMessage stamps every outgoing message with an increasing sequence
	// number: ResultsIn names the state the message produces and AppliesTo
	// the state it builds on (the previous ResultsIn), so the agent can
	// detect a dropped update.
	sendMessage := func(msg api.AssignmentsMessage, assignmentType api.AssignmentsMessage_Type) error {
		sequence++
		msg.AppliesTo = appliesTo
		msg.ResultsIn = strconv.FormatInt(sequence, 10)
		appliesTo = msg.ResultsIn
		msg.Type = assignmentType

		if err := stream.Send(&msg); err != nil {
			return err
		}
		return nil
	}

	// returns a slice of new secrets to send down
	addSecretsForTask := func(readTx store.ReadTx, t *api.Task) []*api.Secret {
		container := t.Spec.GetContainer()
		if container == nil {
			return nil
		}
		var newSecrets []*api.Secret
		for _, secretRef := range container.Secrets {
			// An empty ID prefix would match every secret, so skip
			// references that carry no SecretID.
			if secretRef.SecretID == "" {
				log.Debugf("invalid secret reference")
				continue
			}
			secretID := secretRef.SecretID
			log := log.WithFields(logrus.Fields{
				"secret.id":   secretID,
				"secret.name": secretRef.SecretName,
			})

			if len(tasksUsingSecret[secretID]) == 0 {
				tasksUsingSecret[secretID] = make(map[string]struct{})

				secrets, err := store.FindSecrets(readTx, store.ByIDPrefix(secretID))
				if err != nil {
					log.WithError(err).Errorf("error retrieving secret")
					continue
				}
				if len(secrets) != 1 {
					log.Debugf("secret not found")
					continue
				}

				// If the secret was found and there was one result
				// (there should never be more than one because of the
				// uniqueness constraint), add this secret to our
				// initial set that we send down.
				newSecrets = append(newSecrets, secrets[0])
			}
			tasksUsingSecret[secretID][t.ID] = struct{}{}
		}

		return newSecrets
	}

	// TODO(aaronl): Also send node secrets that should be exposed to
	// this node.
	nodeTasks, cancel, err := store.ViewAndWatch(
		d.store,
		func(readTx store.ReadTx) error {
			tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
			if err != nil {
				return err
			}

			for _, t := range tasks {
				// We only care about tasks that are ASSIGNED or
				// higher. If the state is below ASSIGNED, the
				// task may not meet the constraints for this
				// node, so we have to be careful about sending
				// secrets associated with it.
				if t.Status.State < api.TaskStateAssigned {
					continue
				}

				tasksMap[t.ID] = t
				taskChange := &api.AssignmentChange{
					Assignment: &api.Assignment{
						Item: &api.Assignment_Task{
							Task: t,
						},
					},
					Action: api.AssignmentChange_AssignmentActionUpdate,
				}
				initial.Changes = append(initial.Changes, taskChange)
				// Only send secrets down for tasks that are <= RUNNING
				if t.Status.State <= api.TaskStateRunning {
					newSecrets := addSecretsForTask(readTx, t)
					for _, secret := range newSecrets {
						secretChange := &api.AssignmentChange{
							Assignment: &api.Assignment{
								Item: &api.Assignment_Secret{
									Secret: secret,
								},
							},
							Action: api.AssignmentChange_AssignmentActionUpdate,
						}

						initial.Changes = append(initial.Changes, secretChange)
					}
				}
			}
			return nil
		},
		state.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
			Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
		state.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
			Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
		state.EventUpdateSecret{},
		state.EventDeleteSecret{},
	)
	if err != nil {
		return err
	}
	defer cancel()

	if err := sendMessage(initial, api.AssignmentsMessage_COMPLETE); err != nil {
		return err
	}

	for {
		// Check for session expiration
		if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
			return err
		}

		// bursty events should be processed in batches and sent out together
		var (
			update          api.AssignmentsMessage
			modificationCnt int
			batchingTimer   *time.Timer
			batchingTimeout <-chan time.Time
			updateTasks     = make(map[string]*api.Task)
			updateSecrets   = make(map[string]*api.Secret)
			removeTasks     = make(map[string]struct{})
			removeSecrets   = make(map[string]struct{})
		)

		oneModification := func() {
			modificationCnt++

			if batchingTimer != nil {
				batchingTimer.Reset(batchingWaitTime)
			} else {
				batchingTimer = time.NewTimer(batchingWaitTime)
				batchingTimeout = batchingTimer.C
			}
		}

		// Release the secrets references from this task
		releaseSecretsForTask := func(t *api.Task) bool {
			var modified bool
			container := t.Spec.GetContainer()
			if container == nil {
				return modified
			}

			for _, secretRef := range container.Secrets {
				secretID := secretRef.SecretID
				delete(tasksUsingSecret[secretID], t.ID)
				if len(tasksUsingSecret[secretID]) == 0 {
					// No tasks are using the secret anymore
					delete(tasksUsingSecret, secretID)
					removeSecrets[secretID] = struct{}{}
					modified = true
				}
			}

			return modified
		}

		// The batching loop waits for 50 ms after the most recent
		// change, or until modificationBatchLimit is reached. The
		// worst case latency is modificationBatchLimit * batchingWaitTime,
		// which is 10 seconds.
	batchingLoop:
		for modificationCnt < modificationBatchLimit {
			select {
			case event := <-nodeTasks:
				switch v := event.(type) {
				// We don't monitor EventCreateTask because tasks are
				// never created in the ASSIGNED state. First tasks are
				// created by the orchestrator, then the scheduler moves
				// them to ASSIGNED. If this ever changes, we will need
				// to monitor task creations as well.
				case state.EventUpdateTask:
					// We only care about tasks that are ASSIGNED or
					// higher.
					if v.Task.Status.State < api.TaskStateAssigned {
						continue
					}

					if oldTask, exists := tasksMap[v.Task.ID]; exists {
						// States ASSIGNED and below are set by the orchestrator/scheduler,
						// not the agent, so tasks in these states need to be sent to the
						// agent even if nothing else has changed.
						if equality.TasksEqualStable(oldTask, v.Task) && v.Task.Status.State > api.TaskStateAssigned {
							// this update should not trigger a task change for the agent
							tasksMap[v.Task.ID] = v.Task
							// If this task got updated to a final state, let's release
							// the secrets that are being used by the task
							if v.Task.Status.State > api.TaskStateRunning {
								// If releasing the secrets caused a secret to be
								// removed from an agent, mark one modification
								if releaseSecretsForTask(v.Task) {
									oneModification()
								}
							}
							continue
						}
					} else if v.Task.Status.State <= api.TaskStateRunning {
						// If this task wasn't part of the assignment set before and it's
						// <= RUNNING, add the secrets it references to the secret
						// assignments. Task states > RUNNING are reported only by the
						// worker, so tasks are never created in a > RUNNING state.
						var newSecrets []*api.Secret
						d.store.View(func(readTx store.ReadTx) {
							newSecrets = addSecretsForTask(readTx, v.Task)
						})
						for _, secret := range newSecrets {
							updateSecrets[secret.ID] = secret
						}
					}
					tasksMap[v.Task.ID] = v.Task
					updateTasks[v.Task.ID] = v.Task

					oneModification()
				case state.EventDeleteTask:
					if _, exists := tasksMap[v.Task.ID]; !exists {
						continue
					}

					removeTasks[v.Task.ID] = struct{}{}

					delete(tasksMap, v.Task.ID)

					// Release the secrets being used by this task
					// Ignoring the return here. We will always mark
					// this as a modification, since a task is being
					// removed.
					releaseSecretsForTask(v.Task)

					oneModification()
				// TODO(aaronl): For node secrets, we'll need to handle
				// EventCreateSecret.
				case state.EventUpdateSecret:
					if _, exists := tasksUsingSecret[v.Secret.ID]; !exists {
						continue
					}
					log.Debugf("Secret %s (ID: %d) was updated though it was still referenced by one or more tasks",
						v.Secret.Spec.Annotations.Name, v.Secret.ID)

				case state.EventDeleteSecret:
					if _, exists := tasksUsingSecret[v.Secret.ID]; !exists {
						continue
					}
					log.Debugf("Secret %s (ID: %d) was deleted though it was still referenced by one or more tasks",
						v.Secret.Spec.Annotations.Name, v.Secret.ID)
				}
			case <-batchingTimeout:
				break batchingLoop
			case <-stream.Context().Done():
				return stream.Context().Err()
			case <-dctx.Done():
				return dctx.Err()
			}
		}

		if batchingTimer != nil {
			batchingTimer.Stop()
		}

		// Flush the batch: translate the accumulated task/secret maps into
		// a single INCREMENTAL message.
		if modificationCnt > 0 {
			for id, task := range updateTasks {
				if _, ok := removeTasks[id]; !ok {
					taskChange := &api.AssignmentChange{
						Assignment: &api.Assignment{
							Item: &api.Assignment_Task{
								Task: task,
							},
						},
						Action: api.AssignmentChange_AssignmentActionUpdate,
					}

					update.Changes = append(update.Changes, taskChange)
				}
			}
			for id, secret := range updateSecrets {
				// If, due to multiple updates, this secret is no longer in use,
				// don't send it down.
				if len(tasksUsingSecret[id]) == 0 {
					// drop it from the update set so that the removal
					// pass below does not skip its pending remove
					delete(updateSecrets, id)
					continue
				}
				secretChange := &api.AssignmentChange{
					Assignment: &api.Assignment{
						Item: &api.Assignment_Secret{
							Secret: secret,
						},
					},
					Action: api.AssignmentChange_AssignmentActionUpdate,
				}

				update.Changes = append(update.Changes, secretChange)
			}
			for id := range removeTasks {
				taskChange := &api.AssignmentChange{
					Assignment: &api.Assignment{
						Item: &api.Assignment_Task{
							Task: &api.Task{ID: id},
						},
					},
					Action: api.AssignmentChange_AssignmentActionRemove,
				}

				update.Changes = append(update.Changes, taskChange)
			}
			for id := range removeSecrets {
				// If this secret is also being sent on the updated set
				// don't also add it to the removed set
				if _, ok := updateSecrets[id]; ok {
					continue
				}

				secretChange := &api.AssignmentChange{
					Assignment: &api.Assignment{
						Item: &api.Assignment_Secret{
							Secret: &api.Secret{ID: id},
						},
					},
					Action: api.AssignmentChange_AssignmentActionRemove,
				}

				update.Changes = append(update.Changes, secretChange)
			}

			if err := sendMessage(update, api.AssignmentsMessage_INCREMENTAL); err != nil {
				return err
			}
		}
	}
}
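To make the versioning contract concrete, here is a minimal sketch of the agent side, assuming an api.DispatcherClient named client; resetLocalState and applyChanges are hypothetical helpers. A COMPLETE message replaces local state, an INCREMENTAL one applies on top of it, and the AppliesTo/ResultsIn pair lets the worker detect a dropped update.

// consumeAssignments applies the assignment stream to local state
// (hypothetical consumer, for illustration).
func consumeAssignments(ctx context.Context, client api.DispatcherClient, sessionID string) error {
	stream, err := client.Assignments(ctx, &api.AssignmentsRequest{SessionID: sessionID})
	if err != nil {
		return err
	}
	appliedTo := ""
	for {
		msg, err := stream.Recv()
		if err != nil {
			return err
		}
		switch {
		case msg.Type == api.AssignmentsMessage_COMPLETE:
			resetLocalState(msg.Changes) // hypothetical: replace all local tasks/secrets
		case msg.AppliesTo == appliedTo:
			applyChanges(msg.Changes) // hypothetical: apply the incremental diff
		default:
			// Sequence gap: reconnect to receive a fresh COMPLETE snapshot.
			return errors.New("assignments: out-of-sequence update")
		}
		appliedTo = msg.ResultsIn
	}
}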