// validatePlacement checks that the placement constraints, if any, can be
// parsed by the constraint package. A nil placement is considered valid.
func validatePlacement(placement *api.Placement) error {
	if placement == nil {
		return nil
	}
	_, err := constraint.Parse(placement.Constraints)
	return err
}
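// Illustrative sketch, not part of the original source: exercising
// validatePlacement with swarmkit-style constraint expressions of the form
// "<key> == <value>" or "<key> != <value>". The function name and the use of
// fmt here are assumptions for the example only; the api and constraint
// packages are the ones imported by the surrounding code.
func exampleValidatePlacement() {
	ok := &api.Placement{Constraints: []string{"node.role == manager"}}
	fmt.Println(validatePlacement(ok)) // nil: the expression parses

	bad := &api.Placement{Constraints: []string{"node.role"}} // no operator
	fmt.Println(validatePlacement(bad)) // non-nil: the error from constraint.Parse is returned unchanged
}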
// updateService updates g.globalServices based on the current service value.
func (g *Orchestrator) updateService(service *api.Service) {
	var constraints []constraint.Constraint

	if service.Spec.Task.Placement != nil && len(service.Spec.Task.Placement.Constraints) != 0 {
		// The parse error is ignored here because constraints have
		// already been validated at controlapi.
		constraints, _ = constraint.Parse(service.Spec.Task.Placement.Constraints)
	}

	g.globalServices[service.ID] = globalService{
		Service:     service,
		constraints: constraints,
	}
}
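// Illustrative sketch, not from the original source: the constraints cached in
// globalService allow node eligibility to be checked later without re-parsing
// the service spec. A check along these lines could reuse
// constraint.NodeMatches, the same helper used by the constraint enforcer
// below. The method name is hypothetical.
func (g *Orchestrator) nodeMatchesService(node *api.Node, serviceID string) bool {
	gs, ok := g.globalServices[serviceID]
	if !ok {
		return false
	}
	// No constraints means every node is eligible.
	if len(gs.constraints) == 0 {
		return true
	}
	return constraint.NodeMatches(gs.constraints, node)
}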
// SetTask returns true when the filter is enabled for a given task.
func (f *ConstraintFilter) SetTask(t *api.Task) bool {
	if t.Spec.Placement == nil || len(t.Spec.Placement.Constraints) == 0 {
		return false
	}

	constraints, err := constraint.Parse(t.Spec.Placement.Constraints)
	if err != nil {
		// Constraints have already been validated at controlapi. If an
		// error somehow shows up here, treat the task as if the
		// constraint filter were disabled.
		return false
	}
	f.constraints = constraints
	return true
}
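// Illustrative sketch, not from the original source: once SetTask has stored
// the parsed constraints, the matching step for a candidate node reduces to a
// single constraint.NodeMatches call. The real filter operates on the
// scheduler's node wrapper type; *api.Node and the method name "matches" are
// simplifications for this example.
func (f *ConstraintFilter) matches(n *api.Node) bool {
	return constraint.NodeMatches(f.constraints, n)
}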
// shutdownNoncompliantTasks shuts down tasks assigned to the node that no
// longer satisfy the node's scheduling constraints or resource reservations.
func (ce *ConstraintEnforcer) shutdownNoncompliantTasks(node *api.Node) {
	// If the availability is "drain", the orchestrator will
	// shut down all tasks.
	// If the availability is "pause", we shouldn't touch
	// the tasks on this node.
	if node.Spec.Availability != api.NodeAvailabilityActive {
		return
	}

	var (
		tasks []*api.Task
		err   error
	)

	ce.store.View(func(tx store.ReadTx) {
		tasks, err = store.FindTasks(tx, store.ByNodeID(node.ID))
	})

	if err != nil {
		log.L.WithError(err).Errorf("failed to list tasks for node ID %s", node.ID)
	}

	var availableMemoryBytes, availableNanoCPUs int64
	if node.Description != nil && node.Description.Resources != nil {
		availableMemoryBytes = node.Description.Resources.MemoryBytes
		availableNanoCPUs = node.Description.Resources.NanoCPUs
	}

	removeTasks := make(map[string]*api.Task)

	// TODO(aaronl): The set of tasks removed will be
	// nondeterministic because it depends on the order of
	// the slice returned from FindTasks. We could do
	// a separate pass over the tasks for each type of
	// resource, and sort by the size of the reservation
	// to remove the most resource-intensive tasks.
	for _, t := range tasks {
		if t.DesiredState < api.TaskStateAssigned || t.DesiredState > api.TaskStateRunning {
			continue
		}

		// Ensure that the task still meets scheduling
		// constraints.
		if t.Spec.Placement != nil && len(t.Spec.Placement.Constraints) != 0 {
			constraints, _ := constraint.Parse(t.Spec.Placement.Constraints)
			if !constraint.NodeMatches(constraints, node) {
				removeTasks[t.ID] = t
				continue
			}
		}

		// Ensure that the task assigned to the node
		// still satisfies the resource limits.
		if t.Spec.Resources != nil && t.Spec.Resources.Reservations != nil {
			if t.Spec.Resources.Reservations.MemoryBytes > availableMemoryBytes {
				removeTasks[t.ID] = t
				continue
			}
			if t.Spec.Resources.Reservations.NanoCPUs > availableNanoCPUs {
				removeTasks[t.ID] = t
				continue
			}
			availableMemoryBytes -= t.Spec.Resources.Reservations.MemoryBytes
			availableNanoCPUs -= t.Spec.Resources.Reservations.NanoCPUs
		}
	}

	if len(removeTasks) != 0 {
		_, err := ce.store.Batch(func(batch *store.Batch) error {
			for _, t := range removeTasks {
				err := batch.Update(func(tx store.Tx) error {
					t = store.GetTask(tx, t.ID)
					if t == nil || t.DesiredState > api.TaskStateRunning {
						return nil
					}

					t.DesiredState = api.TaskStateShutdown
					return store.UpdateTask(tx, t)
				})
				if err != nil {
					log.L.WithError(err).Errorf("failed to shut down task %s", t.ID)
				}
			}
			return nil
		})
		if err != nil {
			log.L.WithError(err).Errorf("failed to shut down tasks")
		}
	}
}
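// Illustrative sketch, not from the original source: one way to approach the
// TODO above is to sort the candidate tasks by the size of their memory
// reservation before the resource pass, so the outcome no longer depends on
// the order returned by FindTasks. Sorting smallest reservations first means
// that when the remaining capacity runs out, it is the most resource-intensive
// tasks that land in removeTasks. A second pass keyed on NanoCPUs could work
// the same way. The helper name is hypothetical; sort is the standard library
// package.
func sortByMemoryReservation(tasks []*api.Task) {
	sort.Slice(tasks, func(i, j int) bool {
		var ri, rj int64
		if r := tasks[i].Spec.Resources; r != nil && r.Reservations != nil {
			ri = r.Reservations.MemoryBytes
		}
		if r := tasks[j].Spec.Resources; r != nil && r.Reservations != nil {
			rj = r.Reservations.MemoryBytes
		}
		return ri < rj // smallest reservations first
	})
}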