Example #1
// getRunnableSlotSlice collects the runnable slots of a service into a
// slice, failing the test if the lookup errors.
func getRunnableSlotSlice(t *testing.T, s *store.MemoryStore, service *api.Service) []orchestrator.Slot {
	runnable, _, err := orchestrator.GetRunnableAndDeadSlots(s, service.ID)
	require.NoError(t, err)

	var runnableSlice []orchestrator.Slot
	for _, slot := range runnable {
		runnableSlice = append(runnableSlice, slot)
	}
	return runnableSlice
}
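
A minimal usage sketch of this helper follows. The store setup, service name, replica count, and expected slot count are assumptions for illustration only, not taken from the example itself; imports follow the snippet above.

func TestGetRunnableSlotSliceUsage(t *testing.T) {
	// Hypothetical test setup; names and values are illustrative only.
	s := store.NewMemoryStore(nil)
	require.NotNil(t, s)
	defer s.Close()

	service := &api.Service{
		ID: "serviceid1",
		Spec: api.ServiceSpec{
			Annotations: api.Annotations{Name: "name1"},
			Mode: &api.ServiceSpec_Replicated{
				Replicated: &api.ReplicatedService{Replicas: 3},
			},
		},
	}
	require.NoError(t, s.Update(func(tx store.Tx) error {
		return store.CreateService(tx, service)
	}))

	// No tasks have been created yet, so no slots are runnable.
	require.Len(t, getRunnableSlotSlice(t, s, service), 0)
}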
Example #2
// reconcile brings the number of runnable slots for a replicated service in
// line with the replica count in its spec: it updates existing tasks, adds or
// removes slots as needed, and cleans up dead slots.
func (r *Orchestrator) reconcile(ctx context.Context, service *api.Service) {
	runningSlots, deadSlots, err := orchestrator.GetRunnableAndDeadSlots(r.store, service.ID)
	if err != nil {
		log.G(ctx).WithError(err).Errorf("reconcile failed finding tasks")
		return
	}

	numSlots := len(runningSlots)

	slotsSlice := make([]orchestrator.Slot, 0, numSlots)
	for _, slot := range runningSlots {
		slotsSlice = append(slotsSlice, slot)
	}

	// This orchestrator only handles replicated services, so the mode is
	// asserted to *api.ServiceSpec_Replicated directly.
	deploy := service.Spec.GetMode().(*api.ServiceSpec_Replicated)
	specifiedSlots := int(deploy.Replicated.Replicas)

	switch {
	case specifiedSlots > numSlots:
		log.G(ctx).Debugf("Service %s was scaled up from %d to %d instances", service.ID, numSlots, specifiedSlots)
		// Update all current tasks then add missing tasks
		r.updater.Update(ctx, r.cluster, service, slotsSlice)
		_, err = r.store.Batch(func(batch *store.Batch) error {
			r.addTasks(ctx, batch, service, runningSlots, deadSlots, specifiedSlots-numSlots)
			r.deleteTasksMap(ctx, batch, deadSlots)
			return nil
		})
		if err != nil {
			log.G(ctx).WithError(err).Errorf("reconcile batch failed")
		}

	case specifiedSlots < numSlots:
		// Update up to N tasks then remove the extra
		log.G(ctx).Debugf("Service %s was scaled down from %d to %d instances", service.ID, numSlots, specifiedSlots)

		// Preferentially remove tasks on the nodes that have the most
		// copies of this service, to leave a more balanced result.

		// First sort tasks such that tasks which are currently running
		// (in terms of observed state) appear before non-running tasks.
		// This will cause us to prefer to remove non-running tasks, all
		// other things being equal in terms of node balance.

		sort.Sort(slotsByRunningState(slotsSlice))

		// Assign each task an index that counts it as the nth copy of
		// the service on its node (1, 2, 3, ...), and sort the
		// tasks by this counter value.

		slotsByNode := make(map[string]int)
		slotsWithIndices := make(slotsByIndex, 0, numSlots)

		for _, slot := range slotsSlice {
			if len(slot) == 1 && slot[0].NodeID != "" {
				slotsByNode[slot[0].NodeID]++
				slotsWithIndices = append(slotsWithIndices, slotWithIndex{slot: slot, index: slotsByNode[slot[0].NodeID]})
			} else {
				slotsWithIndices = append(slotsWithIndices, slotWithIndex{slot: slot, index: -1})
			}
		}

		sort.Sort(slotsWithIndices)

		sortedSlots := make([]orchestrator.Slot, 0, numSlots)
		for _, slot := range slotsWithIndices {
			sortedSlots = append(sortedSlots, slot.slot)
		}

		r.updater.Update(ctx, r.cluster, service, sortedSlots[:specifiedSlots])
		_, err = r.store.Batch(func(batch *store.Batch) error {
			r.deleteTasksMap(ctx, batch, deadSlots)
			r.deleteTasks(ctx, batch, sortedSlots[specifiedSlots:])
			return nil
		})
		if err != nil {
			log.G(ctx).WithError(err).Errorf("reconcile batch failed")
		}

	case specifiedSlots == numSlots:
		_, err = r.store.Batch(func(batch *store.Batch) error {
			r.deleteTasksMap(ctx, batch, deadSlots)
			return nil
		})
		if err != nil {
			log.G(ctx).WithError(err).Errorf("reconcile batch failed")
		}
		// Simple update, no scaling - update all tasks.
		r.updater.Update(ctx, r.cluster, service, slotsSlice)
	}
}
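
The scale-down branch above relies on three small helpers that the example does not show: slotsByRunningState, slotWithIndex, and slotsByIndex. Below is a minimal sketch of shapes they could plausibly have, assuming the two slice types implement sort.Interface as the sort.Sort calls require. This is an illustration of the sorting described in the comments, not the package's actual definitions, and the hasRunning helper name is hypothetical.

// Sketch only: assumed shapes for the sort helpers used by reconcile above.

// slotsByRunningState orders slots so that slots with a task observed as
// running come before slots without one, so non-running slots are trimmed
// first when scaling down.
type slotsByRunningState []orchestrator.Slot

func (is slotsByRunningState) Len() int      { return len(is) }
func (is slotsByRunningState) Swap(i, j int) { is[i], is[j] = is[j], is[i] }
func (is slotsByRunningState) Less(i, j int) bool {
	return hasRunning(is[i]) && !hasRunning(is[j])
}

// hasRunning reports whether any task in the slot is observed as running.
func hasRunning(slot orchestrator.Slot) bool {
	for _, t := range slot {
		if t.Status.State == api.TaskStateRunning {
			return true
		}
	}
	return false
}

// slotWithIndex pairs a slot with its per-node copy count, or -1 when the
// slot has no single assigned node.
type slotWithIndex struct {
	slot  orchestrator.Slot
	index int
}

// slotsByIndex orders slots by ascending per-node copy count, so the slots
// on the most crowded nodes land at the end of the slice and are removed by
// r.deleteTasks(ctx, batch, sortedSlots[specifiedSlots:]) above.
type slotsByIndex []slotWithIndex

func (is slotsByIndex) Len() int      { return len(is) }
func (is slotsByIndex) Swap(i, j int) { is[i], is[j] = is[j], is[i] }
func (is slotsByIndex) Less(i, j int) bool {
	// Slots without a node index (-1) sort to the end, so they are
	// preferred for removal.
	if is[i].index < 0 && is[j].index >= 0 {
		return false
	}
	if is[j].index < 0 && is[i].index >= 0 {
		return true
	}
	return is[i].index < is[j].index
}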