Code example #1
File: reconciler.go Project: elodina/stack-deploy
func (r *Reconciler) reconcile(driver scheduler.SchedulerDriver, implicit bool) {
	if time.Now().Sub(r.reconcileTime) >= r.ReconcileDelay {
		r.taskLock.Lock()
		defer r.taskLock.Unlock()

		r.reconciles++
		r.reconcileTime = time.Now()

		if r.reconciles > r.ReconcileMaxTries {
			for task := range r.tasks {
				Logger.Info("Reconciling exceeded %d tries, sending killTask for task %s", r.ReconcileMaxTries, task)
				driver.KillTask(util.NewTaskID(task))
			}
			r.reconciles = 0
		} else {
			if implicit {
				driver.ReconcileTasks(nil)
			} else {
				statuses := make([]*mesos.TaskStatus, 0)
				for task := range r.tasks {
					Logger.Debug("Reconciling %d/%d task state for task id %s", r.reconciles, r.ReconcileMaxTries, task)
					statuses = append(statuses, util.NewTaskStatus(util.NewTaskID(task), mesos.TaskState_TASK_STAGING))
				}
				driver.ReconcileTasks(statuses)
			}
		}
	}
}
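The reconcile method above only acts when it is invoked, so something has to call it periodically for the delay and retry bookkeeping to take effect. A minimal sketch of driving it from a ticker goroutine (the one-minute interval and the implicit/explicit alternation are illustrative assumptions, not taken from stack-deploy):

func runReconciliation(r *Reconciler, driver scheduler.SchedulerDriver, stop <-chan struct{}) {
	// Illustrative driver loop: call reconcile on a fixed interval and
	// alternate between implicit and explicit reconciliation requests.
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()
	implicit := true
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			r.reconcile(driver, implicit)
			implicit = !implicit
		}
	}
}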
Code example #2
File: scheduler.go Project: elodina/syphon
func (this *ElodinaTransportScheduler) tryKillTask(driver scheduler.SchedulerDriver, taskId *mesos.TaskID) error {
	log.Logger.Info("Trying to kill task %s", taskId.GetValue())
	var err error
	for i := 0; i <= this.config.KillTaskRetries; i++ {
		if _, err = driver.KillTask(taskId); err == nil {
			return nil
		}
	}
	return err
}
Code example #3
File: scheduler.go Project: stealthly/edge-test
func (this *TransformScheduler) tryKillTask(driver scheduler.SchedulerDriver, taskId *mesos.TaskID) error {
	fmt.Printf("Trying to kill task %s\n", taskId.GetValue())

	var err error
	for i := 0; i <= this.config.KillTaskRetries; i++ {
		if _, err = driver.KillTask(taskId); err == nil {
			return nil
		}
	}
	return err
}
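Examples #2 and #3 are the same retry wrapper around driver.KillTask, and both retry immediately in a tight loop. A sketch of the same idea with a short pause between attempts (the retries and delay parameters are illustrative, not taken from either project):

// killTaskWithRetry asks the driver to kill a task, pausing between failed
// attempts instead of retrying immediately. retries and delay are
// illustrative parameters.
func killTaskWithRetry(driver scheduler.SchedulerDriver, taskId *mesos.TaskID, retries int, delay time.Duration) error {
	var err error
	for i := 0; i <= retries; i++ {
		if _, err = driver.KillTask(taskId); err == nil {
			return nil
		}
		time.Sleep(delay)
	}
	return err
}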
Code example #4
File: basicworker.go Project: lucmichalski/taurus
// KillJobTasks monitors all Jobs marked as STOPPED and kills all of their running Tasks
//
// KillJobTasks runs in a separate goroutine started in Worker.Start call
// It returns an error if it can't read the Job Store or if the goroutine it is running in has been stopped
func (bw *BasicWorker) KillJobTasks(driver scheduler.SchedulerDriver) error {
	state := taurus.STOPPED
	errChan := make(chan error)
	ticker := time.NewTicker(StoreScanTick)
	go func() {
		var killErr error
	killer:
		for {
			select {
			case <-bw.done:
				ticker.Stop()
				killErr = nil
				log.Printf("Finishing %s Task queuer", state)
				break killer
			case <-ticker.C:
				jobs, err := bw.store.GetJobs(state)
				if err != nil {
					killErr = fmt.Errorf("Error reading %s Jobs: %s", state, err)
					break killer
				}
				for _, job := range jobs {
					ctx, cancel := context.WithTimeout(context.Background(), MasterTimeout)
					mesosTasks, err := taurus.MesosTasks(ctx, bw.master, job.Id, mesos.TaskState_TASK_RUNNING.Enum())
					if err != nil {
						log.Printf("Failed to read tasks for Job %s: %s", job.Id, err)
						cancel()
						continue
					}
					for taskId := range mesosTasks {
						mesosTaskId := mesosutil.NewTaskID(taskId)
						killStatus, err := driver.KillTask(mesosTaskId)
						if err != nil {
							log.Printf("Mesos in state %s failed to kill the task %s: %s", killStatus, taskId, err)
							continue
						}
					}
				}
			}
		}
		errChan <- killErr
		log.Printf("%s tasks killer ticker stopped", state)
	}()

	return <-errChan
}
Code example #5
func (s *MinerScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
	log.Infoln("Status update: task", status.TaskId.GetValue(), " is in state ", status.State.Enum().String())
	// If the mining server failed, kill all daemons, since they will be trying to talk to the failed mining server
	if strings.Contains(status.GetTaskId().GetValue(), "server") {
		s.minerServerRunning = false

		// kill all tasks
		statuses := make([]*mesos.TaskStatus, 0)
		_, err := driver.ReconcileTasks(statuses)
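		// NOTE: in mesos-go, ReconcileTasks only triggers reconciliation; the
		// reconciled task states arrive asynchronously via StatusUpdate. The
		// statuses slice passed in is an input hint and is not populated by the
		// call, so the loop below ranges over an empty slice and kills nothing
		// as written.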
		if err != nil {
			panic(err)
		}

		for _, status := range statuses {
			driver.KillTask(status.TaskId)
		}
	}
}
Code example #6
func (r *Reconciler) reconcile(driver scheduler.SchedulerDriver, implicit bool) {
	if time.Now().Sub(r.reconcileTime) >= r.ReconcileDelay {
		if !r.tasks.IsReconciling() {
			r.reconciles = 0
		}
		r.reconciles++
		r.reconcileTime = time.Now()

		if r.reconciles > r.ReconcileMaxTries {
			for _, task := range r.tasks.GetWithFilter(func(task Task) bool {
				return task.Data().State == TaskStateReconciling
			}) {
				if task.Data().TaskID != "" {
					Logger.Info("Reconciling exceeded %d tries for task %s, sending killTask for task %s", r.ReconcileMaxTries, task.Data().ID, task.Data().TaskID)
					driver.KillTask(util.NewTaskID(task.Data().TaskID))

					task.Data().Reset()
				}
			}
		} else {
			if implicit {
				driver.ReconcileTasks(nil)
			} else {
				statuses := make([]*mesos.TaskStatus, 0)
				for _, task := range r.tasks.GetAll() {
					if task.Data().TaskID != "" {
						task.Data().State = TaskStateReconciling
						Logger.Info("Reconciling %d/%d task state for id %s, task id %s", r.reconciles, r.ReconcileMaxTries, task.Data().ID, task.Data().TaskID)
						statuses = append(statuses, util.NewTaskStatus(util.NewTaskID(task.Data().TaskID), mesos.TaskState_TASK_STAGING))
					}
				}
				driver.ReconcileTasks(statuses)
			}
		}
	}
}
Code example #7
File: scheduler.go Project: niu-team/kubernetes
// reconcile an unknown (from the perspective of our registry) non-terminal task
func (k *KubernetesScheduler) reconcileNonTerminalTask(driver bindings.SchedulerDriver, taskStatus *mesos.TaskStatus) {
	// attempt to recover task from pod info:
	// - task data may contain an api.PodStatusResult; if status.reason == REASON_RECONCILIATION then status.data == nil
	// - the Name can be parsed by container.ParseFullName() to yield a pod Name and Namespace
	// - pull the pod metadata down from the api server
	// - perform task recovery based on pod metadata
	taskId := taskStatus.TaskId.GetValue()
	if taskStatus.GetReason() == mesos.TaskStatus_REASON_RECONCILIATION && taskStatus.GetSource() == mesos.TaskStatus_SOURCE_MASTER {
		// there will be no data in the task status that we can use to determine the associated pod
		switch taskStatus.GetState() {
		case mesos.TaskState_TASK_STAGING:
			// there is still hope for this task, don't kill it just yet
			//TODO(jdef) there should probably be a limit for how long we tolerate tasks stuck in this state
			return
		default:
			// for TASK_{STARTING,RUNNING} we should already have attempted recoverTasks().
			// if the scheduler failed over before the executor fired TASK_STARTING, then we should *not*
			// be processing this reconciliation update before we process the one from the executor.
			// point: we don't know what this task is (perhaps there was unrecoverable metadata in the pod),
			// so it gets killed.
			log.Errorf("killing non-terminal, unrecoverable task %v", taskId)
		}
	} else if podStatus, err := podtask.ParsePodStatusResult(taskStatus); err != nil {
		// possible rogue pod exists at this point because we can't identify it; should kill the task
		log.Errorf("possible rogue pod; illegal task status data for task %v, expected an api.PodStatusResult: %v", taskId, err)
	} else if name, namespace, err := container.ParsePodFullName(podStatus.Name); err != nil {
		// possible rogue pod exists at this point because we can't identify it; should kill the task
		log.Errorf("possible rogue pod; illegal api.PodStatusResult, unable to parse full pod name from: '%v' for task %v: %v",
			podStatus.Name, taskId, err)
	} else if pod, err := k.client.Pods(namespace).Get(name); err == nil {
		if t, ok, err := podtask.RecoverFrom(*pod); ok {
			log.Infof("recovered task %v from metadata in pod %v/%v", taskId, namespace, name)
			_, err := k.taskRegistry.Register(t, nil)
			if err != nil {
				// someone beat us to it?!
				log.Warningf("failed to register recovered task: %v", err)
				return
			} else {
				k.taskRegistry.UpdateStatus(taskStatus)
			}
			return
		} else if err != nil {
			//should kill the pod and the task
			log.Errorf("killing pod, failed to recover task from pod %v/%v: %v", namespace, name, err)
			if err := k.client.Pods(namespace).Delete(name, nil); err != nil {
				log.Errorf("failed to delete pod %v/%v: %v", namespace, name, err)
			}
		} else {
			//this is pretty unexpected: we received a TASK_{STARTING,RUNNING} message, but the apiserver's pod
			//metadata is not appropriate for task reconstruction -- which should almost certainly never
			//be the case unless someone swapped out the pod on us (and kept the same namespace/name) while
			//we were failed over.

			//kill this task, allow the newly launched scheduler to schedule the new pod
			log.Warningf("unexpected pod metadata for task %v in apiserver, assuming new unscheduled pod spec: %+v", taskId, pod)
		}
	} else if errors.IsNotFound(err) {
		// pod lookup failed, should delete the task since the pod is no longer valid; may be redundant, that's ok
		log.Infof("killing task %v since pod %v/%v no longer exists", taskId, namespace, name)
	} else if errors.IsServerTimeout(err) {
		log.V(2).Infof("failed to reconcile task due to API server timeout: %v", err)
		return
	} else {
		log.Errorf("unexpected API server error, aborting reconcile for task %v: %v", taskId, err)
		return
	}
	if _, err := driver.KillTask(taskStatus.TaskId); err != nil {
		log.Errorf("failed to kill task %v: %v", taskId, err)
	}
}
Code example #8
File: scheduler.go Project: stealthly/edge-test
// mesos.Scheduler interface method.
// Invoked when resources have been offered to this framework.
func (this *TransformScheduler) ResourceOffers(driver scheduler.SchedulerDriver, offers []*mesos.Offer) {
	fmt.Println("Received offers")

	if int(this.runningInstances) > this.config.Instances {
		toKill := int(this.runningInstances) - this.config.Instances
		for i := 0; i < toKill; i++ {
			driver.KillTask(this.tasks[i])
		}

		this.tasks = this.tasks[toKill:]
	}

	offersAndTasks := make(map[*mesos.Offer][]*mesos.TaskInfo)
	for _, offer := range offers {
		cpus := getScalarResources(offer, "cpus")
		mems := getScalarResources(offer, "mem")
		ports := getRangeResources(offer, "ports")

		remainingCpus := cpus
		remainingMems := mems

		var tasks []*mesos.TaskInfo
		for int(this.getRunningInstances()) < this.config.Instances && this.config.CpuPerTask <= remainingCpus && this.config.MemPerTask <= remainingMems && len(ports) > 0 {
			port := this.takePort(&ports)
			taskPort := &mesos.Value_Range{Begin: port, End: port}
			taskId := &mesos.TaskID{
				Value: proto.String(fmt.Sprintf("golang-%s-%d", *offer.Hostname, *port)),
			}

			task := &mesos.TaskInfo{
				Name:     proto.String(taskId.GetValue()),
				TaskId:   taskId,
				SlaveId:  offer.SlaveId,
				Executor: this.createExecutor(this.getRunningInstances(), *port),
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", float64(this.config.CpuPerTask)),
					util.NewScalarResource("mem", float64(this.config.MemPerTask)),
					util.NewRangesResource("ports", []*mesos.Value_Range{taskPort}),
				},
			}
			fmt.Printf("Prepared task: %s with offer %s for launch. Ports: %s\n", task.GetName(), offer.Id.GetValue(), taskPort)

			tasks = append(tasks, task)
			remainingCpus -= this.config.CpuPerTask
			remainingMems -= this.config.MemPerTask
			ports = ports[1:]

			this.tasks = append(this.tasks, taskId)
			this.incRunningInstances()
		}
		fmt.Printf("Launching %d tasks for offer %s\n", len(tasks), offer.Id.GetValue())
		offersAndTasks[offer] = tasks
	}

	unlaunchedTasks := this.config.Instances - int(this.getRunningInstances())
	if unlaunchedTasks > 0 {
		fmt.Printf("There are still %d tasks to be launched and no more resources are available.", unlaunchedTasks)
	}

	for _, offer := range offers {
		tasks := offersAndTasks[offer]
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
	}
}
Code example #9
File: scheduler.go Project: puppetizeme/etcd-mesos
func (s *EtcdScheduler) reseedCluster(driver scheduler.SchedulerDriver) {
	// This CAS allows us to:
	//	1. ensure non-concurrent execution
	//	2. signal to shouldLaunch that we're already reseeding
	if !atomic.CompareAndSwapInt32(&s.reseeding, notReseeding, reseedUnderway) {
		return
	}
	atomic.AddUint32(&s.Stats.ClusterReseeds, 1)

	s.mut.Lock()
	s.state = Immutable

	defer func() {
		s.state = Mutable
		atomic.StoreInt32(&s.reseeding, notReseeding)
		s.mut.Unlock()
	}()

	candidates := rpc.RankReseedCandidates(s.running)
	if len(candidates) == 0 {
		log.Error("Failed to retrieve any candidates for reseeding! " +
			"No recovery possible!")
		driver.Abort()
	}

	killable := []string{}
	newSeed := ""
	log.Infof("Candidates for reseed: %+v", candidates)
	for _, node := range candidates {
		// 1. restart node with --force-new-cluster
		// 2. ensure it passes health check
		// 3. ensure its member list only contains itself
		// 4. kill everybody else
		if newSeed != "" {
			log.Warningf("Marking node %s from previous cluster as inferior", node.Node)
			killable = append(killable, node.Node)
		} else {
			log.Warningf("Attempting to re-seed cluster with candidate %s "+
				"with Raft index %d!", node.Node, node.RaftIndex)
			if s.reseedNode(node.Node, driver) {
				newSeed = node.Node
				continue
			}
			// Mark this node as killable, as it did not become healthy on time.
			log.Errorf("Failed reseed attempt on node %s, trying the next-best node.",
				node.Node)
			log.Warningf("Marking node %s from previous cluster as inferior", node.Node)
			killable = append(killable, node.Node)
		}
	}
	if newSeed != "" {
		log.Warningf("We think we have a new healthy leader: %s", newSeed)
		log.Warning("Terminating stale members of previous cluster.")
		for node, taskID := range s.tasks {
			if node != newSeed {
				log.Warningf("Killing old node %s", node)
				driver.KillTask(taskID)
			}
		}
	}
}