Code example #1
File: api.go Project: himanshugpt/evergreen
// helper function for releasing the global lock
func releaseGlobalLock(client, taskId string) {
	evergreen.Logger.Logf(slogger.DEBUG, "Attempting to release global lock for %v (remote addr: %v)", taskId, client)
	if err := db.ReleaseGlobalLock(client); err != nil {
		evergreen.Logger.Errorf(slogger.ERROR, "Error releasing global lock for %v (remote addr: %v) - this is really bad: %v", taskId, client, err)
		return
	}
	evergreen.Logger.Logf(slogger.DEBUG, "Released global lock for %v (remote addr: %v)", taskId, client)
}
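For context, here is a minimal sketch of how such a helper might be invoked from a request handler; the handler name and signature below are hypothetical, not taken from the evergreen source:

// hypothetical caller: releasing the lock is deferred so it runs no
// matter how the handler returns
func handleTaskEnd(w http.ResponseWriter, r *http.Request, taskId string) {
	defer releaseGlobalLock(r.RemoteAddr, taskId)
	// ... process the task-end request here ...
}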
Code example #2
File: runner.go Project: himanshugpt/evergreen
func (r *Runner) Run(config *evergreen.Settings) error {
	lockAcquired, err := db.WaitTillAcquireGlobalLock(RunnerName, db.LockTimeout)
	if err != nil {
		return evergreen.Logger.Errorf(slogger.ERROR, "error acquiring global lock: %v", err)
	}

	if !lockAcquired {
		return evergreen.Logger.Errorf(slogger.ERROR, "timed out acquiring global lock")
	}

	defer func() {
		if err := db.ReleaseGlobalLock(RunnerName); err != nil {
			evergreen.Logger.Errorf(slogger.ERROR, "error releasing global lock: %v", err)
		}
	}()

	startTime := time.Now()
	evergreen.Logger.Logf(slogger.INFO, "Starting monitor at time %v", startTime)

	if err := RunAllMonitoring(config); err != nil {
		return evergreen.Logger.Errorf(slogger.ERROR, "error running monitor: %v", err)
	}

	runtime := time.Since(startTime)
	if err = model.SetProcessRuntimeCompleted(RunnerName, runtime); err != nil {
		evergreen.Logger.Errorf(slogger.ERROR, "error updating process status: %v", err)
	}
	evergreen.Logger.Logf(slogger.INFO, "Monitor took %v to run", runtime)
	return nil
}
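The acquire, defer-release, do-work shape of this Run method recurs in the other runners below. Distilled into a reusable helper, it might look like the following sketch (it reuses the db and evergreen packages from the examples; runUnderGlobalLock itself is not an evergreen API):

// hypothetical helper wrapping a unit of work in the global lock
func runUnderGlobalLock(name string, work func() error) error {
	acquired, err := db.WaitTillAcquireGlobalLock(name, db.LockTimeout)
	if err != nil {
		return fmt.Errorf("acquiring global lock for %v: %v", name, err)
	}
	if !acquired {
		return fmt.Errorf("timed out acquiring global lock for %v", name)
	}
	// the deferred release runs even if work returns an error
	defer func() {
		if err := db.ReleaseGlobalLock(name); err != nil {
			evergreen.Logger.Errorf(slogger.ERROR, "error releasing global lock for %v: %v", name, err)
		}
	}()
	return work()
}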
Code example #3
File: monitor.go Project: tychoish/evergreen
// withGlobalLock is a wrapper for grabbing the global lock for each segment of the monitor.
func withGlobalLock(name string, f func() []error) (errs []error) {
	evergreen.Logger.Logf(slogger.DEBUG, "Attempting to acquire global lock for monitor %v", name)
	// sleep for 1 second to give other spinning locks a chance to preempt this one
	time.Sleep(time.Second)
	acquired, err := db.WaitTillAcquireGlobalLock(name, db.LockTimeout)
	if err != nil {
		evergreen.Logger.Errorf(slogger.ERROR, "Error acquiring global lock for monitor", err)
		return []error{fmt.Errorf("error acquiring global lock for %v: %v", name, err)}
	}
	if !acquired {
		evergreen.Logger.Errorf(slogger.ERROR,
			"Timed out attempting to acquire global lock for monitor %v", name)
		return []error{fmt.Errorf("timed out acquiring global lock for monitor %v", name)}
	}
	defer func() {
		evergreen.Logger.Logf(slogger.DEBUG, "Releasing global lock for monitor %v", name)
		if err := db.ReleaseGlobalLock(name); err != nil {
			evergreen.Logger.Errorf(slogger.ERROR,
				"Error releasing global lock for monitor %v: %v", name, err)
			errs = append(errs, fmt.Errorf("error releasing global lock for monitor %v: %v", name, err))
		} else {
			evergreen.Logger.Logf(slogger.DEBUG, "Released global lock for monitor %v", name)
		}
	}()
	evergreen.Logger.Logf(slogger.DEBUG, "Acquired global lock for monitor %v", name)
	errs = f()
	return errs
}
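A sketch of a call site for this wrapper; the segment name and closure body are illustrative placeholders, not code from the monitor:

// hypothetical call: run one monitor segment under the global lock,
// then log whatever errors it accumulated
errs := withGlobalLock("monitor-cleanup", func() []error {
	// a real segment would do its work here and collect errors
	return nil
})
for _, err := range errs {
	evergreen.Logger.Errorf(slogger.ERROR, "monitor: %v", err)
}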
Code example #4
File: api.go Project: markbenvenuto/evergreen
// helper function for releasing the global lock
func releaseGlobalLock(requester string) {
	evergreen.Logger.Logf(slogger.DEBUG, "Attempting to release global lock from %v", requester)

	err := db.ReleaseGlobalLock(requester)
	if err != nil {
		evergreen.Logger.Errorf(slogger.ERROR, "Error releasing global lock from %v - this is really bad: %v", requester, err)
		return
	}
	evergreen.Logger.Logf(slogger.DEBUG, "Released global lock from %v", requester)
}
Code example #5
File: runner.go Project: sr527/evergreen
func (r *Runner) Run(config *evergreen.Settings) error {
	lockAcquired, err := db.WaitTillAcquireGlobalLock(RunnerName, db.LockTimeout)
	if err != nil {
		return evergreen.Logger.Errorf(slogger.ERROR, "Error acquiring global lock: %v", err)
	}

	if !lockAcquired {
		return evergreen.Logger.Errorf(slogger.ERROR, "Timed out acquiring global lock")
	}

	defer func() {
		if err := db.ReleaseGlobalLock(RunnerName); err != nil {
			evergreen.Logger.Errorf(slogger.ERROR, "Error releasing global lock: %v", err)
		}
	}()

	startTime := time.Now()
	evergreen.Logger.Logf(slogger.INFO, "Running repository tracker with db “%v”", config.Db)

	allProjects, err := model.FindAllTrackedProjectRefs()
	if err != nil {
		return evergreen.Logger.Errorf(slogger.ERROR, "Error finding tracked projects %v", err)
	}

	numNewRepoRevisionsToFetch := config.RepoTracker.NumNewRepoRevisionsToFetch
	if numNewRepoRevisionsToFetch <= 0 {
		numNewRepoRevisionsToFetch = DefaultNumNewRepoRevisionsToFetch
	}

	var wg sync.WaitGroup
	wg.Add(len(allProjects))
	for _, projectRef := range allProjects {
		go func(projectRef model.ProjectRef) {
			defer wg.Done()

			tracker := &RepoTracker{
				config,
				&projectRef,
				NewGithubRepositoryPoller(&projectRef, config.Credentials["github"]),
			}

			// use a goroutine-local err; assigning to the shared outer err
			// from concurrent goroutines is a data race
			if err := tracker.FetchRevisions(numNewRepoRevisionsToFetch); err != nil {
				evergreen.Logger.Errorf(slogger.ERROR, "Error fetching revisions: %v", err)
			}
		}(projectRef)
	}
	wg.Wait()

	runtime := time.Since(startTime)
	if err = model.SetProcessRuntimeCompleted(RunnerName, runtime); err != nil {
		return evergreen.Logger.Errorf(slogger.ERROR, "Error updating process status: %v", err)
	}
	evergreen.Logger.Logf(slogger.INFO, "Repository tracker took %v to run", runtime)
	return nil
}
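The goroutines above only log fetch failures. A variant that also surfaces them to the caller could collect errors over a buffered channel, as in this sketch (the channel plumbing is added here for illustration, not taken from the project):

// hypothetical variant of the fan-out loop: each failure is sent to
// errC so the caller can inspect it after wg.Wait()
var wg sync.WaitGroup
wg.Add(len(allProjects))
errC := make(chan error, len(allProjects))
for _, projectRef := range allProjects {
	go func(projectRef model.ProjectRef) {
		defer wg.Done()
		tracker := &RepoTracker{
			config,
			&projectRef,
			NewGithubRepositoryPoller(&projectRef, config.Credentials["github"]),
		}
		if err := tracker.FetchRevisions(numNewRepoRevisionsToFetch); err != nil {
			errC <- fmt.Errorf("fetching revisions: %v", err)
		}
	}(projectRef)
}
wg.Wait()
close(errC)
for err := range errC {
	evergreen.Logger.Errorf(slogger.ERROR, "%v", err)
}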
Code example #6
File: runner.go Project: bjori/evergreen
func (r *Runner) Run(config *evergreen.Settings) error {
	lockAcquired, err := db.WaitTillAcquireGlobalLock(RunnerName, db.LockTimeout)
	if err != nil {
		return evergreen.Logger.Errorf(slogger.ERROR, "Error acquiring global lock: %v", err)
	}

	if !lockAcquired {
		return evergreen.Logger.Errorf(slogger.ERROR, "Timed out acquiring global lock")
	}

	defer func() {
		if err := db.ReleaseGlobalLock(RunnerName); err != nil {
			evergreen.Logger.Errorf(slogger.ERROR, "Error releasing global lock: %v", err)
		}
	}()

	startTime := time.Now()
	evergreen.Logger.Logf(slogger.INFO, "Starting scheduler at time %v", startTime)

	schedulerInstance := &Scheduler{
		config,
		&DBTaskFinder{},
		NewCmpBasedTaskPrioritizer(),
		&DBTaskDurationEstimator{},
		&DBTaskQueuePersister{},
		&DurationBasedHostAllocator{},
	}

	if err = schedulerInstance.Schedule(); err != nil {
		return evergreen.Logger.Errorf(slogger.ERROR, "Error running scheduler: %v", err)
	}

	runtime := time.Since(startTime)
	if err = model.SetProcessRuntimeCompleted(RunnerName, runtime); err != nil {
		evergreen.Logger.Errorf(slogger.ERROR, "Error updating process status: %v", err)
	}
	evergreen.Logger.Logf(slogger.INFO, "Scheduler took %v to run", runtime)
	return nil
}
Code example #7
File: task_runner.go Project: tychoish/evergreen
// processDistro copies and starts remote agents for the given distro.
// This function takes a global lock. Returns any errors that occur.
func (self *TaskRunner) processDistro(distroId string) error {
	lockKey := fmt.Sprintf("%v.%v", RunnerName, distroId)
	// sleep for 1 second to give other spinning locks a chance to preempt this one
	time.Sleep(time.Second)
	lockAcquired, err := db.WaitTillAcquireGlobalLock(lockKey, db.LockTimeout)
	if err != nil {
		return evergreen.Logger.Errorf(slogger.ERROR, "error acquiring global lock for %v: %v", lockKey, err)
	}
	if !lockAcquired {
		return evergreen.Logger.Errorf(slogger.ERROR, "timed out acquiring global lock for %v", lockKey)
	}
	defer func() {
		err := db.ReleaseGlobalLock(lockKey)
		if err != nil {
			evergreen.Logger.Errorf(slogger.ERROR, "error releasing global lock for %v: %v", lockKey, err)
		}
	}()

	freeHostsForDistro, err := self.FindAvailableHostsForDistro(distroId)
	if err != nil {
		return fmt.Errorf("loading available %v hosts: %v", distroId, err)
	}
	evergreen.Logger.Logf(slogger.INFO, "Found %v %v host(s) available to take a task",
		len(freeHostsForDistro), distroId)
	evergreen.Logger.Logf(slogger.INFO, "Kicking off tasks on distro %v...", distroId)
	taskQueue, err := self.FindTaskQueue(distroId)
	if err != nil {
		return fmt.Errorf("error finding task queue for distro %v: %v",
			distroId, err)
	}
	if taskQueue == nil {
		evergreen.Logger.Logf(slogger.INFO, "nil task queue found for distro '%v'", distroId)
		return nil // nothing to do
	}

	// while there are both free hosts and pending tasks left, pin tasks to hosts
	waitGroup := &sync.WaitGroup{}
	for !taskQueue.IsEmpty() && len(freeHostsForDistro) > 0 {
		nextHost := freeHostsForDistro[0]
		nextTask, err := DispatchTaskForHost(taskQueue, &nextHost)
		if err != nil {
			return err
		}

		// can only get here if the queue is empty
		if nextTask == nil {
			continue
		}

		// once allocated to a task, pop the host off the distro's free host
		// list
		freeHostsForDistro = freeHostsForDistro[1:]

		// use the embedded host gateway to kick the task off
		waitGroup.Add(1)
		go func(t task.Task) {
			defer waitGroup.Done()
			agentRevision, err := self.RunTaskOnHost(self.Settings,
				t, nextHost)
			if err != nil {
				evergreen.Logger.Logf(slogger.ERROR, "error kicking off task %v"+
					" on host %v: %v", t.Id, nextHost.Id, err)

				if err := model.MarkTaskUndispatched(nextTask); err != nil {
					evergreen.Logger.Logf(slogger.ERROR, "error marking task %v as undispatched "+
						"on host %v: %v", t.Id, nextHost.Id, err)
				}
				return
			}
			evergreen.Logger.Logf(slogger.INFO, "Task %v successfully kicked"+
				" off on host %v", t.Id, nextHost.Id)

			// now update the host's running task/agent revision fields
			// accordingly
			err = nextHost.SetRunningTask(t.Id, agentRevision, time.Now())
			if err != nil {
				evergreen.Logger.Logf(slogger.ERROR, "error updating running "+
					"task %v on host %v: %v", t.Id,
					nextHost.Id, err)
			}
		}(*nextTask)
	}
	waitGroup.Wait()
	return nil
}
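Because the lock key embeds the distro id, runs for different distros never contend with each other. A hypothetical driver fanning out over several distros might look like this (the distro ids and the taskRunner value are illustrative, not from the source):

// hypothetical driver: one processDistro call per distro; the
// per-distro lock key serializes same-distro runs only
var wg sync.WaitGroup
for _, distroId := range []string{"ubuntu1604", "windows-64"} {
	wg.Add(1)
	go func(id string) {
		defer wg.Done()
		if err := taskRunner.processDistro(id); err != nil {
			evergreen.Logger.Errorf(slogger.ERROR, "processing distro %v: %v", id, err)
		}
	}(distroId)
}
wg.Wait()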