Example #1
0
// SimulateFromFile runs a simulation for a single specified batch of jobs and appends the results to a
// specified file in CSV format.
//
// The simulation is event-driven: one Arrival event per job is seeded into a
// min-heap of events, and the loop pops events in time order until the queue
// drains. The final counters are appended to outputPath as one CSV record,
// guarded by outputLock. threadCapper bounds the number of concurrently
// running simulations; waitGroup is signalled on return.
func SimulateFromFile(
	clusterSize int, sysLoad int, inputPath string, outputPath string,
	outputLock *sync.Mutex, threadCapper *models.Semaphore,
	waitGroup *sync.WaitGroup, schedulerType int) {

	threadCapper.Lock()
	// Deferred so that a panic (e.g. from utils.Check) cannot leak the
	// semaphore slot or leave waitGroup.Wait() blocked forever.
	// Registration order keeps the original sequence: Free, then Done.
	defer waitGroup.Done()
	defer threadCapper.Free()

	jobs := utils.ReadJobs(inputPath)

	// Seed the event queue with one Arrival event per job.
	eventQueue := make(models.EventQueue, len(jobs))
	for i := 0; i < len(jobs); i++ {
		job := jobs[i] // fresh variable each iteration, so &job is unique
		eventQueue[i] = &models.Event{
			Time: job.Arrival,
			Job:  &job,
			Type: models.Arrival,
		}
	}
	heap.Init(&eventQueue)

	// All nodes start idle: no current job, sentinel completion time of -1.
	nodeQueue := make(models.NodeQueue, clusterSize)
	for i := 0; i < clusterSize; i++ {
		nodeQueue[i] = &models.Node{
			CurJob:        nil,
			EstCompletion: -1,
			Index:         i,
		}
	}

	jobQueue := make(models.JobQueue, 0)

	var scheduler scheduling.Scheduler
	switch schedulerType {
	case Markov:
		scheduler = &scheduling.MarkovScheduler{
			QueuedJobs:   &jobQueue,
			Nodes:        &nodeQueue,
			ScheduleSize: clusterSize * 384,
		}
	case StandardEDF:
		scheduler = &scheduling.StandardEDFScheduler{
			QueuedJobs: &jobQueue,
			Nodes:      &nodeQueue,
		}
	case OmniscientEDF:
		scheduler = &scheduling.OmniscientEDFScheduler{
			QueuedJobs: &jobQueue,
			Nodes:      &nodeQueue,
		}
	}
	scheduler.Init()

	jobsSeen, jobsRejected, jobsCompleted, jobsKilled, jobsDlMissed := 0, 0, 0, 0, 0
	for eventQueue.Len() > 0 {
		event := heap.Pop(&eventQueue).(*models.Event)
		switch event.Type {
		case models.Arrival:
			jobsSeen++
			if scheduler.IsJobAdmitted(event.Job) {
				node := heap.Pop(&nodeQueue).(*models.Node)
				if node.CurJob == nil { // Start job right away if there's a Node waiting
					startNextJob(event, node, heap.Pop(&jobQueue).(*models.Job), scheduler, &eventQueue)
				}
				// Completion events from other Jobs will automatically start the next Job for us if
				// no nodes are ready now
				heap.Push(&nodeQueue, node)
			} else { // !IsJobAdmitted
				jobsRejected++
			}

		case models.Stretch:
			// Grant the job extra time on its node and restore heap order.
			event.Node.EstCompletion += scheduler.GetAllowance()
			heap.Fix(&nodeQueue, event.Node.Index)
			if scheduler.GetAllocation(event.Job)+scheduler.GetAllowance() >= event.Job.RealExec {
				// The stretched allowance covers the job's real runtime: it completes.
				heap.Push(&eventQueue, &models.Event{
					Job:  event.Job,
					Time: event.Time + event.Job.RealExec - scheduler.GetAllocation(event.Job),
					Node: event.Node,
					Type: models.Complete,
				})
			} else {
				// Even with the allowance the job cannot finish: kill it at the deadline.
				heap.Push(&eventQueue, &models.Event{
					Job:  event.Job,
					Time: event.Node.EstCompletion,
					Node: event.Node,
					Type: models.Kill,
				})
			}
			scheduler.DeductAllowance()

		case models.Complete:
			jobsCompleted++
			scheduler.SuccessCallback(event.Job)
			releaseNode(event, &jobQueue, &nodeQueue, scheduler, &eventQueue)

		case models.Kill:
			jobsKilled++
			scheduler.FailCallback(event.Job)
			releaseNode(event, &jobQueue, &nodeQueue, scheduler, &eventQueue)

		case models.Miss:
			jobsDlMissed++
			scheduler.FailCallback(event.Job)
			releaseNode(event, &jobQueue, &nodeQueue, scheduler, &eventQueue)
		}
	}

	appendRecordCSV(outputPath, outputLock, []string{
		fmt.Sprintf("%d", clusterSize),
		fmt.Sprintf("%d", sysLoad),
		fmt.Sprintf("%d", jobsSeen),
		fmt.Sprintf("%d", jobsCompleted),
		fmt.Sprintf("%d", jobsRejected),
		fmt.Sprintf("%d", jobsKilled),
		fmt.Sprintf("%d", jobsDlMissed),
	})
}

// releaseNode hands event.Node its next queued job, or marks the node idle
// when the job queue is empty, then restores the node heap invariant.
func releaseNode(event *models.Event, jobQueue *models.JobQueue, nodeQueue *models.NodeQueue,
	scheduler scheduling.Scheduler, eventQueue *models.EventQueue) {
	if jobQueue.Len() > 0 {
		startNextJob(event, event.Node, heap.Pop(jobQueue).(*models.Job), scheduler, eventQueue)
	} else {
		event.Node.CurJob = nil
		event.Node.EstCompletion = -1
	}
	heap.Fix(nodeQueue, event.Node.Index)
}

// appendRecordCSV appends a single CSV record to the file at outputPath while
// holding lock. The file must already exist (no O_CREATE — presumably the
// header row is written when the file is created elsewhere; TODO confirm).
// Sync is skipped when the CSV writer reports an error.
func appendRecordCSV(outputPath string, lock *sync.Mutex, record []string) {
	lock.Lock()
	defer lock.Unlock()
	file, err := os.OpenFile(outputPath, os.O_APPEND|os.O_WRONLY, 0666)
	utils.Check(err)
	defer file.Close()
	writer := csv.NewWriter(file)
	err = writer.Write(record)
	utils.Check(err)
	writer.Flush()
	if writer.Error() != nil {
		fmt.Println(writer.Error())
	} else {
		file.Sync()
	}
}
Example #2
0
// readWrapper parses the jobs file at filePath, discarding the result; the
// call exists for its side effects/timing only. threadCapper bounds the
// number of concurrent readers and waitGroup is signalled on return.
func readWrapper(filePath string, waitGroup *sync.WaitGroup, threadCapper *models.Semaphore) {
	threadCapper.Lock()
	// Deferred so a panic inside ReadJobs cannot leak the semaphore slot or
	// leave waiters on the WaitGroup blocked forever. Registration order
	// preserves the original sequence: Free, then Done.
	defer waitGroup.Done()
	defer threadCapper.Free()
	utils.ReadJobs(filePath)
}
Example #3
0
// GenJobs generates and returns a slice of Jobs with timings
// derived from exponentially distributed random variables.
//
// Jobs arrive in bursts: the mean burst size scales with sysLoad and
// clusterSize, and the mean gap between bursts is chosen so the target job
// count fits into simTime. The generated jobs are written to outPath as CSV,
// the ratio of achieved to requested load is sent on precision, and a
// human-readable summary is sent on stats. threadCap bounds concurrent
// generators; jobPool recycles scratch Job structs between iterations.
func GenJobs(sysLoad float64, simTime int, clusterSize int, outPath string, stats chan string,
	precision chan float64, threadCap *models.Semaphore, jobPool *sync.Pool) {

	threadCap.Lock()
	// Deferred so a panic (e.g. in WriteJobsToCSV or a channel send) cannot
	// leak the semaphore slot.
	defer threadCap.Free()

	totalComputation := 0
	numJobs := round(
		(sysLoad * float64(simTime) * float64(clusterSize)) / realExecAvg)
	numSubmitters := round(math.Sqrt(float64(numJobs)))

	// Each submitter gets its own mean estimated-execution time,
	// rejection-sampled into (estExecAvg/15, estExecAvg*15) to avoid
	// degenerate outliers.
	submitterEstAvgs := make([]float64, numSubmitters)
	for i := 0; i < numSubmitters; i++ {
		for {
			submitterEstAvgs[i] = rand.ExpFloat64() * estExecAvg
			if submitterEstAvgs[i] > estExecAvg/15.0 && submitterEstAvgs[i] < estExecAvg*15.0 {
				break
			}
		}
	}

	var burstSize int
	burstAvg := sysLoad * float64(clusterSize) * burstMult

	var interval int
	// Mean gap between bursts so that numJobs jobs fit into simTime.
	intervalLambda := float64(simTime) / (float64(numJobs) / burstAvg)

	// Pre-sized to the target count; may grow slightly because the final
	// burst is not truncated (see NOTE below).
	jobs := make([]models.Job, 0, numJobs)
	arrival := 0
	var estExec, realExec, deadline, submitter int

	for i := 0; i < numJobs; {
		submitter = rand.Intn(numSubmitters)
		if i != 0 {
			interval = round(rand.ExpFloat64() * intervalLambda)
			arrival += interval
		}
		burstSize = round(rand.ExpFloat64() * burstAvg)
		// NOTE(review): the whole burst is emitted even when it pushes the
		// total past numJobs — presumably intentional so bursts stay intact;
		// confirm if an exact count is required.
		for j := 0; j < burstSize; j++ {
			realExec = round(rand.ExpFloat64() * realExecAvg)
			for { // estExec must always be > realExec
				estExec = round(rand.ExpFloat64() * submitterEstAvgs[submitter])
				if estExec > realExec {
					break
				}
			}
			for { // deadline must always be > estExec (n.b. this implicitly makes it > realExec)
				deadline = round(rand.ExpFloat64() * deadlineAvg)
				if deadline > estExec {
					break
				}
			}

			// Reuse a scratch Job from the pool; the slice stores a copy, so
			// the pooled object can be returned immediately.
			var newJob *models.Job
			if poolFetch := jobPool.Get(); poolFetch == nil {
				newJob = &models.Job{}
			} else {
				newJob = poolFetch.(*models.Job)
			}
			newJob.ID = i
			newJob.SubmitterID = submitter
			newJob.Arrival = arrival
			newJob.EstExec = estExec
			newJob.RealExec = realExec
			newJob.Deadline = deadline
			jobs = append(jobs, *newJob)
			jobPool.Put(newJob)

			totalComputation += realExec
			i++
		}
	}
	WriteJobsToCSV(jobs, outPath)

	trueLoad := float64(totalComputation) / (float64(simTime) * float64(clusterSize))
	precision <- (trueLoad / sysLoad)
	statsMsg := "Generated jobs for:\n" +
		"    Cluster Size: %v\n" +
		"    System Load: %v\n" +
		"    Time Span: %v\n" +
		"    Actual computation load: %v\n"
	statsMsg = fmt.Sprintf(statsMsg, clusterSize, sysLoad, simTime, trueLoad)
	stats <- statsMsg
}