Example #1
// if this flow context is not the one assigned to this runner, do nothing
func (tr *TaskRunner) Run(fc *flow.FlowContext) {
	if fc.Id != tr.option.ContextId {
		return
	}
	fc.ChannelBufferSize = tr.option.ChannelBufferSize

	taskGroups := plan.GroupTasks(fc)

	tr.Tasks = taskGroups[tr.option.TaskGroupId].Tasks
	tr.FlowContext = fc

	if len(tr.Tasks) == 0 {
		log.Println("How can the task group have no tasks!")
		return
	}

	// println("taskGroup", tr.Tasks[0].Name(), "starts")
	// 4. set up task input and output channels
	var wg sync.WaitGroup
	tr.connectInputsAndOutputs(&wg)
	// 6. start running the tasks locally
	for _, task := range tr.Tasks {
		// println("run task", task.Name())
		wg.Add(1)
		go func(task *flow.Task) {
			defer wg.Done()
			task.RunTask()
		}(task)
	}
	// 7. need to close connected output channels
	wg.Wait()
	// println("taskGroup", tr.Tasks[0].Name(), "finishes")
}
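
The heart of Run is a fan-out/fan-in pattern: one goroutine per task, joined through a sync.WaitGroup. Passing task as an argument to the closure pins the loop variable, which matters on Go versions before 1.22. A minimal, self-contained sketch of just that pattern (Task is a hypothetical stand-in for flow.Task):

package main

import (
	"fmt"
	"sync"
)

// Task is a hypothetical stand-in for flow.Task; only RunTask matters here.
type Task struct{ Name string }

// RunTask does the actual work; here it only prints.
func (t *Task) RunTask() { fmt.Println("running", t.Name) }

func main() {
	tasks := []*Task{{Name: "map-0"}, {Name: "map-1"}, {Name: "reduce-0"}}

	var wg sync.WaitGroup
	for _, task := range tasks {
		wg.Add(1)
		go func(task *Task) {
			defer wg.Done() // signal completion no matter how RunTask exits
			task.RunTask()
		}(task)
	}
	wg.Wait() // block until every task goroutine has finished
}
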
Example #2
// if this flow context is not the one assigned to this runner, do nothing
func (tr *TaskRunner) Run(fc *flow.FlowContext) {
	if fc.Id != tr.option.ContextId {
		return
	}
	fc.ChannelBufferSize = tr.option.ChannelBufferSize

	_, taskGroups := plan.GroupTasks(fc)
	tr.Tasks = taskGroups[tr.option.TaskGroupId].Tasks
	tr.FlowContext = fc

	tr.executorStatus.StartTime = time.Now()

	go tr.reportLocalExecutorStatus()

	// println("taskGroup", tr.Tasks[0].Name(), "starts")
	// 4. set up task input and output channels
	var wg sync.WaitGroup
	tr.connectInputsAndOutputs(&wg)
	// 6. start running the tasks locally
	for _, task := range tr.Tasks {
		// println("run task", task.Name())
		wg.Add(1)
		go func(task *flow.Task) {
			defer wg.Done()
			task.RunTask()
		}(task)
	}
	// 7. need to close connected output channels
	wg.Wait()
	// println("taskGroup", tr.Tasks[0].Name(), "finishes", tr.option.RequestId)
	tr.executorStatus.StopTime = time.Now()

	tr.reportLocalExecutorStatusOnce()
}
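
Example #2 brackets the same task loop with executor status reporting: reportLocalExecutorStatus streams status in a goroutine while the tasks run, and reportLocalExecutorStatusOnce sends one final report after wg.Wait(). A sketch of that lifecycle, assuming a simple ticker-based reporter (reportLoop and its signature are assumptions, not the project's API):

package main

import (
	"fmt"
	"sync"
	"time"
)

// reportLoop sends a heartbeat until done is closed, then reports one last time.
func reportLoop(done <-chan struct{}, report func()) {
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			report() // periodic status while tasks are running
		case <-done:
			report() // final report, now with the stop time known
			return
		}
	}
}

func main() {
	start := time.Now()
	done := make(chan struct{})

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		reportLoop(done, func() { fmt.Println("executor up for", time.Since(start)) })
	}()

	time.Sleep(1200 * time.Millisecond) // stand-in for running the tasks
	close(done)                         // tasks finished
	wg.Wait()                           // wait for the final report to go out
}
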
Example #3
// the driver runs locally, controlling all tasks
func (fcd *FlowContextDriver) Run(fc *flow.FlowContext) {

	// task fusion to minimize disk IO
	taskGroups := plan.GroupTasks(fc)
	// plot the execution graph
	if fcd.option.PlotOutput {
		plan.PlotGraph(taskGroups, fc)
		return
	}

	// start a server to serve files to the agents that run executors
	rsyncServer, err := rsync.NewRsyncServer(os.Args[0], fcd.option.RelatedFileNames())
	if err != nil {
		log.Fatalf("Failed to start local server: %v", err)
	}
	rsyncServer.Start()

	// create the scheduler
	sched := scheduler.NewScheduler(
		fcd.option.Leader,
		&scheduler.SchedulerOption{
			DataCenter:         fcd.option.DataCenter,
			Rack:               fcd.option.Rack,
			TaskMemoryMB:       fcd.option.TaskMemoryMB,
			DriverPort:         rsyncServer.Port,
			Module:             fcd.option.Module,
			ExecutableFile:     os.Args[0],
			ExecutableFileHash: rsyncServer.ExecutableFileHash(),
		},
	)

	// best effort to clean data on agent disk
	// this may need more improvements
	defer fcd.Cleanup(sched, fc, taskGroups)

	go sched.EventLoop()

	flow.OnInterrupt(func() {
		fcd.OnInterrupt(fc, taskGroups, sched)
	})

	// schedule to run the steps
	var wg sync.WaitGroup
	for _, taskGroup := range taskGroups {
		wg.Add(1)
		sched.EventChan <- scheduler.SubmitTaskGroup{
			FlowContext: fc,
			TaskGroup:   taskGroup,
			Bid:         fcd.option.FlowBid / float64(len(taskGroups)),
			WaitGroup:   &wg,
		}
	}
	go sched.Market.FetcherLoop()

	wg.Wait()

	fcd.CloseOutputChannels(fc)

}
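
The scheduling step in the driver deserves a closer look: each task group is submitted over a channel with the driver's WaitGroup attached, so the scheduler, not the driver, calls Done when a group finishes, and the flow's total bid is split evenly across groups. A runnable sketch with hypothetical stand-ins for scheduler.SubmitTaskGroup and sched.EventLoop():

package main

import (
	"fmt"
	"sync"
)

// SubmitTaskGroup mirrors the shape of the event in the driver above;
// the exact field set is an assumption.
type SubmitTaskGroup struct {
	Name      string
	Bid       float64
	WaitGroup *sync.WaitGroup
}

func main() {
	events := make(chan SubmitTaskGroup)

	// stand-in for sched.EventLoop(): consume submissions and
	// signal completion of each group via its WaitGroup
	go func() {
		for e := range events {
			fmt.Printf("running %s at bid %.2f\n", e.Name, e.Bid)
			e.WaitGroup.Done()
		}
	}()

	groups := []string{"group-0", "group-1", "group-2"}
	flowBid := 30.0

	var wg sync.WaitGroup
	for _, g := range groups {
		wg.Add(1)
		events <- SubmitTaskGroup{
			Name:      g,
			Bid:       flowBid / float64(len(groups)), // split the flow's bid evenly
			WaitGroup: &wg,
		}
	}
	wg.Wait() // all task groups have reported done
}
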
Example #4
// the driver runs locally, controlling all tasks
func (fcd *FlowContextDriver) Run(fc *flow.FlowContext) {

	taskGroups := plan.GroupTasks(fc)
	if fcd.option.PlotOutput {
		plan.PlotGraph(taskGroups, fc)
		return
	}

	rsyncServer, err := rsync.NewRsyncServer(os.Args[0], fcd.option.RelatedFileNames())
	if err != nil {
		log.Fatalf("Failed to start local server: %v", err)
	}
	rsyncServer.Start()

	sched := scheduler.NewScheduler(
		fcd.option.Leader,
		&scheduler.SchedulerOption{
			DataCenter:         fcd.option.DataCenter,
			Rack:               fcd.option.Rack,
			TaskMemoryMB:       fcd.option.TaskMemoryMB,
			DriverPort:         rsyncServer.Port,
			Module:             fcd.option.Module,
			ExecutableFile:     os.Args[0],
			ExecutableFileHash: rsyncServer.ExecutableFileHash(),
		},
	)
	defer fcd.Cleanup(sched, fc, taskGroups)

	go sched.EventLoop()

	// schedule to run the steps
	var wg sync.WaitGroup
	for _, taskGroup := range taskGroups {
		wg.Add(1)
		sched.EventChan <- scheduler.SubmitTaskGroup{
			FlowContext: fc,
			TaskGroup:   taskGroup,
			Bid:         fcd.option.FlowBid / float64(len(taskGroups)),
			WaitGroup:   &wg,
		}
	}
	go sched.Market.FetcherLoop()

	wg.Wait()

	fcd.CloseOutputChannels(fc)

}
Example #5
// the driver runs locally, controlling all tasks
func (fcd *FlowContextDriver) Run(fc *flow.FlowContext) {

	fcd.checkParameters()

	// task fusion to minimize disk IO
	fcd.stepGroups, fcd.taskGroups = plan.GroupTasks(fc)
	// plot the execution graph
	if fcd.Option.PlotOutput {
		plan.PlotGraph(fcd.taskGroups, fc)
		return
	}

	tlsConfig := fcd.Option.CertFiles.MakeTLSConfig()
	util.SetupHttpClient(tlsConfig)

	// start a server to serve files to the agents that run executors
	rsyncServer, err := rsync.NewRsyncServer(os.Args[0], fcd.Option.RelatedFileNames())
	if err != nil {
		log.Fatalf("Failed to start local server: %v", err)
	}
	rsyncServer.StartRsyncServer(tlsConfig, fcd.Option.Host+":"+strconv.Itoa(fcd.Option.Port))

	driverHost := fcd.Option.Host

	// create the scheduler
	sched := scheduler.NewScheduler(
		fcd.Option.Leader,
		&scheduler.SchedulerOption{
			DataCenter:         fcd.Option.DataCenter,
			Rack:               fcd.Option.Rack,
			TaskMemoryMB:       fcd.Option.TaskMemoryMB,
			DriverHost:         driverHost,
			DriverPort:         rsyncServer.Port,
			Module:             fcd.Option.Module,
			ExecutableFile:     os.Args[0],
			ExecutableFileHash: rsyncServer.ExecutableFileHash(),
			TlsConfig:          tlsConfig,
		},
	)

	// best effort to clean data on agent disk
	// this may need more improvements
	defer fcd.Cleanup(sched, fc)

	go sched.EventLoop()

	flow.OnInterrupt(func() {
		fcd.OnInterrupt(fc, sched)
	}, func() {
		fcd.OnExit(fc, sched)
	})

	// schedule to run the steps
	var wg sync.WaitGroup
	for _, taskGroup := range fcd.taskGroups {
		wg.Add(1)
		sched.EventChan <- scheduler.SubmitTaskGroup{
			FlowContext: fc,
			TaskGroup:   taskGroup,
			Bid:         fcd.Option.FlowBid / float64(len(fcd.taskGroups)),
			WaitGroup:   &wg,
		}
	}
	go sched.Market.FetcherLoop()

	wg.Wait()

	fcd.CloseOutputChannels(fc)

	if fcd.Option.ShowFlowStats {
		fcd.ShowFlowStatus(fc, sched)
	}

}
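
Example #5 threads a single tls.Config through the HTTP client, the rsync server, and the scheduler. MakeTLSConfig itself is not shown; a plausible sketch using only the standard library (the function name, the file-path parameters, and the mutual-TLS option choices here are assumptions, not the project's actual API):

package main

import (
	"crypto/tls"
	"crypto/x509"
	"log"
	"os"
)

// makeTLSConfig loads a key pair and a CA bundle for mutual TLS.
func makeTLSConfig(certFile, keyFile, caFile string) *tls.Config {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		log.Fatalf("load key pair: %v", err)
	}
	caPEM, err := os.ReadFile(caFile)
	if err != nil {
		log.Fatalf("read CA file: %v", err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		log.Fatal("no CA certificates parsed")
	}
	return &tls.Config{
		Certificates: []tls.Certificate{cert}, // presented to peers
		RootCAs:      pool,                    // CAs trusted when dialing out
		ClientCAs:    pool,                    // CAs trusted for incoming clients
		ClientAuth:   tls.RequireAndVerifyClientCert,
	}
}

func main() {
	// the paths are placeholders; real PEM files must exist on disk
	cfg := makeTLSConfig("driver.crt", "driver.key", "ca.crt")
	log.Println("TLS config loaded with", len(cfg.Certificates), "certificate(s)")
}
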
Example #6
func (fcd *FlowContextDriver) Plot(fc *flow.FlowContext) {
	_, fcd.taskGroups = plan.GroupTasks(fc)
	plan.PlotGraph(fcd.taskGroups, fc)
}