Example #1
func (e *Executor) run(gather bool) ([]*CliJobResponse, error) {
	on := e.On
	remote := on.Group()
	single := len(on) == 1
	responses := []*CliJobResponse{}

	// Check each job first and return the first error (failures here indicate coding bugs)
	byDestination := make([]requestedJobs, len(remote))
	for i := range remote {
		group := remote[i]
		jobs := e.requests(group)
		if err := jobs.check(); err != nil {
			return responses, err
		}
		byDestination[i] = jobs
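		// Resolve a transport-specific job for each request bound for this destination.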
		for j := range jobs {
			job, err := e.Transport.RemoteJobFor(jobs[j].Locator.TransportLocator(), jobs[j].Request)
			if err != nil {
				return responses, err
			}
			byDestination[i][j].Job = job
		}
	}

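	// respch is buffered with one slot per target so workers can queue responses as they finish.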
	respch := make(chan *CliJobResponse, len(on))
	tasks := &sync.WaitGroup{}
	stdout := log.New(e.Output, "", 0)

	// Execute jobs against each destination in parallel, but serially within each destination.
	for i := range byDestination {
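		// Copy this iteration's values so the goroutine closes over them rather than the shared loop variable.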
		allJobs := byDestination[i]
		host := allJobs[0].Locator.TransportLocator()

		tasks.Add(1)
		go func() {
			w := logstreamer.NewLogstreamer(stdout, prefixUnless(host.String()+" ", single), false)
			defer w.Close()
			defer tasks.Done()

			for _, job := range allJobs {
				response := &CliJobResponse{Output: w, Gather: gather}
				job.Job.Execute(response)
				respch <- e.react(response, w, job.Request)
			}
		}()
	}

	tasks.Wait()
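	// Wait has returned, so every response has already been sent into the buffered respch; drain it without blocking.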
Response:
	for {
		select {
		case resp := <-respch:
			responses = append(responses, resp)
		default:
			break Response
		}
	}

	return responses, nil
}
Example #2
func (e *Executor) run(gather bool) ([]*CliJobResponse, error) {
	on := e.On
	local, remote := Locators(on).Group()
	single := len(on) == 1
	responses := []*CliJobResponse{}

	// Check each job first and return the first error (failures here indicate coding bugs)
	localJobs := e.jobs(local)
	if err := localJobs.check(); err != nil {
		return responses, err
	}
	remoteJobs := make([]remoteJobSet, len(remote))
	for i := range remote {
		jobs := e.jobs(remote[i])
		if err := jobs.check(); err != nil {
			return responses, err
		}
		remotes, err := jobs.remotes()
		if err != nil {
			return responses, err
		}
		remoteJobs[i] = remotes
	}

	// Perform local initialization
	if len(local) > 0 && e.LocalInit != nil {
		if err := e.LocalInit(); err != nil {
			return responses, err
		}
	}

	respch := make(chan *CliJobResponse, len(on))
	tasks := &sync.WaitGroup{}
	stdout := log.New(e.Output, "", 0)

	// Execute the local jobs in serial (can parallelize later)
	if len(localJobs) > 0 {
		tasks.Add(1)
		go func() {
			w := logstreamer.NewLogstreamer(stdout, "local ", false)
			defer w.Close()
			defer tasks.Done()

			for _, job := range localJobs {
				response := &CliJobResponse{Output: w, Gather: gather}
				job.Execute(response)
				respch <- e.react(response, w, job)
			}
		}()
	}

	// Execute jobs against each remote server in parallel
	for i := range remote {
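		// Take per-iteration copies for the goroutine below instead of closing over the loop index.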
		ids := remote[i]
		allJobs := remoteJobs[i]
		host := ids[0].HostIdentity()
		locator := ids[0].(http.RemoteLocator)

		tasks.Add(1)
		go func() {
			w := logstreamer.NewLogstreamer(stdout, prefixUnless(host+" ", single), false)
			logger := log.New(w, "", 0)
			defer w.Close()
			defer tasks.Done()

			dispatcher := http.NewHttpDispatcher(locator, logger)
			for _, job := range allJobs {
				response := &CliJobResponse{Output: w, Gather: gather}
				if err := dispatcher.Dispatch(job, response); err != nil {
					// set an explicit error
					response = &CliJobResponse{
						Error: jobs.SimpleJobError{jobs.JobResponseError, fmt.Sprintf("The server did not respond correctly: %s", err.Error())},
					}
				}
				respch <- e.react(response, w, job)
			}
		}()
	}

	tasks.Wait()
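	// With all goroutines finished, every send into respch has completed; collect the buffered responses.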
Response:
	for {
		select {
		case resp := <-respch:
			responses = append(responses, resp)
		default:
			break Response
		}
	}

	return responses, nil
}
Example #3
func (e *Executor) run(gather bool) ([]*CliJobResponse, error) {
	on := e.On
	local, remote := on.Group()
	single := len(on) == 1
	responses := []*CliJobResponse{}

	// Check each job first and return the first error (failures here indicate coding bugs)
	localJobs := e.jobs(local)
	if err := localJobs.check(); err != nil {
		return responses, err
	}
	remoteJobs := make([][]remoteJob, len(remote))
	for i := range remote {
		locator := remote[i]
		jobs := e.jobs(locator)
		if err := jobs.check(); err != nil {
			return responses, err
		}
		remotes := make([]remoteJob, len(jobs))
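		// Pair each checked job with its transport-specific remote form and target locator.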
		for j := range jobs {
			job, err := e.Transport.RemoteJobFor(locator[0].TransportLocator(), jobs[j])
			if err != nil {
				return responses, err
			}
			remotes[j] = remoteJob{job, jobs[j], locator[0]}
		}
		remoteJobs[i] = remotes
	}

	// Perform local initialization
	if len(local) > 0 && e.LocalInit != nil {
		if err := e.LocalInit(); err != nil {
			return responses, err
		}
	}

	respch := make(chan *CliJobResponse, len(on))
	tasks := &sync.WaitGroup{}
	stdout := log.New(e.Output, "", 0)

	// Execute the local jobs in serial (can parallelize later)
	if len(localJobs) > 0 {
		tasks.Add(1)
		go func() {
			w := logstreamer.NewLogstreamer(stdout, prefixUnless("local ", single), false)
			defer w.Close()
			defer tasks.Done()

			for _, job := range localJobs {
				response := &CliJobResponse{Output: w, Gather: gather}
				job.Execute(response)
				respch <- e.react(response, w, job)
			}
		}()
	}

	// Execute jobs against each remote server in parallel (jobs sent to each server could also be parallelized if necessary)
	for i := range remote {
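		// Copy this iteration's values before launching the goroutine so it does not share the loop variable.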
		ids := remote[i]
		allJobs := remoteJobs[i]
		host := ids[0].TransportLocator()

		tasks.Add(1)
		go func() {
			w := logstreamer.NewLogstreamer(stdout, prefixUnless(host.String()+" ", single), false)
			defer w.Close()
			defer tasks.Done()

			for _, job := range allJobs {
				response := &CliJobResponse{Output: w, Gather: gather}
				job.Execute(response)
				respch <- e.react(response, w, job.Original)
			}
		}()
	}

	tasks.Wait()
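	// All workers are done, so respch already holds every response; drain it until empty.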
Response:
	for {
		select {
		case resp := <-respch:
			responses = append(responses, resp)
		default:
			break Response
		}
	}

	return responses, nil
}
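
All three versions collect results the same way: worker goroutines queue responses on a channel buffered with one slot per target, tasks.Wait() guarantees every send has completed, and a labeled break exits the drain loop once the buffer is empty. The sketch below is a minimal, standalone illustration of that pattern only; it is not taken from the code above, and every name in it (workers, results, Drain) is illustrative.

package main

import (
	"fmt"
	"sync"
)

func main() {
	const workers = 3
	// Buffered with one slot per worker, so a send never blocks a finished goroutine.
	results := make(chan int, workers)
	tasks := &sync.WaitGroup{}

	for i := 0; i < workers; i++ {
		i := i // per-iteration copy for the closure
		tasks.Add(1)
		go func() {
			defer tasks.Done()
			results <- i * i
		}()
	}

	// After Wait returns, every send above has completed into the buffer.
	tasks.Wait()

	collected := []int{}
Drain:
	for {
		select {
		case r := <-results:
			collected = append(collected, r)
		default:
			break Drain // channel is empty; leave the labeled loop
		}
	}
	fmt.Println(collected)
}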