// Run runs the job, streams its logs, and reports the result back to the
// Buildkite Agent API.
func (r *JobRunner) Run() error {
    logger.Info("Starting job %s", r.Job.ID)

    // Start the build in the Buildkite Agent API. This is the first thing
    // we do so if it fails, we don't have to worry about cleaning things
    // up like started log streamer workers, etc.
    if err := r.startJob(time.Now()); err != nil {
        return err
    }

    // Start the header time streamer
    if err := r.headerTimesStreamer.Start(); err != nil {
        return err
    }

    // Start the log streamer
    if err := r.logStreamer.Start(); err != nil {
        return err
    }

    // Start the process. This will block until it finishes.
    if err := r.process.Start(); err != nil {
        // Send the error as output
        r.logStreamer.Process(fmt.Sprintf("%s", err))
    } else {
        // Add the final output to the streamer
        r.logStreamer.Process(r.process.Output())
    }

    // Store the finished-at time
    finishedAt := time.Now()

    // Stop the header time streamer. This will block until all the chunks
    // have been uploaded
    r.headerTimesStreamer.Stop()

    // Stop the log streamer. This will block until all the chunks have
    // been uploaded
    r.logStreamer.Stop()

    // Warn about failed chunks
    if r.logStreamer.ChunksFailedCount > 0 {
        logger.Warn("%d chunks failed to upload for this job", r.logStreamer.ChunksFailedCount)
    }

    // Finish the build in the Buildkite Agent API
    r.finishJob(finishedAt, r.process.ExitStatus, int(r.logStreamer.ChunksFailedCount))

    // Wait for the routines that we spun up to finish
    logger.Debug("[JobRunner] Waiting for all other routines to finish")
    r.routineWaitGroup.Wait()

    logger.Info("Finished job %s", r.Job.ID)

    return nil
}
func (a *ArtifactSearcher) Search(query string, scope string) ([]*api.Artifact, error) {
    if scope == "" {
        logger.Info("Searching for artifacts: \"%s\"", query)
    } else {
        logger.Info("Searching for artifacts: \"%s\" within step: \"%s\"", query, scope)
    }

    options := &api.ArtifactSearchOptions{Query: query, Scope: scope}
    artifacts, _, err := a.APIClient.Artifacts.Search(a.BuildID, options)

    return artifacts, err
}
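// An illustrative call site for Search, mirroring how it is constructed
// elsewhere in this section (the build ID, query, and step values here are
// made up for the example):
//
//	searcher := ArtifactSearcher{BuildID: "some-build-uuid", APIClient: client}
//	artifacts, err := searcher.Search("log/**/*.txt", "tests")
//	if err != nil {
//		logger.Fatal("Failed to find artifacts: %s", err)
//	}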
// Stops the agent from accepting new work and cancels any current work it's
// running
func (a *AgentWorker) Stop(graceful bool) {
    // Only allow one stop to run at a time (because we're playing with
    // channels)
    a.stopMutex.Lock()
    defer a.stopMutex.Unlock()

    if graceful {
        if a.stopping {
            logger.Warn("Agent is already gracefully stopping...")
        } else {
            // If we have a job, tell the user that we'll wait for
            // it to finish before disconnecting
            if a.jobRunner != nil {
                logger.Info("Gracefully stopping agent. Waiting for current job to finish before disconnecting...")
            } else {
                logger.Info("Gracefully stopping agent. Since there is no job running, the agent will disconnect immediately")
            }
        }
    } else {
        // If there's a job running, kill it, then disconnect
        if a.jobRunner != nil {
            logger.Info("Forcefully stopping agent. The current job will be canceled before disconnecting...")

            // Kill the current job. Doesn't do anything if the job
            // is already being killed, so it's safe to call
            // multiple times.
            a.jobRunner.Kill()
        } else {
            logger.Info("Forcefully stopping agent. Since there is no job running, the agent will disconnect immediately")
        }
    }

    // We don't need to do the below operations again since we've already
    // done them before
    if a.stopping {
        return
    }

    // Update the proc title
    a.UpdateProcTitle("stopping")

    // If we have a ticker, stop it, and send a signal to the stop channel,
    // which will cause the agent worker to stop looping immediately.
    if a.ticker != nil {
        close(a.stop)
    }

    // Mark the agent as stopping
    a.stopping = true
}
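// For context, the stop channel closed above is typically consumed by the
// worker's main loop. A minimal sketch of that pattern (not the actual
// Start implementation, which isn't shown in this section):
//
//	for {
//		a.Ping()
//
//		select {
//		case <-a.ticker.C:
//			// interval elapsed, loop and ping again
//		case <-a.stop:
//			// Stop() was called, exit the loop immediately
//			return nil
//		}
//	}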
func (a *ArtifactBatchCreator) Create() ([]*api.Artifact, error) {
    length := len(a.Artifacts)
    chunks := 30

    // Split the artifacts into chunks so we're not uploading a ton of
    // files at once.
    for i := 0; i < length; i += chunks {
        j := i + chunks
        if length < j {
            j = length
        }

        // The artifacts that will be uploaded in this chunk
        theseArtifacts := a.Artifacts[i:j]

        // An ID is required so Buildkite can ensure this create
        // operation is idempotent (if we try and upload the same ID
        // twice, it'll just return the previous data and skip the
        // upload)
        batch := &api.ArtifactBatch{api.NewUUID(), theseArtifacts, a.UploadDestination}

        logger.Info("Creating (%d-%d)/%d artifacts", i, j, length)

        var creation *api.ArtifactBatchCreateResponse
        var resp *api.Response
        var err error

        // Retry the batch upload a couple of times
        err = retry.Do(func(s *retry.Stats) error {
            creation, resp, err = a.APIClient.Artifacts.Create(a.JobID, batch)
            if resp != nil && (resp.StatusCode == 401 || resp.StatusCode == 404 || resp.StatusCode == 500) {
                s.Break()
            }
            if err != nil {
                logger.Warn("%s (%s)", err, s)
            }

            return err
        }, &retry.Config{Maximum: 10, Interval: 1 * time.Second})

        // Did the batch creation eventually fail?
        if err != nil {
            return nil, err
        }

        // Save the ID and upload instructions to each artifact
        index := 0
        for _, id := range creation.ArtifactIDs {
            theseArtifacts[index].ID = id
            theseArtifacts[index].UploadInstructions = creation.UploadInstructions
            index++
        }
    }

    return a.Artifacts, nil
}
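// A worked example of the chunking arithmetic above (the values are
// illustrative): with 65 artifacts and chunks = 30, the loop produces the
// slice windows [0:30], [30:60], and [60:65]. The final, short chunk is
// handled by the `if length < j` clamp rather than by a special case.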
func (a *ArtifactUploader) Upload() error {
    // Create artifact structs for all the files we need to upload
    artifacts, err := a.Collect()
    if err != nil {
        return err
    }

    if len(artifacts) == 0 {
        logger.Info("No files matched paths: %s", a.Paths)
    } else {
        logger.Info("Found %d files that match \"%s\"", len(artifacts), a.Paths)

        err := a.upload(artifacts)
        if err != nil {
            return err
        }
    }

    return nil
}
func (r *JobRunner) Kill() error {
    if !r.cancelled {
        logger.Info("Canceling job %s", r.Job.ID)
        r.cancelled = true

        if r.process != nil {
            r.process.Kill()
        } else {
            logger.Error("No process to kill")
        }
    }

    return nil
}
// Shows the welcome banner and the configuration options used when starting
// this agent.
func (r *AgentPool) ShowBanner() {
    welcomeMessage := "\n" +
        "%s  _           _ _     _ _    _ _                                _\n" +
        " | |         (_) |   | | |  (_) |                              | |\n" +
        " | |__  _   _ _| | __| | | ___| |_ ___    __ _  __ _  ___ _ __ | |_\n" +
        " | '_ \\| | | | | |/ _` | |/ / | __/ _ \\  / _` |/ _` |/ _ \\ '_ \\| __|\n" +
        " | |_) | |_| | | | (_| |   <| | ||  __/ | (_| | (_| |  __/ | | | |_\n" +
        " |_.__/ \\__,_|_|_|\\__,_|_|\\_\\_|\\__\\___|  \\__,_|\\__, |\\___|_| |_|\\__|\n" +
        "                                                 __/ |\n" +
        " http://buildkite.com/agent                     |___/\n%s\n"

    if logger.ColorsEnabled() {
        fmt.Fprintf(logger.OutputPipe(), welcomeMessage, "\x1b[32m", "\x1b[0m")
    } else {
        fmt.Fprintf(logger.OutputPipe(), welcomeMessage, "", "")
    }

    logger.Notice("Starting buildkite-agent v%s with PID: %d", Version(), os.Getpid())
    logger.Notice("The agent source code can be found here: https://github.com/buildkite/agent")
    logger.Notice("For questions and support, email us at: [email protected]")

    if r.ConfigFilePath != "" {
        logger.Info("Configuration loaded from: %s", r.ConfigFilePath)
    }

    logger.Debug("Bootstrap command: %s", r.AgentConfiguration.BootstrapScript)
    logger.Debug("Build path: %s", r.AgentConfiguration.BuildPath)
    logger.Debug("Hooks directory: %s", r.AgentConfiguration.HooksPath)
    logger.Debug("Plugins directory: %s", r.AgentConfiguration.PluginsPath)

    if !r.AgentConfiguration.SSHFingerprintVerification {
        logger.Debug("Automatic SSH fingerprint verification has been disabled")
    }
    if !r.AgentConfiguration.CommandEval {
        logger.Debug("Evaluating console commands has been disabled")
    }
    if !r.AgentConfiguration.RunInPty {
        logger.Debug("Running builds within a pseudoterminal (PTY) has been disabled")
    }
}
func (a *ArtifactBatchCreator) Create() ([]*api.Artifact, error) {
    length := len(a.Artifacts)
    chunks := 10
    uploaded := []*api.Artifact{}

    // Split the artifacts into chunks so we're not uploading a ton of
    // files at once.
    for i := 0; i < length; i += chunks {
        j := i + chunks
        if length < j {
            j = length
        }

        artifacts := a.Artifacts[i:j]

        logger.Info("Creating (%d-%d)/%d artifacts", i, j, length)

        var u []*api.Artifact
        var err error

        // Retry the batch upload a couple of times
        err = retry.Do(func(s *retry.Stats) error {
            u, _, err = a.APIClient.Artifacts.Create(a.JobID, artifacts)
            if err != nil {
                logger.Warn("%s (%s)", err, s)
            }

            return err
        }, &retry.Config{Maximum: 10, Interval: 1 * time.Second})
        if err != nil {
            return nil, err
        }

        uploaded = append(uploaded, u...)
    }

    return uploaded, nil
}
// Performs a ping, which returns what action the agent should take next.
func (a *AgentWorker) Ping() {
    // Update the proc title
    a.UpdateProcTitle("pinging")

    ping, _, err := a.APIClient.Pings.Get()
    if err != nil {
        // If a ping fails, we don't really care, because it'll
        // ping again after the interval.
        logger.Warn("Failed to ping: %s", err)
        return
    }

    // Should we switch endpoints?
    if ping.Endpoint != "" && ping.Endpoint != a.Agent.Endpoint {
        // Before switching to the new one, do a ping test to make sure it's
        // valid. If it is, switch and carry on, otherwise ignore the switch
        // for now.
        newAPIClient := APIClient{Endpoint: ping.Endpoint, Token: a.Agent.AccessToken}.Create()
        newPing, _, err := newAPIClient.Pings.Get()
        if err != nil {
            logger.Warn("Failed to ping the new endpoint %s - ignoring switch for now (%s)", ping.Endpoint, err)
        } else {
            // Replace the APIClient and process the new ping
            a.APIClient = newAPIClient
            a.Agent.Endpoint = ping.Endpoint
            ping = newPing
        }
    }

    // Is there a message that should be shown in the logs?
    if ping.Message != "" {
        logger.Info(ping.Message)
    }

    // Should the agent disconnect?
    if ping.Action == "disconnect" {
        a.Stop(false)
        return
    }

    // If we don't have a job, there's nothing to do!
    if ping.Job == nil {
        // Update the proc title
        a.UpdateProcTitle("idle")

        return
    }

    // Update the proc title
    a.UpdateProcTitle(fmt.Sprintf("job %s", strings.Split(ping.Job.ID, "-")[0]))

    logger.Info("Assigned job %s. Accepting...", ping.Job.ID)

    // Accept the job. We'll retry on connection-related issues, but if
    // Buildkite returns a 422 or 500 for example, we'll just bail out,
    // re-ping, and try the whole process again.
    var accepted *api.Job
    retry.Do(func(s *retry.Stats) error {
        accepted, _, err = a.APIClient.Jobs.Accept(ping.Job)

        if err != nil {
            if api.IsRetryableError(err) {
                logger.Warn("%s (%s)", err, s)
            } else {
                logger.Warn("Buildkite rejected the call to accept the job (%s)", err)
                s.Break()
            }
        }

        return err
    }, &retry.Config{Maximum: 30, Interval: 1 * time.Second})

    // If `accepted` is nil, then the job was never accepted
    if accepted == nil {
        logger.Error("Failed to accept job")
        return
    }

    // Now that the job has been accepted, we can start it.
    a.jobRunner, err = JobRunner{
        Endpoint:           accepted.Endpoint,
        Agent:              a.Agent,
        AgentConfiguration: a.AgentConfiguration,
        Job:                accepted,
    }.Create()

    // Was there an error creating the job runner?
    if err != nil {
        logger.Error("Failed to initialize job: %s", err)
        return
    }

    // Start running the job
    if err = a.jobRunner.Run(); err != nil {
        logger.Error("Failed to run job: %s", err)
    }

    // No more job, no more runner.
    a.jobRunner = nil
}
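// The retry pattern used above, shown in isolation (doWork and isPermanent
// are placeholders for this example, not functions from this code base):
// returning a non-nil error triggers another attempt, while calling
// s.Break() stops retrying and makes retry.Do return the current error
// immediately.
//
//	err := retry.Do(func(s *retry.Stats) error {
//		err := doWork()
//		if err != nil && isPermanent(err) {
//			s.Break()
//		}
//		return err
//	}, &retry.Config{Maximum: 30, Interval: 1 * time.Second})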
            }
        }

        // Create the API client
        client := agent.APIClient{
            Endpoint: cfg.Endpoint,
            Token:    cfg.AgentAccessToken,
        }.Create()

        // Generate a UUID that will identify this pipeline change. We
        // do this outside of the retry loop because we want this UUID
        // to be the same for each attempt at updating the pipeline.
        uuid := api.NewUUID()

        // Retry the pipeline upload a few times before giving up
        err = retry.Do(func(s *retry.Stats) error {
            _, err = client.Pipelines.Upload(cfg.Job, &api.Pipeline{UUID: uuid, Data: input, FileName: filename, Replace: cfg.Replace})
            if err != nil {
                logger.Warn("%s (%s)", err, s)
            }

            return err
        }, &retry.Config{Maximum: 5, Interval: 1 * time.Second})
        if err != nil {
            logger.Fatal("Failed to upload and process pipeline: %s", err)
        }

        logger.Info("Successfully uploaded and parsed pipeline config")
    },
}
func (d Download) try() error {
    // If we're downloading a file with a path of "pkg/foo.txt" to a folder
    // called "pkg", we should merge the two paths together. So, instead of it
    // downloading to: destination/pkg/pkg/foo.txt, it will just download to
    // destination/pkg/foo.txt
    destinationPaths := strings.Split(d.Destination, string(os.PathSeparator))
    downloadPaths := strings.Split(d.Path, string(os.PathSeparator))

    for i := 0; i < len(downloadPaths); i++ {
        // If the last part of the destination path matches
        // this part of the download path, then cut it out.
        lastIndex := len(destinationPaths) - 1

        // Break if we've gone too far.
        if lastIndex == -1 {
            break
        }

        lastPathInDestination := destinationPaths[lastIndex]
        if lastPathInDestination == downloadPaths[i] {
            destinationPaths = destinationPaths[:lastIndex]
        }
    }

    finalizedDestination := strings.Join(destinationPaths, string(os.PathSeparator))

    targetFile := filepath.Join(finalizedDestination, d.Path)
    targetDirectory, _ := filepath.Split(targetFile)

    // Show a nice message that we're starting to download the file
    logger.Debug("Downloading %s to %s", d.URL, targetFile)

    // Start by downloading the file
    response, err := d.Client.Get(d.URL)
    if err != nil {
        return fmt.Errorf("Error while downloading %s (%T: %v)", d.URL, err, err)
    }
    defer response.Body.Close()

    // Double check the status
    if response.StatusCode/100 != 2 && response.StatusCode/100 != 3 {
        if d.DebugHTTP {
            responseDump, err := httputil.DumpResponse(response, true)
            logger.Debug("\nERR: %s\n%s", err, string(responseDump))
        }

        return &downloadError{response.Status}
    }

    // Now make the folder for our file
    err = os.MkdirAll(targetDirectory, 0777)
    if err != nil {
        return fmt.Errorf("Failed to create folder for %s (%T: %v)", targetFile, err, err)
    }

    // Create a file to handle the file
    fileBuffer, err := os.Create(targetFile)
    if err != nil {
        return fmt.Errorf("Failed to create file %s (%T: %v)", targetFile, err, err)
    }
    defer fileBuffer.Close()

    // Copy the data to the file
    bytes, err := io.Copy(fileBuffer, response.Body)
    if err != nil {
        return fmt.Errorf("Error when copying data %s (%T: %v)", d.URL, err, err)
    }

    logger.Info("Successfully downloaded \"%s\" %d bytes", d.Path, bytes)

    return nil
}
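// Start is called on Download values elsewhere in this section (e.g. with
// Retries: 5), but isn't shown here. A minimal sketch of the retry wrapper
// those call sites imply, reusing the retry helper used throughout this
// code; the interval and logging are assumptions, not the real
// implementation:
func (d Download) Start() error {
    return retry.Do(func(s *retry.Stats) error {
        err := d.try()
        if err != nil {
            logger.Warn("Error trying to download %s (%s)", d.URL, s)
        }

        return err
    }, &retry.Config{Maximum: d.Retries, Interval: 1 * time.Second})
}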
func (p *Process) Start() error {
    c, err := shell.CommandFromString(p.Script)
    if err != nil {
        return err
    }

    p.command = exec.Command(c.Command, c.Args...)

    // Copy the current process's env and merge in the new ones. We do this
    // so the sub-process gets PATH and so on. We merge our env in over the
    // top of the current one, so the env from Buildkite and the agent
    // takes precedence over the agent's own environment.
    currentEnv := os.Environ()
    p.command.Env = append(currentEnv, p.Env...)

    var waitGroup sync.WaitGroup

    lineReaderPipe, lineWriterPipe := io.Pipe()

    multiWriter := io.MultiWriter(&p.buffer, lineWriterPipe)

    logger.Info("Starting to run: %s", c.String())

    // Toggle between running in a PTY or not
    if p.PTY {
        pty, err := StartPTY(p.command)
        if err != nil {
            p.ExitStatus = "1"
            return err
        }

        p.Pid = p.command.Process.Pid
        p.setRunning(true)

        waitGroup.Add(1)

        go func() {
            logger.Debug("[Process] Starting to copy PTY to the buffer")

            // Copy the pty to our buffer. This will block until it
            // EOF's or something breaks.
            _, err = io.Copy(multiWriter, pty)
            if e, ok := err.(*os.PathError); ok && e.Err == syscall.EIO {
                // We can safely ignore this error, because
                // it's just the PTY telling us that it closed
                // successfully. See:
                // https://github.com/buildkite/agent/pull/34#issuecomment-46080419
                err = nil
            }

            if err != nil {
                logger.Error("[Process] PTY output copy failed with error: %T: %v", err, err)
            } else {
                logger.Debug("[Process] PTY has finished being copied to the buffer")
            }

            waitGroup.Done()
        }()
    } else {
        p.command.Stdout = multiWriter
        p.command.Stderr = multiWriter
        p.command.Stdin = nil

        err := p.command.Start()
        if err != nil {
            p.ExitStatus = "1"
            return err
        }

        p.Pid = p.command.Process.Pid
        p.setRunning(true)
    }

    logger.Info("[Process] Process is running with PID: %d", p.Pid)

    // Add the line callback routine to the waitGroup
    waitGroup.Add(1)

    go func() {
        logger.Debug("[LineScanner] Starting to read lines")

        reader := bufio.NewReader(lineReaderPipe)

        var appending []byte

        for {
            line, isPrefix, err := reader.ReadLine()
            if err != nil {
                if err == io.EOF {
                    logger.Debug("[LineScanner] Encountered EOF")
                } else {
                    logger.Error("[LineScanner] Failed to read: (%T: %v)", err, err)
                }

                break
            }

            // If isPrefix is true, that means we've got a really
            // long line incoming, and we'll keep appending to it
            // until isPrefix is false (which means the long line
            // has ended).
            if isPrefix && appending == nil {
                logger.Debug("[LineScanner] Line is too long to read, going to buffer it until it finishes")
                appending = line

                continue
            }

            // Should we be appending?
            if appending != nil {
                appending = append(appending, line...)

                // No more isPrefix! Line is finished!
                if !isPrefix {
                    logger.Debug("[LineScanner] Finished buffering long line")
                    line = appending

                    // Reset appending back to nil
                    appending = nil
                } else {
                    continue
                }
            }

            go p.LineCallback(string(line))
        }

        logger.Debug("[LineScanner] Finished")

        waitGroup.Done()
    }()

    // Call the StartCallback
    go p.StartCallback()

    // Wait until the process has finished. The returned error is nil if the command runs,
    // has no problems copying stdin, stdout, and stderr, and exits with a zero exit status.
    waitResult := p.command.Wait()

    // Close the line writer pipe
    lineWriterPipe.Close()

    // The process is no longer running at this point
    p.setRunning(false)

    // Find the exit status of the script
    p.ExitStatus = getExitStatus(waitResult)

    logger.Info("Process with PID: %d finished with Exit Status: %s", p.Pid, p.ExitStatus)

    // Sometimes (in docker containers) io.Copy never seems to finish. This
    // is a mega hack around it. If it doesn't finish after 1 second, just
    // continue.
    logger.Debug("[Process] Waiting for routines to finish")
    err = timeoutWait(&waitGroup)
    if err != nil {
        logger.Debug("[Process] Timed out waiting for wait group: (%T: %v)", err, err)
    }

    // No error occurred so we can return nil
    return nil
}
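// timeoutWait is called above but not defined in this section. A minimal
// sketch of the shape the call site implies (the 1-second limit comes from
// the comment above; the details are assumptions, not the real
// implementation): wait on the group in a goroutine and race it against a
// timer.
func timeoutWait(waitGroup *sync.WaitGroup) error {
    done := make(chan struct{})

    // Wait on the group in a goroutine so we can select against a timeout
    go func() {
        waitGroup.Wait()
        close(done)
    }()

    select {
    case <-done:
        return nil
    case <-time.After(1 * time.Second):
        return errors.New("timed out waiting for wait group")
    }
}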
func (r *AgentPool) Start() error {
    // Show the welcome banner and config options used
    r.ShowBanner()

    // Create the agent registration API client
    r.APIClient = APIClient{Endpoint: r.Endpoint, Token: r.Token}.Create()

    // Create the agent template. We pass this template to the register
    // call, at which point we get back a real agent.
    template := r.CreateAgentTemplate()

    logger.Info("Registering agent with Buildkite...")

    // Register the agent
    registered, err := r.RegisterAgent(template)
    if err != nil {
        logger.Fatal("%s", err)
    }

    logger.Info("Successfully registered agent \"%s\" with meta-data %s", registered.Name, registered.MetaData)
    logger.Debug("Ping interval: %ds", registered.PingInterval)
    logger.Debug("Heartbeat interval: %ds", registered.HearbeatInterval)

    // Now that we have a registered agent, we can connect it to the API
    // and start running jobs.
    worker := AgentWorker{Agent: registered, AgentConfiguration: r.AgentConfiguration, Endpoint: r.Endpoint}.Create()

    logger.Info("Connecting to Buildkite...")
    if err := worker.Connect(); err != nil {
        logger.Fatal("%s", err)
    }

    logger.Info("Agent successfully connected")
    logger.Info("You can press Ctrl-C to stop the agent")
    logger.Info("Waiting for work...")

    // Start a signal watcher so we can monitor signals and handle shutdowns
    signalwatcher.Watch(func(sig signalwatcher.Signal) {
        if sig == signalwatcher.QUIT {
            logger.Debug("Received signal `%s`", sig.String())
            worker.Stop(false)
        } else if sig == signalwatcher.TERM || sig == signalwatcher.INT {
            logger.Debug("Received signal `%s`", sig.String())
            worker.Stop(true)
        } else {
            logger.Debug("Ignoring signal `%s`", sig.String())
        }
    })

    // Start the agent worker. This will block until the agent has
    // finished or is stopped.
    if err := worker.Start(); err != nil {
        logger.Fatal("%s", err)
    }

    // Now that the agent has stopped, we can disconnect it
    logger.Info("Disconnecting %s...", worker.Agent.Name)
    worker.Disconnect()

    return nil
}
        loader := cliconfig.Loader{CLI: c, Config: &cfg}
        if err := loader.Load(); err != nil {
            logger.Fatal("%s", err)
        }

        // Set up any global configuration options
        HandleGlobalFlags(cfg)

        // Find the pipeline file either from STDIN or the first
        // argument
        var input []byte
        var err error
        var filename string

        if cfg.FilePath != "" {
            logger.Info("Reading pipeline config from \"%s\"", cfg.FilePath)

            filename = filepath.Base(cfg.FilePath)
            input, err = ioutil.ReadFile(cfg.FilePath)
            if err != nil {
                logger.Fatal("Failed to read file: %s", err)
            }
        } else if stdin.IsPipe() {
            logger.Info("Reading pipeline config from STDIN")

            input, err = ioutil.ReadAll(os.Stdin)
            if err != nil {
                logger.Fatal("Failed to read from STDIN: %s", err)
            }
        } else {
            logger.Info("Searching for pipeline config...")
func (a *ArtifactUploader) upload(artifacts []*api.Artifact) error {
    var uploader Uploader

    // Determine what uploader to use
    if a.Destination != "" {
        if strings.HasPrefix(a.Destination, "s3://") {
            uploader = new(S3Uploader)
        } else {
            return errors.New("Unknown upload destination: " + a.Destination)
        }
    } else {
        uploader = new(FormUploader)
    }

    // Set up the uploader
    err := uploader.Setup(a.Destination)
    if err != nil {
        return err
    }

    // Set the URLs of the artifacts based on the uploader
    for _, artifact := range artifacts {
        artifact.URL = uploader.URL(artifact)
    }

    // Create the artifacts on Buildkite
    batchCreator := ArtifactBatchCreator{
        APIClient: a.APIClient,
        JobID:     a.JobID,
        Artifacts: artifacts,
    }

    artifacts, err = batchCreator.Create()
    if err != nil {
        return err
    }

    p := pool.New(pool.MaxConcurrencyLimit)
    errors := []error{}

    for _, artifact := range artifacts {
        // Create a new instance of the artifact for the goroutine
        // See: http://golang.org/doc/effective_go.html#channels
        artifact := artifact

        p.Spawn(func() {
            // Show a nice message that we're starting to upload the file
            logger.Info("Uploading \"%s\" %d bytes", artifact.Path, artifact.FileSize)

            // Upload the artifact and then set the state depending
            // on whether or not it passed. We'll retry the upload
            // a couple of times before giving up.
            err = retry.Do(func(s *retry.Stats) error {
                err := uploader.Upload(artifact)
                if err != nil {
                    logger.Warn("%s (%s)", err, s)
                }

                return err
            }, &retry.Config{Maximum: 10, Interval: 1 * time.Second})

            if err != nil {
                artifact.State = "error"
                logger.Error("Error uploading artifact \"%s\": %s", artifact.Path, err)

                // Track the error that was raised
                p.Lock()
                errors = append(errors, err)
                p.Unlock()
            } else {
                artifact.State = "finished"
            }

            // Update the state of the artifact on Buildkite; we
            // retry this as well.
            err = retry.Do(func(s *retry.Stats) error {
                _, _, err = a.APIClient.Artifacts.Update(a.JobID, artifact)
                if err != nil {
                    logger.Warn("%s (%s)", err, s)
                }

                return err
            }, &retry.Config{Maximum: 10, Interval: 1 * time.Second})
            if err != nil {
                logger.Error("Error marking artifact %s as uploaded: %s", artifact.Path, err)

                // Track the error that was raised
                p.Lock()
                errors = append(errors, err)
                p.Unlock()
            }
        })
    }

    p.Wait()

    if len(errors) > 0 {
        logger.Fatal("There were errors with uploading some of the artifacts")
    }

    return nil
}
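// Why the `artifact := artifact` line above matters (a stand-alone
// illustration, with process as a placeholder): without the shadowing
// re-declaration, every goroutine would capture the same loop variable, so
// most of them would observe whatever value the loop finished on rather
// than their own element.
//
//	for _, artifact := range artifacts {
//		artifact := artifact // give each goroutine its own copy
//		p.Spawn(func() {
//			process(artifact)
//		})
//	}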
func (a *ArtifactUploader) upload(artifacts []*api.Artifact) error {
    var uploader Uploader

    // Determine what uploader to use
    if a.Destination != "" {
        if strings.HasPrefix(a.Destination, "s3://") {
            uploader = new(S3Uploader)
        } else {
            return errors.New("Unknown upload destination: " + a.Destination)
        }
    } else {
        uploader = new(FormUploader)
    }

    // Set up the uploader
    err := uploader.Setup(a.Destination, a.APIClient.DebugHTTP)
    if err != nil {
        return err
    }

    // Set the URLs of the artifacts based on the uploader
    for _, artifact := range artifacts {
        artifact.URL = uploader.URL(artifact)
    }

    // Create the artifacts on Buildkite
    batchCreator := ArtifactBatchCreator{
        APIClient:         a.APIClient,
        JobID:             a.JobID,
        Artifacts:         artifacts,
        UploadDestination: a.Destination,
    }

    artifacts, err = batchCreator.Create()
    if err != nil {
        return err
    }

    // Prepare a concurrency pool to upload the artifacts
    p := pool.New(pool.MaxConcurrencyLimit)
    errors := []error{}

    // Create a wait group so we can make sure the uploader waits for all
    // the artifact states to upload before finishing
    var stateUploaderWaitGroup sync.WaitGroup
    stateUploaderWaitGroup.Add(1)

    // A map to keep track of artifact states and how many we've uploaded.
    // The map is written by the upload goroutines and drained by the state
    // uploader goroutine, so access to it is guarded by a mutex.
    var statesMutex sync.Mutex
    artifactsStates := make(map[string]string)
    artifactStatesUploaded := 0

    // Spin up a goroutine that'll upload artifact statuses every few
    // seconds in batches
    go func() {
        for artifactStatesUploaded < len(artifacts) {
            statesToUpload := make(map[string]string)

            // Grab all the states we need to upload, and remove
            // them from the tracking map
            statesMutex.Lock()
            for id, state := range artifactsStates {
                statesToUpload[id] = state
                delete(artifactsStates, id)
            }
            statesMutex.Unlock()

            if len(statesToUpload) > 0 {
                artifactStatesUploaded += len(statesToUpload)
                for id, state := range statesToUpload {
                    logger.Debug("Artifact `%s` has state `%s`", id, state)
                }

                // Update the states of the artifacts in bulk.
                err = retry.Do(func(s *retry.Stats) error {
                    _, err = a.APIClient.Artifacts.Update(a.JobID, statesToUpload)
                    if err != nil {
                        logger.Warn("%s (%s)", err, s)
                    }

                    return err
                }, &retry.Config{Maximum: 10, Interval: 1 * time.Second})
                if err != nil {
                    logger.Error("Error uploading artifact states: %s", err)

                    // Track the error that was raised
                    p.Lock()
                    errors = append(errors, err)
                    p.Unlock()
                }

                logger.Debug("Uploaded %d artifact states (%d/%d)", len(statesToUpload), artifactStatesUploaded, len(artifacts))
            }

            // Check again for states to upload in a few seconds
            time.Sleep(1 * time.Second)
        }

        stateUploaderWaitGroup.Done()
    }()

    for _, artifact := range artifacts {
        // Create a new instance of the artifact for the goroutine
        // See: http://golang.org/doc/effective_go.html#channels
        artifact := artifact

        p.Spawn(func() {
            // Show a nice message that we're starting to upload the file
            logger.Info("Uploading \"%s\" %d bytes", artifact.Path, artifact.FileSize)

            // Upload the artifact and then set the state depending
            // on whether or not it passed. We'll retry the upload
            // a couple of times before giving up.
            err = retry.Do(func(s *retry.Stats) error {
                err := uploader.Upload(artifact)
                if err != nil {
                    logger.Warn("%s (%s)", err, s)
                }

                return err
            }, &retry.Config{Maximum: 10, Interval: 1 * time.Second})

            var state string

            // Did the upload eventually fail?
            if err != nil {
                logger.Error("Error uploading artifact \"%s\": %s", artifact.Path, err)

                // Track the error that was raised
                p.Lock()
                errors = append(errors, err)
                p.Unlock()

                state = "error"
            } else {
                state = "finished"
            }

            statesMutex.Lock()
            artifactsStates[artifact.ID] = state
            statesMutex.Unlock()
        })
    }

    // Wait for the pool to finish
    p.Wait()

    // Wait for the statuses to finish uploading
    stateUploaderWaitGroup.Wait()

    if len(errors) > 0 {
        logger.Fatal("There were errors with uploading some of the artifacts")
    }

    return nil
}
func (a *ArtifactDownloader) Download() error {
    // Turn the download destination into an absolute path and confirm it exists
    downloadDestination, _ := filepath.Abs(a.Destination)
    fileInfo, err := os.Stat(downloadDestination)
    if err != nil {
        logger.Fatal("Could not find information about destination: %s", downloadDestination)
    }
    if !fileInfo.IsDir() {
        logger.Fatal("%s is not a directory", downloadDestination)
    }

    // Find the artifacts that we want to download
    searcher := ArtifactSearcher{BuildID: a.BuildID, APIClient: a.APIClient}
    artifacts, err := searcher.Search(a.Query, a.Step)
    if err != nil {
        return err
    }

    artifactCount := len(artifacts)

    if artifactCount == 0 {
        logger.Info("No artifacts found for downloading")
    } else {
        logger.Info("Found %d artifacts. Starting to download to: %s", artifactCount, downloadDestination)

        p := pool.New(pool.MaxConcurrencyLimit)
        errors := []error{}

        for _, artifact := range artifacts {
            // Create a new instance of the artifact for the goroutine
            // See: http://golang.org/doc/effective_go.html#channels
            artifact := artifact

            p.Spawn(func() {
                var err error

                // Handle downloading from S3 and GS
                if strings.HasPrefix(artifact.UploadDestination, "s3://") {
                    err = S3Downloader{
                        Path:        artifact.Path,
                        Bucket:      artifact.UploadDestination,
                        Destination: downloadDestination,
                        Retries:     5,
                        DebugHTTP:   a.APIClient.DebugHTTP,
                    }.Start()
                } else if strings.HasPrefix(artifact.UploadDestination, "gs://") {
                    err = GSDownloader{
                        Path:        artifact.Path,
                        Bucket:      artifact.UploadDestination,
                        Destination: downloadDestination,
                        Retries:     5,
                        DebugHTTP:   a.APIClient.DebugHTTP,
                    }.Start()
                } else {
                    err = Download{
                        URL:         artifact.URL,
                        Path:        artifact.Path,
                        Destination: downloadDestination,
                        Retries:     5,
                        DebugHTTP:   a.APIClient.DebugHTTP,
                    }.Start()
                }

                // If the download encountered an error, lock
                // the pool, collect it, then unlock the pool
                // again.
                if err != nil {
                    logger.Error("Failed to download artifact: %s", err)

                    p.Lock()
                    errors = append(errors, err)
                    p.Unlock()
                }
            })
        }

        p.Wait()

        if len(errors) > 0 {
            logger.Fatal("There were errors with downloading some of the artifacts")
        }
    }

    return nil
}
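// An illustrative call site for the downloader above (the field values are
// made up for the example; the struct fields match those used in Download):
//
//	downloader := ArtifactDownloader{
//		APIClient:   client,
//		BuildID:     "some-build-uuid",
//		Query:       "logs/**/*",
//		Step:        "tests",
//		Destination: "tmp/artifacts",
//	}
//	if err := downloader.Download(); err != nil {
//		logger.Fatal("Failed to download artifacts: %s", err)
//	}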