func (a *ArtifactDownloader) Download() error {
	// Turn the download destination into an absolute path and confirm it exists
	downloadDestination, err := filepath.Abs(a.Destination)
	if err != nil {
		logger.Fatal("Could not resolve the absolute path of destination: %s", a.Destination)
	}
	fileInfo, err := os.Stat(downloadDestination)
	if err != nil {
		logger.Fatal("Could not find information about destination: %s", downloadDestination)
	}
	if !fileInfo.IsDir() {
		logger.Fatal("%s is not a directory", downloadDestination)
	}

	// Find the artifacts that we want to download
	searcher := ArtifactSearcher{BuildID: a.BuildID, APIClient: a.APIClient}
	artifacts, err := searcher.Search(a.Query, a.Step)
	if err != nil {
		return err
	}

	artifactCount := len(artifacts)

	if artifactCount == 0 {
		logger.Info("No artifacts found for downloading")
	} else {
		logger.Info("Found %d artifacts. Starting to download to: %s", artifactCount, downloadDestination)

		p := pool.New(pool.MaxConcurrencyLimit)
		errors := []error{}

		for _, artifact := range artifacts {
			// Create new instance of the artifact for the goroutine
			// See: http://golang.org/doc/effective_go.html#channels
			artifact := artifact

			p.Spawn(func() {
				var err error

				// Handle downloading from S3 and GS
				if strings.HasPrefix(artifact.UploadDestination, "s3://") {
					err = S3Downloader{
						Path:        artifact.Path,
						Bucket:      artifact.UploadDestination,
						Destination: downloadDestination,
						Retries:     5,
						DebugHTTP:   a.APIClient.DebugHTTP,
					}.Start()
				} else if strings.HasPrefix(artifact.UploadDestination, "gs://") {
					err = GSDownloader{
						Path:        artifact.Path,
						Bucket:      artifact.UploadDestination,
						Destination: downloadDestination,
						Retries:     5,
						DebugHTTP:   a.APIClient.DebugHTTP,
					}.Start()
				} else {
					err = Download{
						URL:         artifact.URL,
						Path:        artifact.Path,
						Destination: downloadDestination,
						Retries:     5,
						DebugHTTP:   a.APIClient.DebugHTTP,
					}.Start()
				}

				// If the download encountered an error, lock
				// the pool, collect it, then unlock the pool
				// again.
				if err != nil {
					logger.Error("Failed to download artifact: %s", err)

					p.Lock()
					errors = append(errors, err)
					p.Unlock()
				}
			})
		}

		p.Wait()

		if len(errors) > 0 {
			logger.Fatal("There were errors with downloading some of the artifacts")
		}
	}

	return nil
}
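// Illustrative only (not part of the original source): a minimal sketch of
// driving the downloader above. The fields shown are exactly those that
// Download() reads; the example values, and how the api.Client is built
// elsewhere, are assumptions.
func exampleDownload(client *api.Client) error {
	downloader := ArtifactDownloader{
		APIClient:   client,
		BuildID:     "example-build-id", // hypothetical value
		Query:       "logs/**/*",        // hypothetical artifact query
		Step:        "",                 // assumed to mean "search every step"
		Destination: ".",                // must be an existing directory
	}

	return downloader.Download()
}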
func (a *ArtifactUploader) upload(artifacts []*api.Artifact) error {
	var uploader Uploader

	// Determine what uploader to use
	if a.Destination != "" {
		if strings.HasPrefix(a.Destination, "s3://") {
			uploader = new(S3Uploader)
		} else {
			return errors.New("Unknown upload destination: " + a.Destination)
		}
	} else {
		uploader = new(FormUploader)
	}

	// Setup the uploader
	err := uploader.Setup(a.Destination, a.APIClient.DebugHTTP)
	if err != nil {
		return err
	}

	// Set the URLs of the artifacts based on the uploader
	for _, artifact := range artifacts {
		artifact.URL = uploader.URL(artifact)
	}

	// Create the artifacts on Buildkite
	batchCreator := ArtifactBatchCreator{
		APIClient:         a.APIClient,
		JobID:             a.JobID,
		Artifacts:         artifacts,
		UploadDestination: a.Destination,
	}

	artifacts, err = batchCreator.Create()
	if err != nil {
		return err
	}

	// Prepare a concurrency pool to upload the artifacts
	p := pool.New(pool.MaxConcurrencyLimit)
	errors := []error{}

	// Create a wait group so we can make sure the uploader waits for all
	// the artifact states to upload before finishing
	var stateUploaderWaitGroup sync.WaitGroup
	stateUploaderWaitGroup.Add(1)

	// A map to keep track of artifact states and how many we've uploaded.
	// The upload goroutines write to the map and the state uploader
	// goroutine drains it, so all access is guarded by a mutex.
	var artifactStatesMutex sync.Mutex
	artifactsStates := make(map[string]string)
	artifactStatesUploaded := 0

	// Spin up a goroutine that'll upload artifact statuses every few
	// seconds in batches
	go func() {
		for artifactStatesUploaded < len(artifacts) {
			statesToUpload := make(map[string]string)

			// Grab all the states we need to upload, and remove
			// them from the tracking map
			artifactStatesMutex.Lock()
			for id, state := range artifactsStates {
				statesToUpload[id] = state
				delete(artifactsStates, id)
			}
			artifactStatesMutex.Unlock()

			if len(statesToUpload) > 0 {
				artifactStatesUploaded += len(statesToUpload)
				for id, state := range statesToUpload {
					logger.Debug("Artifact `%s` has state `%s`", id, state)
				}

				// Update the states of the artifacts in bulk.
				err := retry.Do(func(s *retry.Stats) error {
					_, err := a.APIClient.Artifacts.Update(a.JobID, statesToUpload)
					if err != nil {
						logger.Warn("%s (%s)", err, s)
					}

					return err
				}, &retry.Config{Maximum: 10, Interval: 1 * time.Second})

				if err != nil {
					logger.Error("Error uploading artifact states: %s", err)

					// Track the error that was raised
					p.Lock()
					errors = append(errors, err)
					p.Unlock()
				}

				logger.Debug("Uploaded %d artifact states (%d/%d)", len(statesToUpload),
					artifactStatesUploaded, len(artifacts))
			}

			// Check again for states to upload in a few seconds
			time.Sleep(1 * time.Second)
		}

		stateUploaderWaitGroup.Done()
	}()

	for _, artifact := range artifacts {
		// Create new instance of the artifact for the goroutine
		// See: http://golang.org/doc/effective_go.html#channels
		artifact := artifact

		p.Spawn(func() {
			// Show a nice message that we're starting to upload the file
			logger.Info("Uploading \"%s\" %d bytes", artifact.Path, artifact.FileSize)

			// Upload the artifact and then set the state depending
			// on whether or not it passed. We'll retry the upload
			// a couple of times before giving up. The error is kept
			// local to this goroutine to avoid racing on a shared
			// variable.
			err := retry.Do(func(s *retry.Stats) error {
				err := uploader.Upload(artifact)
				if err != nil {
					logger.Warn("%s (%s)", err, s)
				}

				return err
			}, &retry.Config{Maximum: 10, Interval: 1 * time.Second})

			var state string

			// Did the upload eventually fail?
			if err != nil {
				logger.Error("Error uploading artifact \"%s\": %s", artifact.Path, err)

				// Track the error that was raised
				p.Lock()
				errors = append(errors, err)
				p.Unlock()

				state = "error"
			} else {
				state = "finished"
			}

			// Record the state under the mutex so the state uploader
			// goroutine can safely drain the map.
			artifactStatesMutex.Lock()
			artifactsStates[artifact.ID] = state
			artifactStatesMutex.Unlock()
		})
	}

	// Wait for the pool to finish
	p.Wait()

	// Wait for the statuses to finish uploading
	stateUploaderWaitGroup.Wait()

	if len(errors) > 0 {
		logger.Fatal("There were errors with uploading some of the artifacts")
	}

	return nil
}
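// For reference, the Uploader interface reconstructed from how both upload()
// variants use it (Setup, URL, Upload). This is an inference from the call
// sites, not the original declaration: the variant above passes a DebugHTTP
// flag to Setup, while the older variant below passes only the destination,
// so the real signature differs between revisions.
type Uploader interface {
	// Setup prepares the uploader for the given destination (newer call shape)
	Setup(destination string, debugHTTP bool) error

	// URL returns where the artifact will live once uploaded
	URL(artifact *api.Artifact) string

	// Upload sends a single artifact to the destination
	Upload(artifact *api.Artifact) error
}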
func (a *ArtifactUploader) upload(artifacts []*api.Artifact) error {
	var uploader Uploader

	// Determine what uploader to use
	if a.Destination != "" {
		if strings.HasPrefix(a.Destination, "s3://") {
			uploader = new(S3Uploader)
		} else {
			return errors.New("Unknown upload destination: " + a.Destination)
		}
	} else {
		uploader = new(FormUploader)
	}

	// Setup the uploader
	err := uploader.Setup(a.Destination)
	if err != nil {
		return err
	}

	// Set the URLs of the artifacts based on the uploader
	for _, artifact := range artifacts {
		artifact.URL = uploader.URL(artifact)
	}

	// Create the artifacts on Buildkite
	batchCreator := ArtifactBatchCreator{
		APIClient: a.APIClient,
		JobID:     a.JobID,
		Artifacts: artifacts,
	}

	artifacts, err = batchCreator.Create()
	if err != nil {
		return err
	}

	p := pool.New(pool.MaxConcurrencyLimit)
	errors := []error{}

	for _, artifact := range artifacts {
		// Create new instance of the artifact for the goroutine
		// See: http://golang.org/doc/effective_go.html#channels
		artifact := artifact

		p.Spawn(func() {
			// Show a nice message that we're starting to upload the file
			logger.Info("Uploading \"%s\" %d bytes", artifact.Path, artifact.FileSize)

			// Upload the artifact and then set the state depending
			// on whether or not it passed. We'll retry the upload
			// a couple of times before giving up. The error is kept
			// local to this goroutine to avoid racing on a shared
			// variable.
			err := retry.Do(func(s *retry.Stats) error {
				err := uploader.Upload(artifact)
				if err != nil {
					logger.Warn("%s (%s)", err, s)
				}

				return err
			}, &retry.Config{Maximum: 10, Interval: 1 * time.Second})

			if err != nil {
				artifact.State = "error"
				logger.Error("Error uploading artifact \"%s\": %s", artifact.Path, err)

				// Track the error that was raised
				p.Lock()
				errors = append(errors, err)
				p.Unlock()
			} else {
				artifact.State = "finished"
			}

			// Update the state of the artifact on Buildkite, we
			// retry this as well.
			err = retry.Do(func(s *retry.Stats) error {
				_, _, err := a.APIClient.Artifacts.Update(a.JobID, artifact)
				if err != nil {
					logger.Warn("%s (%s)", err, s)
				}

				return err
			}, &retry.Config{Maximum: 10, Interval: 1 * time.Second})

			if err != nil {
				logger.Error("Error marking artifact %s as uploaded: %s", artifact.Path, err)

				// Track the error that was raised
				p.Lock()
				errors = append(errors, err)
				p.Unlock()
			}
		})
	}

	p.Wait()

	if len(errors) > 0 {
		logger.Fatal("There were errors with uploading some of the artifacts")
	}

	return nil
}
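// Illustrative only: the concurrency-pool pattern shared by Download() and
// both upload() variants, isolated for clarity. Spawn queues a goroutine
// bounded by pool.MaxConcurrencyLimit, Wait blocks until every spawned
// function returns, and Lock/Unlock guard shared state such as the collected
// errors slice. process is a hypothetical placeholder for the per-item work.
func examplePool(items []string, process func(string) error) []error {
	p := pool.New(pool.MaxConcurrencyLimit)
	errs := []error{}

	for _, item := range items {
		// Capture a per-iteration copy so each goroutine sees its own item
		item := item

		p.Spawn(func() {
			if err := process(item); err != nil {
				p.Lock()
				errs = append(errs, err)
				p.Unlock()
			}
		})
	}

	p.Wait()

	return errs
}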