func (u *S3Uploader) Upload(artifact *api.Artifact) error { permission := "public-read" if os.Getenv("BUILDKITE_S3_ACL") != "" { permission = os.Getenv("BUILDKITE_S3_ACL") } else if os.Getenv("AWS_S3_ACL") != "" { permission = os.Getenv("AWS_S3_ACL") } // The dirtiest validation method ever... if permission != "private" && permission != "public-read" && permission != "public-read-write" && permission != "authenticated-read" && permission != "bucket-owner-read" && permission != "bucket-owner-full-control" { logger.Fatal("Invalid S3 ACL `%s`", permission) } Perms := s3.ACL(permission) logger.Debug("Reading file \"%s\"", artifact.AbsolutePath) data, err := ioutil.ReadFile(artifact.AbsolutePath) if err != nil { return errors.New("Failed to read file " + artifact.AbsolutePath + " (" + err.Error() + ")") } logger.Debug("Uploading \"%s\" to bucket with permission `%s`", u.artifactPath(artifact), permission) err = u.Bucket.Put(u.artifactPath(artifact), data, u.mimeType(artifact), Perms, s3.Options{}) if err != nil { return errors.New(fmt.Sprintf("Failed to PUT file \"%s\" (%s)", u.artifactPath(artifact), err.Error())) } return nil }
func (u *GSUploader) Upload(artifact *api.Artifact) error { permission := os.Getenv("BUILDKITE_GS_ACL") // The dirtiest validation method ever... if permission != "" && permission != "authenticatedRead" && permission != "private" && permission != "projectPrivate" && permission != "publicRead" && permission != "publicReadWrite" { logger.Fatal("Invalid GS ACL `%s`", permission) } if permission == "" { logger.Debug("Uploading \"%s\" to bucket \"%s\" with default permission", u.artifactPath(artifact), u.BucketName()) } else { logger.Debug("Uploading \"%s\" to bucket \"%s\" with permission \"%s\"", u.artifactPath(artifact), u.BucketName(), permission) } object := &storage.Object{ Name: u.artifactPath(artifact), ContentType: u.mimeType(artifact), } file, err := os.Open(artifact.AbsolutePath) if err != nil { return errors.New(fmt.Sprintf("Failed to open file \"%q\" (%v)", artifact.AbsolutePath, err)) } call := u.Service.Objects.Insert(u.BucketName(), object) if permission != "" { call = call.PredefinedAcl(permission) } if res, err := call.Media(file).Do(); err == nil { logger.Debug("Created object %v at location %v\n\n", res.Name, res.SelfLink) } else { return errors.New(fmt.Sprintf("Failed to PUT file \"%s\" (%v)", u.artifactPath(artifact), err)) } return nil }
Usage:  "Which job should the artifacts be uploaded to",
EnvVar: "BUILDKITE_JOB_ID",
},
AgentAccessTokenFlag,
EndpointFlag,
NoColorFlag,
DebugFlag,
DebugHTTPFlag,
},
// NOTE(review): fragment of a cli.Command definition — the command
// header and the rest of the Action body are outside this view.
Action: func(c *cli.Context) {
	// The configuration will be loaded into this struct
	cfg := ArtifactUploadConfig{}

	// Load the configuration from CLI flags / environment variables
	if err := cliconfig.Load(c, &cfg); err != nil {
		logger.Fatal("%s", err)
	}

	// Set up any global configuration options
	HandleGlobalFlags(cfg)

	// Setup the uploader
	uploader := agent.ArtifactUploader{
		APIClient: agent.APIClient{
			Endpoint: cfg.Endpoint,
			Token:    cfg.AgentAccessToken,
		}.Create(),
		JobID:       cfg.Job,
		Paths:       cfg.UploadPaths,
		Destination: cfg.Destination,
	}
EnvVar: "BUILDKITE_JOB_ID",
},
AgentAccessTokenFlag,
EndpointFlag,
NoColorFlag,
DebugFlag,
DebugHTTPFlag,
},
// NOTE(review): fragment of a cli.Command definition — the command
// header and the rest of the Action body are outside this view.
Action: func(c *cli.Context) {
	// The configuration will be loaded into this struct
	cfg := PipelineUploadConfig{}

	// Load the configuration from CLI flags / environment variables
	loader := cliconfig.Loader{CLI: c, Config: &cfg}
	if err := loader.Load(); err != nil {
		logger.Fatal("%s", err)
	}

	// Set up any global configuration options
	HandleGlobalFlags(cfg)

	// Find the pipeline file either from STDIN or the first
	// argument
	var input []byte
	var err error
	var filename string

	if cfg.FilePath != "" {
		filename = filepath.Base(cfg.FilePath)
		input, err = ioutil.ReadFile(cfg.FilePath)
		if err != nil {
EnvVar: "BUILDKITE_BUILD_ID",
Usage:  "The build that the artifacts were uploaded to",
},
AgentAccessTokenFlag,
EndpointFlag,
NoColorFlag,
DebugFlag,
DebugHTTPFlag,
},
// NOTE(review): fragment of a cli.Command definition — the command
// header and the rest of the Action body are outside this view.
Action: func(c *cli.Context) {
	// The configuration will be loaded into this struct
	cfg := ArtifactShasumConfig{}

	// Load the configuration from CLI flags / environment variables
	if err := cliconfig.Load(c, &cfg); err != nil {
		logger.Fatal("%s", err)
	}

	// Set up any global configuration options
	HandleGlobalFlags(cfg)

	// Find the artifact we want to show the SHASUM for
	searcher := agent.ArtifactSearcher{
		APIClient: agent.APIClient{
			Endpoint: cfg.Endpoint,
			Token:    cfg.AgentAccessToken,
		}.Create(),
		BuildID: cfg.Build,
	}

	artifacts, err := searcher.Search(cfg.Query, cfg.Step)
Usage:  "Which job should the meta-data be retrieved from",
EnvVar: "BUILDKITE_JOB_ID",
},
AgentAccessTokenFlag,
EndpointFlag,
NoColorFlag,
DebugFlag,
DebugHTTPFlag,
},
// NOTE(review): fragment of a cli.Command definition — the command
// header and the rest of the Action body are outside this view.
Action: func(c *cli.Context) {
	// The configuration will be loaded into this struct
	cfg := MetaDataGetConfig{}

	// Load the configuration from CLI flags / environment variables
	if err := cliconfig.Load(c, &cfg); err != nil {
		logger.Fatal("%s", err)
	}

	// Set up any global configuration options
	HandleGlobalFlags(cfg)

	// Create the API client
	client := agent.APIClient{
		Endpoint: cfg.Endpoint,
		Token:    cfg.AgentAccessToken,
	}.Create()

	// Find the meta data value
	var metaData *api.MetaData
	var err error
	var resp *api.Response
},
// NOTE(review): fragment of a cli.Command definition — the command
// header and the rest of the Action body are outside this view.
Action: func(c *cli.Context) {
	// The configuration will be loaded into this struct
	cfg := AgentStartConfig{}

	// Setup the config loader. You'll see that we also pass paths to
	// potential config files. The loader will use the first one it finds.
	loader := cliconfig.Loader{
		CLI:                    c,
		Config:                 &cfg,
		DefaultConfigFilePaths: DefaultConfigFilePaths(),
	}

	// Load the configuration
	if err := loader.Load(); err != nil {
		logger.Fatal("%s", err)
	}

	// Set up any global configuration options
	HandleGlobalFlags(cfg)

	// Force some settings if on Windows (these aren't supported
	// yet)
	if runtime.GOOS == "windows" {
		cfg.NoPTY = true
	}

	// Setup the agent
	pool := agent.AgentPool{
		Token: cfg.Token,
		Name:  cfg.Name,
// Start registers this agent with Buildkite, connects a worker, and
// blocks running jobs until the agent is stopped (by a signal) or
// finishes, after which the worker is disconnected.
func (r *AgentPool) Start() error {
	// Show the welcome banner and config options used
	r.ShowBanner()

	// Create the agent registration API Client
	r.APIClient = APIClient{Endpoint: r.Endpoint, Token: r.Token}.Create()

	// Create the agent template. We pass this template to the register
	// call, at which point we get back a real agent.
	template := r.CreateAgentTemplate()

	logger.Info("Registering agent with Buildkite...")

	// Register the agent
	registered, err := r.RegisterAgent(template)
	if err != nil {
		logger.Fatal("%s", err)
	}

	logger.Info("Successfully registered agent \"%s\" with meta-data %s", registered.Name, registered.MetaData)

	logger.Debug("Ping interval: %ds", registered.PingInterval)
	// NOTE(review): `HearbeatInterval` is a misspelled field name on the
	// registration struct; renaming it is an API-wide change, not local.
	logger.Debug("Heartbeat interval: %ds", registered.HearbeatInterval)

	// Now that we have a registered agent, we can connect it to the API,
	// and start running jobs.
	worker := AgentWorker{Agent: registered, AgentConfiguration: r.AgentConfiguration, Endpoint: r.Endpoint}.Create()

	logger.Info("Connecting to Buildkite...")
	if err := worker.Connect(); err != nil {
		logger.Fatal("%s", err)
	}

	logger.Info("Agent successfully connected")
	logger.Info("You can press Ctrl-C to stop the agent")
	logger.Info("Waiting for work...")

	// Start a signalwatcher so we can monitor signals and handle shutdowns
	// NOTE(review): the bool passed to Stop presumably toggles graceful
	// shutdown — confirm against AgentWorker.Stop.
	signalwatcher.Watch(func(sig signalwatcher.Signal) {
		if sig == signalwatcher.QUIT {
			logger.Debug("Received signal `%s`", sig.String())
			worker.Stop(false)
		} else if sig == signalwatcher.TERM || sig == signalwatcher.INT {
			logger.Debug("Received signal `%s`", sig.String())
			worker.Stop(true)
		} else {
			logger.Debug("Ignoring signal `%s`", sig.String())
		}
	})

	// Starts the agent worker. This will block until the agent has
	// finished or is stopped.
	if err := worker.Start(); err != nil {
		logger.Fatal("%s", err)
	}

	// Now that the agent has stopped, we can disconnect it
	logger.Info("Disconnecting %s...", worker.Agent.Name)
	worker.Disconnect()

	return nil
}
func (a *ArtifactUploader) upload(artifacts []*api.Artifact) error { var uploader Uploader // Determine what uploader to use if a.Destination != "" { if strings.HasPrefix(a.Destination, "s3://") { uploader = new(S3Uploader) } else { return errors.New("Unknown upload destination: " + a.Destination) } } else { uploader = new(FormUploader) } // Setup the uploader err := uploader.Setup(a.Destination, a.APIClient.DebugHTTP) if err != nil { return err } // Set the URL's of the artifacts based on the uploader for _, artifact := range artifacts { artifact.URL = uploader.URL(artifact) } // Create the artifacts on Buildkite batchCreator := ArtifactBatchCreator{ APIClient: a.APIClient, JobID: a.JobID, Artifacts: artifacts, UploadDestination: a.Destination, } artifacts, err = batchCreator.Create() if err != nil { return err } // Prepare a concurrency pool to upload the artifacts p := pool.New(pool.MaxConcurrencyLimit) errors := []error{} // Create a wait group so we can make sure the uploader waits for all // the artifact states to upload before finishing var stateUploaderWaitGroup sync.WaitGroup stateUploaderWaitGroup.Add(1) // A map to keep track of artifact states and how many we've uploaded artifactsStates := make(map[string]string) artifactStatesUploaded := 0 // Spin up a gourtine that'll uploading artifact statuses every few // seconds in batches go func() { for artifactStatesUploaded < len(artifacts) { statesToUpload := make(map[string]string) // Grab all the states we need to upload, and remove // them from the tracking map for id, state := range artifactsStates { statesToUpload[id] = state delete(artifactsStates, id) } if len(statesToUpload) > 0 { artifactStatesUploaded += len(statesToUpload) for id, state := range statesToUpload { logger.Debug("Artifact `%s` has state `%s`", id, state) } // Update the states of the artifacts in bulk. 
err = retry.Do(func(s *retry.Stats) error { _, err = a.APIClient.Artifacts.Update(a.JobID, statesToUpload) if err != nil { logger.Warn("%s (%s)", err, s) } return err }, &retry.Config{Maximum: 10, Interval: 1 * time.Second}) if err != nil { logger.Error("Error uploading artifact states: %s", err) // Track the error that was raised p.Lock() errors = append(errors, err) p.Unlock() } logger.Debug("Uploaded %d artfact states (%d/%d)", len(statesToUpload), artifactStatesUploaded, len(artifacts)) } // Check again for states to upload in a few seconds time.Sleep(1 * time.Second) } stateUploaderWaitGroup.Done() }() for _, artifact := range artifacts { // Create new instance of the artifact for the goroutine // See: http://golang.org/doc/effective_go.html#channels artifact := artifact p.Spawn(func() { // Show a nice message that we're starting to upload the file logger.Info("Uploading \"%s\" %d bytes", artifact.Path, artifact.FileSize) // Upload the artifact and then set the state depending // on whether or not it passed. We'll retry the upload // a couple of times before giving up. err = retry.Do(func(s *retry.Stats) error { err := uploader.Upload(artifact) if err != nil { logger.Warn("%s (%s)", err, s) } return err }, &retry.Config{Maximum: 10, Interval: 1 * time.Second}) var state string // Did the upload eventually fail? if err != nil { logger.Error("Error uploading artifact \"%s\": %s", artifact.Path, err) // Track the error that was raised p.Lock() errors = append(errors, err) p.Unlock() state = "error" } else { state = "finished" } artifactsStates[artifact.ID] = state }) } // Wait for the pool to finish p.Wait() // Wait for the statuses to finish uploading stateUploaderWaitGroup.Wait() if len(errors) > 0 { logger.Fatal("There were errors with uploading some of the artifacts") } return nil }
Usage:  "Which job should the meta-data be checked for",
EnvVar: "BUILDKITE_JOB_ID",
},
AgentAccessTokenFlag,
EndpointFlag,
NoColorFlag,
DebugFlag,
DebugHTTPFlag,
},
// NOTE(review): fragment of a cli.Command definition — the command
// header and the rest of the Action body are outside this view.
Action: func(c *cli.Context) {
	// The configuration will be loaded into this struct
	cfg := MetaDataExistsConfig{}

	// Load the configuration from CLI flags / environment variables
	if err := cliconfig.Load(c, &cfg); err != nil {
		logger.Fatal("%s", err)
	}

	// Set up any global configuration options
	HandleGlobalFlags(cfg)

	// Create the API client
	client := agent.APIClient{
		Endpoint: cfg.Endpoint,
		Token:    cfg.AgentAccessToken,
	}.Create()

	// Find the meta data value
	var err error
	var exists *api.MetaDataExists
	var resp *api.Response
func (a *ArtifactDownloader) Download() error { // Turn the download destination into an absolute path and confirm it exists downloadDestination, _ := filepath.Abs(a.Destination) fileInfo, err := os.Stat(downloadDestination) if err != nil { logger.Fatal("Could not find information about destination: %s", downloadDestination) } if !fileInfo.IsDir() { logger.Fatal("%s is not a directory", downloadDestination) } // Find the artifacts that we want to download searcher := ArtifactSearcher{BuildID: a.BuildID, APIClient: a.APIClient} artifacts, err := searcher.Search(a.Query, a.Step) if err != nil { return err } artifactCount := len(artifacts) if artifactCount == 0 { logger.Info("No artifacts found for downloading") } else { logger.Info("Found %d artifacts. Starting to download to: %s", artifactCount, downloadDestination) p := pool.New(pool.MaxConcurrencyLimit) errors := []error{} for _, artifact := range artifacts { // Create new instance of the artifact for the goroutine // See: http://golang.org/doc/effective_go.html#channels artifact := artifact p.Spawn(func() { var err error // Handle downloading from S3 and GS if strings.HasPrefix(artifact.UploadDestination, "s3://") { err = S3Downloader{ Path: artifact.Path, Bucket: artifact.UploadDestination, Destination: downloadDestination, Retries: 5, DebugHTTP: a.APIClient.DebugHTTP, }.Start() } else if strings.HasPrefix(artifact.UploadDestination, "gs://") { err = GSDownloader{ Path: artifact.Path, Bucket: artifact.UploadDestination, Destination: downloadDestination, Retries: 5, DebugHTTP: a.APIClient.DebugHTTP, }.Start() } else { err = Download{ URL: artifact.URL, Path: artifact.Path, Destination: downloadDestination, Retries: 5, DebugHTTP: a.APIClient.DebugHTTP, }.Start() } // If the downloaded encountered an error, lock // the pool, collect it, then unlock the pool // again. 
if err != nil { logger.Error("Failed to download artifact: %s", err) p.Lock() errors = append(errors, err) p.Unlock() } }) } p.Wait() if len(errors) > 0 { logger.Fatal("There were errors with downloading some of the artifacts") } } return nil }
func (a *ArtifactUploader) upload(artifacts []*api.Artifact) error { var uploader Uploader // Determine what uploader to use if a.Destination != "" { if strings.HasPrefix(a.Destination, "s3://") { uploader = new(S3Uploader) } else { return errors.New("Unknown upload destination: " + a.Destination) } } else { uploader = new(FormUploader) } // Setup the uploader err := uploader.Setup(a.Destination) if err != nil { return err } // Set the URL's of the artifacts based on the uploader for _, artifact := range artifacts { artifact.URL = uploader.URL(artifact) } // Create the artifacts on Buildkite batchCreator := ArtifactBatchCreator{ APIClient: a.APIClient, JobID: a.JobID, Artifacts: artifacts, } artifacts, err = batchCreator.Create() if err != nil { return err } p := pool.New(pool.MaxConcurrencyLimit) errors := []error{} for _, artifact := range artifacts { // Create new instance of the artifact for the goroutine // See: http://golang.org/doc/effective_go.html#channels artifact := artifact p.Spawn(func() { // Show a nice message that we're starting to upload the file logger.Info("Uploading \"%s\" %d bytes", artifact.Path, artifact.FileSize) // Upload the artifact and then set the state depending // on whether or not it passed. We'll retry the upload // a couple of times before giving up. err = retry.Do(func(s *retry.Stats) error { err := uploader.Upload(artifact) if err != nil { logger.Warn("%s (%s)", err, s) } return err }, &retry.Config{Maximum: 10, Interval: 1 * time.Second}) if err != nil { artifact.State = "error" logger.Error("Error uploading artifact \"%s\": %s", artifact.Path, err) // Track the error that was raised p.Lock() errors = append(errors, err) p.Unlock() } else { artifact.State = "finished" } // Update the state of the artifact on Buildkite, we // retry this as well. 
err = retry.Do(func(s *retry.Stats) error { _, _, err = a.APIClient.Artifacts.Update(a.JobID, artifact) if err != nil { logger.Warn("%s (%s)", err, s) } return err }, &retry.Config{Maximum: 10, Interval: 1 * time.Second}) if err != nil { logger.Error("Error marking artifact %s as uploaded: %s", artifact.Path, err) // Track the error that was raised p.Lock() errors = append(errors, err) p.Unlock() } }) } p.Wait() if len(errors) > 0 { logger.Fatal("There were errors with uploading some of the artifacts") } return nil }