func (a *ArtifactBatchCreator) Create() ([]*api.Artifact, error) { length := len(a.Artifacts) chunks := 30 // Split into the artifacts into chunks so we're not uploading a ton of // files at once. for i := 0; i < length; i += chunks { j := i + chunks if length < j { j = length } // The artifacts that will be uploaded in this chunk theseArtiacts := a.Artifacts[i:j] // An ID is required so Buildkite can ensure this create // operation is idompotent (if we try and upload the same ID // twice, it'll just return the previous data and skip the // upload) batch := &api.ArtifactBatch{api.NewUUID(), theseArtiacts, a.UploadDestination} logger.Info("Creating (%d-%d)/%d artifacts", i, j, length) var creation *api.ArtifactBatchCreateResponse var resp *api.Response var err error // Retry the batch upload a couple of times err = retry.Do(func(s *retry.Stats) error { creation, resp, err = a.APIClient.Artifacts.Create(a.JobID, batch) if resp != nil && (resp.StatusCode == 401 || resp.StatusCode == 404 || resp.StatusCode == 500) { s.Break() } if err != nil { logger.Warn("%s (%s)", err, s) } return err }, &retry.Config{Maximum: 10, Interval: 1 * time.Second}) // Did the batch creation eventually fail? if err != nil { return nil, err } // Save the id and instructions to each artifact index := 0 for _, id := range creation.ArtifactIDs { theseArtiacts[index].ID = id theseArtiacts[index].UploadInstructions = creation.UploadInstructions index += 1 } } return a.Artifacts, nil }
func (b *Buildkite) InsertPipelineSteps(steps []interface{}) error { client := b.agentClient pipelineBytes, err := yaml.Marshal(map[string]interface{}{ "steps": steps, }) pipeline := &buildkite.Pipeline{ UUID: buildkite.NewUUID(), Data: pipelineBytes, FileName: "pipeline.yaml", } _, err = client.Pipelines.Upload(b.jobId, pipeline) return err }
// Read the pipeline definition from the given file path.
// NOTE(review): this chunk begins inside a conditional whose opening is
// above the visible region — presumably the non-STDIN branch; confirm
// against the full file.
input, err = ioutil.ReadFile(path)
if err != nil {
	logger.Fatal("Failed to read file: %s", err)
}
}

// Create the API client used to talk to the Buildkite Agent API,
// authenticated with the agent's access token.
client := agent.APIClient{
	Endpoint: cfg.Endpoint,
	Token:    cfg.AgentAccessToken,
}.Create()

// Generate a UUID that will identify this pipeline change. We
// do this outside of the retry loop because we want this UUID
// to be the same for each attempt at updating the pipeline, so
// the server can deduplicate retried uploads.
uuid := api.NewUUID()

// Retry the pipeline upload a few times before giving up
err = retry.Do(func(s *retry.Stats) error {
	_, err = client.Pipelines.Upload(cfg.Job, &api.Pipeline{UUID: uuid, Data: input, FileName: filename, Replace: cfg.Replace})
	if err != nil {
		// Log the failure (with retry stats) but keep retrying until
		// the retry.Config budget below is exhausted.
		logger.Warn("%s (%s)", err, s)
	}

	return err
}, &retry.Config{Maximum: 5, Interval: 1 * time.Second})

// All retries exhausted — this is fatal for the command.
if err != nil {
	logger.Fatal("Failed to upload and process pipeline: %s", err)
}

logger.Info("Successfully uploaded and parsed pipeline config")