Example #1
// Stops the agent from accepting new work and cancels any current work it's
// running
func (a *AgentWorker) Stop() {
	// Only allow one stop to run at a time (because we're playing with channels)
	a.stopMutex.Lock()

	// Unlock on the way out, so the early return below can't leave the
	// mutex held
	defer a.stopMutex.Unlock()

	if a.stopping {
		logger.Debug("Agent is already stopping...")
		return
	}

	logger.Debug("Stopping the agent...")

	// If there's a running job, kill it.
	if a.jobRunner != nil {
		a.jobRunner.Kill()
	}

	// If we have a ticker, stop it, and send a signal to the stop channel,
	// which will cause the agent worker to stop looping immediately.
	if a.ticker != nil {
		close(a.stop)
	}

	// Mark the agent as stopping
	a.stopping = true
}
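
The stop channel above pairs with a select loop elsewhere in the worker. A minimal sketch of what that loop might look like, assuming a ticker-driven ping (the loop and Ping methods here are assumptions, not shown in these examples):

// Hypothetical sketch of the loop that close(a.stop) interrupts.
func (a *AgentWorker) loop() {
	for {
		select {
		case <-a.ticker.C:
			// Assumed: poll the API for new work on every tick
			a.Ping()
		case <-a.stop:
			// close(a.stop) in Stop() unblocks this case for every
			// waiting receiver, ending the loop immediately
			return
		}
	}
}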
Example #2
// The actual log streamer worker
func Worker(id int, ls *LogStreamer) {
	logger.Debug("[LogStreamer/Worker#%d] Worker is starting...", id)

	var chunk *LogStreamerChunk
	for {
		// Get the next chunk (pointer) from the queue. This will block
		// until something is returned.
		chunk = <-ls.queue

		// If the next chunk is nil, then there is no more work to do
		if chunk == nil {
			break
		}

		// Upload the chunk
		err := ls.Callback(chunk)
		if err != nil {
			atomic.AddInt32(&ls.ChunksFailedCount, 1)

			logger.Error("Giving up on uploading chunk %d, this will result in only a partial build log on Buildkite", chunk.Order)
		}

		// Signal to the chunkWaitGroup that this one is done
		ls.chunkWaitGroup.Done()
	}

	logger.Debug("[LogStreamer/Worker#%d] Worker has shutdown", id)
}
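
For context, a sketch of how these workers might be wired up. The Start and enqueue methods below are assumptions inferred from the queue, the nil sentinel, and the wait-group calls in this worker and in the Stop method of Example #8:

// Hypothetical sketch: spawn Concurrency workers, and track each
// queued chunk so Stop() can wait for it to finish uploading.
func (ls *LogStreamer) Start() error {
	for i := 0; i < ls.Concurrency; i++ {
		go Worker(i, ls)
	}
	return nil
}

func (ls *LogStreamer) enqueue(chunk *LogStreamerChunk) {
	ls.chunkWaitGroup.Add(1)
	ls.queue <- chunk
}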
Example #3
func (u *S3Uploader) Upload(artifact *api.Artifact) error {
	permission := "public-read"
	if os.Getenv("BUILDKITE_S3_ACL") != "" {
		permission = os.Getenv("BUILDKITE_S3_ACL")
	} else if os.Getenv("AWS_S3_ACL") != "" {
		permission = os.Getenv("AWS_S3_ACL")
	}

	// The dirtiest validation method ever...
	if permission != "private" &&
		permission != "public-read" &&
		permission != "public-read-write" &&
		permission != "authenticated-read" &&
		permission != "bucket-owner-read" &&
		permission != "bucket-owner-full-control" {
		logger.Fatal("Invalid S3 ACL `%s`", permission)
	}

	perms := s3.ACL(permission)

	logger.Debug("Reading file \"%s\"", artifact.AbsolutePath)
	data, err := ioutil.ReadFile(artifact.AbsolutePath)
	if err != nil {
		return errors.New("Failed to read file " + artifact.AbsolutePath + " (" + err.Error() + ")")
	}

	logger.Debug("Uploading \"%s\" to bucket with permission `%s`", u.artifactPath(artifact), permission)
	err = u.Bucket.Put(u.artifactPath(artifact), data, u.mimeType(artifact), perms, s3.Options{})
	if err != nil {
		return fmt.Errorf("Failed to PUT file \"%s\" (%s)", u.artifactPath(artifact), err.Error())
	}

	return nil
}
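
The chained comparisons above (self-described as "the dirtiest validation method ever") could be replaced with a set lookup. A sketch of that alternative, not what the code currently does:

// Sketch: validate the ACL against a set instead of chained comparisons.
var validS3ACLs = map[string]bool{
	"private":                   true,
	"public-read":               true,
	"public-read-write":         true,
	"authenticated-read":        true,
	"bucket-owner-read":         true,
	"bucket-owner-full-control": true,
}

if !validS3ACLs[permission] {
	logger.Fatal("Invalid S3 ACL `%s`", permission)
}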
Example #4
func (h *HeaderTimesStreamer) Start() error {
	h.streaming = true

	go func() {
		logger.Debug("[HeaderTimesStreamer] Streamer has started...")

		for {
			// Break out of streaming if it's finished. We need to
			// acquire the lock to read the flag, because it can be
			// modified by other routines, and we have to release
			// the lock again before breaking out of the loop.
			h.streamingMutex.Lock()
			streaming := h.streaming
			h.streamingMutex.Unlock()

			if !streaming {
				break
			}

			// Upload any pending header times
			h.Upload()

			// Sleep for a second and try upload some more later
			time.Sleep(1 * time.Second)
		}

		logger.Debug("[HeaderTimesStreamer] Streamer has finished...")
	}()

	return nil
}
Example #5
func (h *HeaderTimesStreamer) Upload() {
	// Store the current cursor value
	c := h.cursor

	// Grab only the times that we haven't uploaded yet. We need to acquire
	// a lock since other routines may be adding to it.
	h.timesMutex.Lock()
	length := len(h.times)
	times := h.times[h.cursor:length]
	h.timesMutex.Unlock()

	// Construct the payload to send to the server
	payload := map[string]string{}
	for index, time := range times {
		payload[strconv.Itoa(h.cursor+index)] = time
	}

	// Save the cursor we're up to
	h.cursor = length

	// How many times are we uploading this time
	timesToUpload := len(times)

	// Do we even have some times to upload
	if timesToUpload > 0 {
		// Call our callback with the times for upload
		logger.Debug("[HeaderTimesStreamer] Uploading header times %d..%d", c, length-1)
		h.UploadCallback(c, length, payload)
		logger.Debug("[HeaderTimesStreamer] Finished uploading header times %d..%d", c, length-1)

		// Decrement the wait group for every time we've uploaded.
		h.uploadWaitGroup.Add(timesToUpload * -1)
	}
}
Example #6
// Do sends an API request and returns the API response. The API response is
// JSON decoded and stored in the value pointed to by v, or returned as an
// error if an API error has occurred.  If v implements the io.Writer
// interface, the raw response body will be written to v, without attempting to
// first decode it.
func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) {
	var err error

	if c.DebugHTTP {
		// If the request is a multi-part form, then it's probably a
		// file upload, in which case we don't want to spew out the
		// file contents into the debug log (especially if it's been
		// gzipped)
		var requestDump []byte
		if strings.Contains(req.Header.Get("Content-Type"), "multipart/form-data") {
			requestDump, err = httputil.DumpRequestOut(req, false)
		} else {
			requestDump, err = httputil.DumpRequestOut(req, true)
		}

		logger.Debug("ERR: %s\n%s", err, string(requestDump))
	}

	ts := time.Now()

	logger.Debug("%s %s", req.Method, req.URL)

	resp, err := c.client.Do(req)
	if err != nil {
		return nil, err
	}

	logger.Debug("↳ %s %s (%s %s)", req.Method, req.URL, resp.Status, time.Now().Sub(ts))

	defer resp.Body.Close()
	defer io.Copy(ioutil.Discard, resp.Body)

	response := newResponse(resp)

	if c.DebugHTTP {
		responseDump, err := httputil.DumpResponse(resp, true)
		logger.Debug("\nERR: %s\n%s", err, string(responseDump))
	}

	err = checkResponse(resp)
	if err != nil {
		// even though there was an error, we still return the response
		// in case the caller wants to inspect it further
		return response, err
	}

	if v != nil {
		if w, ok := v.(io.Writer); ok {
			_, err = io.Copy(w, resp.Body)
		} else {
			err = json.NewDecoder(resp.Body).Decode(v)
		}
	}

	return response, err
}
Example #7
func (h *HeaderTimesStreamer) Stop() {
	logger.Debug("[HeaderTimesStreamer] Waiting for all the lines to be scanned")
	h.scanWaitGroup.Wait()

	logger.Debug("[HeaderTimesStreamer] Waiting for all the header times to be uploaded")
	h.uploadWaitGroup.Wait()

	// Since we're modifying the waitGroup and the streaming flag, we need
	// to acquire a write lock.
	h.streamingMutex.Lock()
	h.streaming = false
	h.streamingMutex.Unlock()
}
Example #8
// Waits for all the chunks to be uploaded, then shuts down all the workers
func (ls *LogStreamer) Stop() error {
	logger.Debug("[LogStreamer] Waiting for all the chunks to be uploaded")

	ls.chunkWaitGroup.Wait()

	logger.Debug("[LogStreamer] Shutting down all workers")

	for n := 0; n < ls.Concurrency; n++ {
		ls.queue <- nil
	}

	return nil
}
Example #9
// Runs the job
func (r *JobRunner) Run() error {
	logger.Info("Starting job %s", r.Job.ID)

	// Start the build in the Buildkite Agent API. This is the first thing
	// we do so if it fails, we don't have to worry about cleaning things
	// up like started log streamer workers, etc.
	if err := r.startJob(time.Now()); err != nil {
		return err
	}

	// Start the log streamer
	if err := r.logStreamer.Start(); err != nil {
		return err
	}

	// Start the process. This will block until it finishes.
	if err := r.process.Start(); err != nil {
		// Send the error as output
		r.logStreamer.Process(err.Error())
	} else {
		// Add the final output to the streamer
		r.logStreamer.Process(r.process.Output())
	}

	// Store the finished at time
	finishedAt := time.Now()

	// Wait until all the header times have finished uploading
	logger.Debug("Waiting for header times to finish uploading")
	r.headerTimesStreamer.Wait()

	// Stop the log streamer. This will block until all the chunks have
	// been uploaded
	r.logStreamer.Stop()

	// Warn about failed chunks
	if r.logStreamer.ChunksFailedCount > 0 {
		logger.Warn("%d chunks failed to upload for this job", r.logStreamer.ChunksFailedCount)
	}

	// Finish the build in the Buildkite Agent API
	r.finishJob(finishedAt, r.process.ExitStatus, int(r.logStreamer.ChunksFailedCount))

	// Wait for the routines that we spun up to finish
	logger.Debug("Waiting for all other routines to finish")
	r.wg.Wait()

	logger.Info("Finished job %s", r.Job.ID)

	return nil
}
Example #10
func (r *JobRunner) onProcessStartCallback() {
	// Add to the wait group before spawning the routine, so a concurrent
	// Wait() can't race ahead of the Add()
	r.wg.Add(1)

	// Start a routine that will grab the output every few seconds and send
	// it back to Buildkite
	go func() {

		for r.process.Running {
			// Send the output of the process to the log streamer
			// for processing
			r.logStreamer.Process(r.process.Output())

			// Check the output in another second
			time.Sleep(1 * time.Second)
		}

		// Mark this routine as done in the wait group
		r.wg.Done()

		logger.Debug("Routine that processes the log has finished")
	}()

	// Add to the wait group before spawning the routine
	r.wg.Add(1)

	// Start a routine that will poll the job every few seconds to see if
	// it's been cancelled on Buildkite
	go func() {

		for r.process.Running {
			// Re-get the job and check its state to see if it's
			// been cancelled
			jobState, _, err := r.APIClient.Jobs.GetState(r.Job.ID)
			if err != nil {
				// We don't really care if it fails, we'll just
				// try again in a second anyway
				logger.Warn("Problem with getting job state %s (%s)", r.Job.ID, err)
			} else if jobState.State == "canceling" || jobState.State == "canceled" {
				r.Kill()
			}

			// Check for cancellations every few seconds
			time.Sleep(3 * time.Second)
		}

		// Mark this routine as done in the wait group
		r.wg.Done()

		logger.Debug("Routine that refreshes the job has finished")
	}()
}
Example #11
func (p *Process) signal(sig os.Signal) error {
	if p.command != nil && p.command.Process != nil {
		logger.Debug("[Process] Sending signal: %s to PID: %d", sig.String(), p.Pid)

		err := p.command.Process.Signal(sig)
		if err != nil {
			logger.Error("[Process] Failed to send signal: %s to PID: %d (%T: %v)", sig.String(), p.Pid, err, err)
			return err
		}
	} else {
		logger.Debug("[Process] No process to signal yet")
	}

	return nil
}
Example #12
func (a *ArtifactUploader) collect() (artifacts []*api.Artifact, err error) {
	globPaths := strings.Split(a.Paths, ";")

	for _, globPath := range globPaths {
		globPath = strings.TrimSpace(globPath)
		workingDirectory := a.WorkingDirectory(globPath)

		if globPath != "" {
			logger.Debug("Searching for %s", a.NormalizedPath(globPath))

			files, err := glob.Glob(workingDirectory, globPath)
			if err != nil {
				return nil, err
			}

			for _, file := range files {
				// Generate an absolute path for the artifact
				absolutePath, err := filepath.Abs(file)
				if err != nil {
					return nil, err
				}

				fileInfo, err := os.Stat(absolutePath)
				if err != nil {
					return nil, err
				}

				if fileInfo.IsDir() {
					logger.Debug("Skipping directory %s", file)
					continue
				}

				// Create a relative path (from the workingDirectory) to the artifact, by removing the
				// first part of the absolutePath that is the workingDirectory.
				relativePath := strings.Replace(absolutePath, workingDirectory, "", 1)

				// Ensure the relativePath doesn't have a file separator "/" as the first character
				relativePath = strings.TrimPrefix(relativePath, string(os.PathSeparator))

				// Build an artifact object using the paths we have.
				artifact, err := a.build(relativePath, absolutePath, globPath)
				if err != nil {
					return nil, err
				}

				artifacts = append(artifacts, artifact)
			}
		}
	}

	return artifacts, nil
}
Example #13
func (u *S3Uploader) Setup(destination string, debugHTTP bool) error {
	u.Destination = destination
	u.DebugHTTP = debugHTTP

	// Try to auth with S3
	auth, err := awsS3Auth()
	if err != nil {
		return errors.New(fmt.Sprintf("Error creating AWS S3 authentication: %s", err.Error()))
	}

	// Try and get the region
	region, err := awsS3Region()
	if err != nil {
		return err
	}

	logger.Debug("Authorizing S3 credentials and finding bucket `%s` in region `%s`...", u.BucketName(), region.Name)

	// Find the bucket
	s3Client := s3.New(auth, region)
	bucket := s3Client.Bucket(u.BucketName())

	// If the list doesn't return an error, then we've got our bucket
	_, err = bucket.List("", "", "", 0)
	if err != nil {
		return errors.New("Could not find bucket `" + u.BucketName() + "` in region `" + region.Name + "` (" + err.Error() + ")")
	}

	u.Bucket = bucket

	return nil
}
Example #14
func Run(command string, arg ...string) (string, error) {
	output, err := exec.Command(command, arg...).Output()

	if err != nil {
		logger.Debug("Could not run: %s %s (returned %s) (%T: %v)", command, arg, output, err, err)
		return "", err
	}

	return strings.Trim(string(output), "\n"), nil
}
Example #15
func (r *JobRunner) onUploadHeaderTime(cursor int, total int, times map[string]string) {
	retry.Do(func(s *retry.Stats) error {
		logger.Debug("Uploading header times %d..%d (%d)", cursor+1, total, len(times))

		_, err := r.APIClient.HeaderTimes.Save(r.Job.ID, &api.HeaderTimes{Times: times})
		if err != nil {
			logger.Warn("%s (%s)", err, s)
		}

		return err
	}, &retry.Config{Maximum: 10, Interval: 1 * time.Second})
}
Example #16
func (u *GSUploader) Upload(artifact *api.Artifact) error {
	permission := os.Getenv("BUILDKITE_GS_ACL")

	// The dirtiest validation method ever...
	if permission != "" &&
		permission != "authenticatedRead" &&
		permission != "private" &&
		permission != "projectPrivate" &&
		permission != "publicRead" &&
		permission != "publicReadWrite" {
		logger.Fatal("Invalid GS ACL `%s`", permission)
	}

	if permission == "" {
		logger.Debug("Uploading \"%s\" to bucket \"%s\" with default permission",
			u.artifactPath(artifact), u.BucketName())
	} else {
		logger.Debug("Uploading \"%s\" to bucket \"%s\" with permission \"%s\"",
			u.artifactPath(artifact), u.BucketName(), permission)
	}
	object := &storage.Object{
		Name:        u.artifactPath(artifact),
		ContentType: u.mimeType(artifact),
	}
	file, err := os.Open(artifact.AbsolutePath)
	if err != nil {
		return fmt.Errorf("Failed to open file %q (%v)", artifact.AbsolutePath, err)
	}
	call := u.Service.Objects.Insert(u.BucketName(), object)
	if permission != "" {
		call = call.PredefinedAcl(permission)
	}
	if res, err := call.Media(file).Do(); err == nil {
		logger.Debug("Created object %v at location %v\n\n", res.Name, res.SelfLink)
	} else {
		return errors.New(fmt.Sprintf("Failed to PUT file \"%s\" (%v)", u.artifactPath(artifact), err))
	}

	return nil
}
Example #17
func (d S3Downloader) Start() error {
	// Try to auth with S3
	auth, err := awsS3Auth()
	if err != nil {
		return errors.New(fmt.Sprintf("Error creating AWS S3 authentication: %s", err.Error()))
	}

	// Try and get the region
	region, err := awsS3Region()
	if err != nil {
		return err
	}

	// Split apart the bucket
	bucketParts := strings.Split(strings.TrimPrefix(d.Bucket, "s3://"), "/")
	bucketName := bucketParts[0]
	bucketPath := strings.Join(bucketParts[1:], "/")

	logger.Debug("Authorizing S3 credentials and finding bucket `%s` in region `%s`...", bucketName, region.Name)

	// Find the bucket
	s3Client := s3.New(auth, region)
	bucket := s3Client.Bucket(bucketName)

	// If the list doesn't return an error, then we've got our bucket
	_, err = bucket.List("", "", "", 0)
	if err != nil {
		return errors.New("Could not find bucket `" + bucketName + "` in region `" + region.Name + "` (" + err.Error() + ")")
	}

	// Create the location of the file
	var s3Location string
	if bucketPath != "" {
		s3Location = strings.TrimRight(bucketPath, "/") + "/" + strings.TrimLeft(d.Path, "/")
	} else {
		s3Location = d.Path
	}

	// Generate a Signed URL
	signedURL := bucket.SignedURL(s3Location, time.Now().Add(time.Hour))

	// We can now cheat and pass the URL onto our regular downloader
	return Download{
		URL:         signedURL,
		Path:        d.Path,
		Destination: d.Destination,
		Retries:     d.Retries,
		DebugHTTP:   d.DebugHTTP,
	}.Start()
}
Example #18
func (h *HeaderTimesStreamer) Scan(line string) {
	// Keep track of how many line scans we need to do
	h.scanWaitGroup.Add(1)
	defer h.scanWaitGroup.Done()

	if h.lineIsHeader(line) {
		logger.Debug("[HeaderTimesStreamer] Found header %q", line)

		// Acquire a lock on the times and then add the current time to
		// our times slice.
		h.timesMutex.Lock()
		h.times = append(h.times, time.Now().UTC().Format(time.RFC3339Nano))
		h.timesMutex.Unlock()

		// Add the time to the wait group
		h.uploadWaitGroup.Add(1)
	}
}
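
lineIsHeader isn't shown in these examples. Buildkite renders collapsible log groups from lines beginning with certain markers, so a plausible sketch is the following (an assumption for illustration, not the agent's actual implementation):

// Hypothetical: treat lines starting with Buildkite's group markers
// as headers whose timestamps should be streamed.
func (h *HeaderTimesStreamer) lineIsHeader(line string) bool {
	return strings.HasPrefix(line, "---") ||
		strings.HasPrefix(line, "+++") ||
		strings.HasPrefix(line, "~~~")
}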
Example #19
// Performs a heartbeat
func (a *AgentWorker) Heartbeat() error {
	var beat *api.Heartbeat
	var err error

	// Retry the heartbeat a few times
	err = retry.Do(func(s *retry.Stats) error {
		beat, _, err = a.APIClient.Heartbeats.Beat()
		if err != nil {
			logger.Warn("%s (%s)", err, s)
		}
		return err
	}, &retry.Config{Maximum: 5, Interval: 1 * time.Second})

	if err != nil {
		return err
	}

	logger.Debug("Heartbeat sent at %s and received at %s", beat.SentAt, beat.ReceivedAt)
	return nil
}
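
The retry.Do / retry.Config pattern recurs throughout these examples. A minimal sketch of how such a helper could be implemented, inferred only from the call sites shown here (the real retry package may well differ):

// Hypothetical minimal retry helper matching the call sites above.
type Stats struct{ Attempt int }

func (s *Stats) String() string {
	return fmt.Sprintf("attempt %d", s.Attempt)
}

type Config struct {
	Maximum  int           // give up after this many attempts
	Interval time.Duration // sleep between attempts
}

func Do(f func(*Stats) error, c *Config) error {
	var err error
	for attempt := 1; attempt <= c.Maximum; attempt++ {
		if err = f(&Stats{Attempt: attempt}); err == nil {
			return nil
		}
		time.Sleep(c.Interval)
	}
	return err
}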
Example #20
// Shows the welcome banner and the configuration options used when starting
// this agent.
func (r *AgentPool) ShowBanner() {
	welcomeMessage :=
		"\n" +
			"%s  _           _ _     _ _    _ _                                _\n" +
			" | |         (_) |   | | |  (_) |                              | |\n" +
			" | |__  _   _ _| | __| | | ___| |_ ___    __ _  __ _  ___ _ __ | |_\n" +
			" | '_ \\| | | | | |/ _` | |/ / | __/ _ \\  / _` |/ _` |/ _ \\ '_ \\| __|\n" +
			" | |_) | |_| | | | (_| |   <| | ||  __/ | (_| | (_| |  __/ | | | |_\n" +
			" |_.__/ \\__,_|_|_|\\__,_|_|\\_\\_|\\__\\___|  \\__,_|\\__, |\\___|_| |_|\\__|\n" +
			"                                                __/ |\n" +
			" http://buildkite.com/agent                    |___/\n%s\n"

	if logger.ColorsEnabled() {
		fmt.Fprintf(logger.OutputPipe(), welcomeMessage, "\x1b[32m", "\x1b[0m")
	} else {
		fmt.Fprintf(logger.OutputPipe(), welcomeMessage, "", "")
	}

	logger.Notice("Starting buildkite-agent v%s with PID: %s", Version(), fmt.Sprintf("%d", os.Getpid()))
	logger.Notice("The agent source code can be found here: https://github.com/buildkite/agent")
	logger.Notice("For questions and support, email us at: [email protected]")

	if r.ConfigFilePath != "" {
		logger.Info("Configuration loaded from: %s", r.ConfigFilePath)
	}

	logger.Debug("Bootstrap command: %s", r.AgentConfiguration.BootstrapScript)
	logger.Debug("Build path: %s", r.AgentConfiguration.BuildPath)
	logger.Debug("Hooks directory: %s", r.AgentConfiguration.HooksPath)
	logger.Debug("Plugins directory: %s", r.AgentConfiguration.PluginsPath)

	if !r.AgentConfiguration.SSHFingerprintVerification {
		logger.Debug("Automatic SSH fingerprint verification has been disabled")
	}

	if !r.AgentConfiguration.CommandEval {
		logger.Debug("Evaluating console commands has been disabled")
	}

	if !r.AgentConfiguration.RunInPty {
		logger.Debug("Running builds within a pseudoterminal (PTY) has been disabled")
	}
}
Example #21
func (u *S3Uploader) Setup(destination string) error {
	u.Destination = destination

	// Try to auth with S3
	auth, err := awsS3Auth()
	if err != nil {
		return fmt.Errorf("Error creating AWS S3 authentication: %s", err.Error())
	}

	regionName := "us-east-1"
	if os.Getenv("BUILDKITE_S3_DEFAULT_REGION") != "" {
		regionName = os.Getenv("BUILDKITE_S3_DEFAULT_REGION")
	} else if os.Getenv("AWS_DEFAULT_REGION") != "" {
		regionName = os.Getenv("AWS_DEFAULT_REGION")
	}

	// Check to make sure the region exists. There is a GetRegion API, but
	// there doesn't seem to be a way to make it error out if the region
	// doesn't exist.
	region, ok := aws.Regions[regionName]
	if !ok {
		return fmt.Errorf("Unknown AWS S3 Region `%s`", regionName)
	}

	logger.Debug("Authorizing S3 credentials and finding bucket `%s` in region `%s`...", u.bucketName(), regionName)

	// Find the bucket
	s3Client := s3.New(auth, region)
	bucket := s3Client.Bucket(u.bucketName())

	// If the list doesn't return an error, then we've got our bucket
	_, err = bucket.List("", "", "", 0)
	if err != nil {
		return errors.New("Could not find bucket `" + u.bucketName() + "` in region `" + region.Name + "` (" + err.Error() + ")")
	}

	u.Bucket = bucket

	return nil
}
Example #22
func (d S3Downloader) Start() error {
	// Try to auth with S3
	auth, err := awsS3Auth()
	if err != nil {
		return fmt.Errorf("Error creating AWS S3 authentication: %s", err.Error())
	}

	// Try and get the region
	region, err := awsS3Region()
	if err != nil {
		return err
	}

	logger.Debug("Authorizing S3 credentials and finding bucket `%s` in region `%s`...", d.BucketName(), region.Name)

	// Find the bucket
	s3Client := s3.New(auth, region)
	bucket := s3Client.Bucket(d.BucketName())

	// If the list doesn't return an error, then we've got our bucket
	_, err = bucket.List("", "", "", 0)
	if err != nil {
		return errors.New("Could not find bucket `" + d.BucketName() + "` in region `" + region.Name + "` (" + err.Error() + ")")
	}

	// Generate a Signed URL
	signedURL := bucket.SignedURL(d.BucketFileLocation(), time.Now().Add(time.Hour))

	// We can now cheat and pass the URL onto our regular downloader
	return Download{
		Client:      *http.DefaultClient,
		URL:         signedURL,
		Path:        d.Path,
		Destination: d.Destination,
		Retries:     d.Retries,
		DebugHTTP:   d.DebugHTTP,
	}.Start()
}
Example #23
func (u *FormUploader) Upload(artifact *api.Artifact) error {
	// Create an HTTP request for uploading the file
	request, err := createUploadRequest(artifact)
	if err != nil {
		return err
	}

	// Create the client
	client := &http.Client{}

	// Perform the request
	logger.Debug("%s %s", request.Method, request.URL)
	response, err := client.Do(request)

	// Check for errors
	if err != nil {
		return err
	}

	// Be sure to close the response body at the end of this function
	defer response.Body.Close()

	if response.StatusCode/100 != 2 {
		body := &bytes.Buffer{}
		_, err := body.ReadFrom(response.Body)
		if err != nil {
			return err
		}

		// Return a custom error with the response body from the page
		message := fmt.Sprintf("%s (%d)", body, response.StatusCode)
		return errors.New(message)
	}

	return nil
}
Example #24
		// Set up any global configuration options
		HandleGlobalFlags(cfg)

		// Find the artifact we want to show the SHASUM for
		searcher := agent.ArtifactSearcher{
			APIClient: agent.APIClient{
				Endpoint: cfg.Endpoint,
				Token:    cfg.AgentAccessToken,
			}.Create(),
			BuildID: cfg.Build,
		}

		artifacts, err := searcher.Search(cfg.Query, cfg.Step)
		if err != nil {
			logger.Fatal("Failed to find artifacts: %s", err)
		}

		artifactsFoundLength := len(artifacts)

		if artifactsFoundLength == 0 {
			logger.Fatal("No artifacts found for downloading")
		} else if artifactsFoundLength > 1 {
			logger.Fatal("Multiple artifacts were found. Try being more specific with the search or scope by step")
		} else {
			logger.Debug("Artifact \"%s\" found", artifacts[0].Path)

			fmt.Printf("%s\n", artifacts[0].Sha1Sum)
		}
	},
}
Example #25
func (d Download) try() error {
	// If we're downloading a file with a path of "pkg/foo.txt" to a folder
	// called "pkg", we should merge the two paths together. So, instead of it
	// downloading to: destination/pkg/pkg/foo.txt, it will just download to
	// destination/pkg/foo.txt
	destinationPaths := strings.Split(d.Destination, string(os.PathSeparator))
	downloadPaths := strings.Split(d.Path, string(os.PathSeparator))

	for i := 0; i < len(downloadPaths); i += 100 {
		// If the last part of the destination path matches
		// this path in the download, then cut it out.
		lastIndex := len(destinationPaths) - 1

		// Break if we've gone too far.
		if lastIndex == -1 {
			break
		}

		lastPathInDestination := destinationPaths[lastIndex]
		if lastPathInDestination == downloadPaths[i] {
			destinationPaths = destinationPaths[:lastIndex]
		}
	}

	finalizedDestination := strings.Join(destinationPaths, string(os.PathSeparator))

	targetFile := filepath.Join(finalizedDestination, d.Path)
	targetDirectory, _ := filepath.Split(targetFile)

	// Show a nice message that we're starting to download the file
	logger.Debug("Downloading %s to %s", d.URL, targetFile)

	// Start by downloading the file
	response, err := d.Client.Get(d.URL)
	if err != nil {
		return fmt.Errorf("Error while downloading %s (%T: %v)", d.URL, err, err)
	}
	defer response.Body.Close()

	// Double check the status
	if response.StatusCode/100 != 2 && response.StatusCode/100 != 3 {
		if d.DebugHTTP {
			responseDump, err := httputil.DumpResponse(response, true)
			logger.Debug("\nERR: %s\n%s", err, string(responseDump))
		}

		return &downloadError{response.Status}
	}

	// Now make the folder for our file
	err = os.MkdirAll(targetDirectory, 0777)
	if err != nil {
		return fmt.Errorf("Failed to create folder for %s (%T: %v)", targetFile, err, err)
	}

	// Create a file to handle the file
	fileBuffer, err := os.Create(targetFile)
	if err != nil {
		return fmt.Errorf("Failed to create file %s (%T: %v)", targetFile, err, err)
	}
	defer fileBuffer.Close()

	// Copy the data to the file
	copiedBytes, err := io.Copy(fileBuffer, response.Body)
	if err != nil {
		return fmt.Errorf("Error when copying data %s (%T: %v)", d.URL, err, err)
	}

	logger.Info("Successfully downloaded \"%s\" %d bytes", d.Path, copiedBytes)

	return nil
}
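
The downloadError type returned above isn't defined in this excerpt. All it needs to do is satisfy the error interface while carrying the HTTP status line, roughly as follows (the field name is an assumption):

// Hypothetical sketch of downloadError: wraps the HTTP status so
// callers can decide whether retrying the download is worthwhile.
type downloadError struct {
	Status string
}

func (e *downloadError) Error() string {
	return e.Status
}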
Example #26
func (p *Process) Start() error {
	c, err := shell.CommandFromString(p.Script)
	if err != nil {
		return err
	}

	p.command = exec.Command(c.Command, c.Args...)

	// Copy the current process's ENV and merge in the new ones. We do this
	// so the sub process gets PATH and stuff. We merge our env in over the
	// top of the current one, so the ENV from Buildkite and the agent
	// takes precedence over the existing environment
	currentEnv := os.Environ()
	p.command.Env = append(currentEnv, p.Env...)

	var waitGroup sync.WaitGroup

	lineReaderPipe, lineWriterPipe := io.Pipe()

	multiWriter := io.MultiWriter(&p.buffer, lineWriterPipe)

	logger.Info("Starting to run: %s", c.String())

	// Toggle between running in a pty
	if p.PTY {
		pty, err := StartPTY(p.command)
		if err != nil {
			p.ExitStatus = "1"
			return err
		}

		p.Pid = p.command.Process.Pid
		p.setRunning(true)

		waitGroup.Add(1)

		go func() {
			logger.Debug("[Process] Starting to copy PTY to the buffer")

			// Copy the pty to our buffer. This will block until it
			// EOF's or something breaks.
			_, err = io.Copy(multiWriter, pty)
			if e, ok := err.(*os.PathError); ok && e.Err == syscall.EIO {
				// We can safely ignore this error, because
				// it's just the PTY telling us that it closed
				// successfully.  See:
				// https://github.com/buildkite/agent/pull/34#issuecomment-46080419
				err = nil
			}

			if err != nil {
				logger.Error("[Process] PTY output copy failed with error: %T: %v", err, err)
			} else {
				logger.Debug("[Process] PTY has finished being copied to the buffer")
			}

			waitGroup.Done()
		}()
	} else {
		p.command.Stdout = multiWriter
		p.command.Stderr = multiWriter
		p.command.Stdin = nil

		err := p.command.Start()
		if err != nil {
			p.ExitStatus = "1"
			return err
		}

		p.Pid = p.command.Process.Pid
		p.setRunning(true)
	}

	logger.Info("[Process] Process is running with PID: %d", p.Pid)

	// Add the line callback routine to the waitGroup
	waitGroup.Add(1)

	go func() {
		logger.Debug("[LineScanner] Starting to read lines")

		reader := bufio.NewReader(lineReaderPipe)

		var appending []byte

		for {
			line, isPrefix, err := reader.ReadLine()
			if err != nil {
				if err == io.EOF {
					logger.Debug("[LineScanner] Encountered EOF")
				} else {
					// A reader error is persistent, so bail out
					// instead of spinning on it
					logger.Error("[LineScanner] Failed to read: (%T: %v)", err, err)
				}

				break
			}

			// If isPrefix is true, that means we've got a really
			// long line incoming, and we'll keep appending to it
			// until isPrefix is false (which means the long line
			// has ended).
			if isPrefix && appending == nil {
				logger.Debug("[LineScanner] Line is too long to read, going to buffer it until it finishes")
				appending = line

				continue
			}

			// Should we be appending?
			if appending != nil {
				appending = append(appending, line...)

				// No more isPrefix! Line is finished!
				if !isPrefix {
					logger.Debug("[LineScanner] Finished buffering long line")
					line = appending

					// Reset appending back to nil
					appending = nil
				} else {
					continue
				}
			}

			go p.LineCallback(string(line))
		}

		logger.Debug("[LineScanner] Finished")

		waitGroup.Done()
	}()

	// Call the StartCallback
	go p.StartCallback()

	// Wait until the process has finished. The returned error is nil if the command runs,
	// has no problems copying stdin, stdout, and stderr, and exits with a zero exit status.
	waitResult := p.command.Wait()

	// Close the line writer pipe
	lineWriterPipe.Close()

	// The process is no longer running at this point
	p.setRunning(false)

	// Find the exit status of the script
	p.ExitStatus = getExitStatus(waitResult)

	logger.Info("Process with PID: %d finished with Exit Status: %s", p.Pid, p.ExitStatus)

	// Sometimes (in docker containers) io.Copy never seems to finish. This is a mega
	// hack around it. If it doesn't finish after 1 second, just continue.
	logger.Debug("[Process] Waiting for routines to finish")
	err = timeoutWait(&waitGroup)
	if err != nil {
		logger.Debug("[Process] Timed out waiting for wait group: (%T: %v)", err, err)
	}

	// No error occurred so we can return nil
	return nil
}
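
timeoutWait isn't shown in this excerpt. Given the "mega hack" comment above, a plausible sketch is a wait raced against a timer (the exact timeout value is an assumption based on that comment):

// Hypothetical sketch of timeoutWait: wait for the group in a
// goroutine and race it against a timer, so a stuck io.Copy can't
// hang the process forever.
func timeoutWait(waitGroup *sync.WaitGroup) error {
	done := make(chan struct{})
	go func() {
		waitGroup.Wait()
		close(done)
	}()

	select {
	case <-done:
		return nil
	case <-time.After(1 * time.Second):
		return errors.New("timed out waiting for wait group")
	}
}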
Example #27
func (p *Process) Kill() error {
	// Send a sigterm
	err := p.signal(syscall.SIGTERM)
	if err != nil {
		return err
	}

	// Make a channel that we'll use as a timeout
	c := make(chan int, 1)
	checking := true

	// Start a routine that checks to see if the process
	// is still alive.
	go func() {
		for checking {
			logger.Debug("[Process] Checking to see if PID: %d is still alive", p.Pid)

			foundProcess, err := os.FindProcess(p.Pid)

			// Can't find the process at all
			if err != nil {
				logger.Debug("[Process] Could not find process with PID: %d", p.Pid)

				break
			}

			// We have some information about the process
			if foundProcess != nil {
				processState, err := foundProcess.Wait()

				if err != nil || processState.Exited() {
					logger.Debug("[Process] Process with PID: %d has exited.", p.Pid)

					break
				}
			}

			// Retry in a moment
			time.Sleep(1 * time.Second)
		}

		c <- 1
	}()

	// Timeout this process after 10 seconds
	select {
	case <-c:
		// Was successfully terminated
	case <-time.After(10 * time.Second):
		// Stop checking in the routine above
		checking = false

		// Forcefully kill the thing
		err = p.signal(syscall.SIGKILL)

		if err != nil {
			return err
		}
	}

	return nil
}
Example #28
func (r *AgentPool) Start() error {
	// Show the welcome banner and config options used
	r.ShowBanner()

	// Create the agent registration API Client
	r.APIClient = APIClient{Endpoint: r.Endpoint, Token: r.Token}.Create()

	// Create the agent template. We pass this template to the register
	// call, at which point we get back a real agent.
	template := r.CreateAgentTemplate()

	logger.Info("Registering agent with Buildkite...")

	// Register the agent
	registered, err := r.RegisterAgent(template)
	if err != nil {
		logger.Fatal("%s", err)
	}

	logger.Info("Successfully registered agent \"%s\" with meta-data %s", registered.Name, registered.MetaData)

	logger.Debug("Ping interval: %ds", registered.PingInterval)
	logger.Debug("Heartbeat interval: %ds", registered.HearbeatInterval)

	// Now that we have a registered agent, we can connect it to the API
	// and start running jobs.
	worker := AgentWorker{Agent: registered, AgentConfiguration: r.AgentConfiguration, Endpoint: r.Endpoint}.Create()

	logger.Info("Connecting to Buildkite...")
	if err := worker.Connect(); err != nil {
		logger.Fatal("%s", err)
	}

	logger.Info("Agent successfully connected")
	logger.Info("You can press Ctrl-C to stop the agent")
	logger.Info("Waiting for work...")

	// Start a signalwatcher so we can monitor signals and handle shutdowns
	signalwatcher.Watch(func(sig signalwatcher.Signal) {
		if sig == signalwatcher.QUIT {
			logger.Debug("Received signal `%s`", sig.String())
			worker.Stop(false)
		} else if sig == signalwatcher.TERM || sig == signalwatcher.INT {
			logger.Debug("Received signal `%s`", sig.String())
			worker.Stop(true)
		} else {
			logger.Debug("Ignoring signal `%s`", sig.String())
		}
	})

	// Starts the agent worker. This will block until the agent has
	// finished or is stopped.
	if err := worker.Start(); err != nil {
		logger.Fatal("%s", err)
	}

	// Now that the agent has stopped, we can disconnect it
	logger.Info("Disconnecting %s...", worker.Agent.Name)
	worker.Disconnect()

	return nil
}
Example #29
			// Read the default file
			filename = path.Base(found)
			input, err = ioutil.ReadFile(found)
			if err != nil {
				logger.Fatal("Failed to read file \"%s\" (%s)", found, err)
			}
		}

		if len(input) == 0 {
			logger.Fatal("Config file is empty")
		}

		var parsed []byte

		logger.Debug("Parsing pipeline...")

		// Parse the pipeline and prepare it for upload
		parsed, err = agent.PipelineParser{Data: input}.Parse()
		if err != nil {
			logger.Fatal("Pipeline parsing of \"%s\" failed (%s)", filename, err)
		}

		// Create the API client
		client := agent.APIClient{
			Endpoint: cfg.Endpoint,
			Token:    cfg.AgentAccessToken,
		}.Create()

		// Generate a UUID that will identify this pipeline change. We
		// do this outside of the retry loop because we want this UUID
func (a *ArtifactUploader) upload(artifacts []*api.Artifact) error {
	var uploader Uploader

	// Determine what uploader to use
	if a.Destination != "" {
		if strings.HasPrefix(a.Destination, "s3://") {
			uploader = new(S3Uploader)
		} else {
			return errors.New("Unknown upload destination: " + a.Destination)
		}
	} else {
		uploader = new(FormUploader)
	}

	// Setup the uploader
	err := uploader.Setup(a.Destination, a.APIClient.DebugHTTP)
	if err != nil {
		return err
	}

	// Set the URLs of the artifacts based on the uploader
	for _, artifact := range artifacts {
		artifact.URL = uploader.URL(artifact)
	}

	// Create the artifacts on Buildkite
	batchCreator := ArtifactBatchCreator{
		APIClient:         a.APIClient,
		JobID:             a.JobID,
		Artifacts:         artifacts,
		UploadDestination: a.Destination,
	}
	artifacts, err = batchCreator.Create()
	if err != nil {
		return err
	}

	// Prepare a concurrency pool to upload the artifacts
	p := pool.New(pool.MaxConcurrencyLimit)
	errors := []error{}

	// Create a wait group so we can make sure the uploader waits for all
	// the artifact states to upload before finishing
	var stateUploaderWaitGroup sync.WaitGroup
	stateUploaderWaitGroup.Add(1)

	// A map to keep track of artifact states and how many we've uploaded
	artifactsStates := make(map[string]string)
	artifactStatesUploaded := 0

	// Spin up a goroutine that'll upload artifact statuses every few
	// seconds in batches
	go func() {
		for artifactStatesUploaded < len(artifacts) {
			statesToUpload := make(map[string]string)

			// Grab all the states we need to upload, and remove
			// them from the tracking map
			for id, state := range artifactsStates {
				statesToUpload[id] = state
				delete(artifactsStates, id)
			}

			if len(statesToUpload) > 0 {
				artifactStatesUploaded += len(statesToUpload)
				for id, state := range statesToUpload {
					logger.Debug("Artifact `%s` has state `%s`", id, state)
				}

				// Update the states of the artifacts in bulk.
				err = retry.Do(func(s *retry.Stats) error {
					_, err = a.APIClient.Artifacts.Update(a.JobID, statesToUpload)
					if err != nil {
						logger.Warn("%s (%s)", err, s)
					}

					return err
				}, &retry.Config{Maximum: 10, Interval: 1 * time.Second})

				if err != nil {
					logger.Error("Error uploading artifact states: %s", err)

					// Track the error that was raised
					p.Lock()
					errors = append(errors, err)
					p.Unlock()
				}

				logger.Debug("Uploaded %d artfact states (%d/%d)", len(statesToUpload), artifactStatesUploaded, len(artifacts))
			}

			// Check again for states to upload in a few seconds
			time.Sleep(1 * time.Second)
		}

		stateUploaderWaitGroup.Done()
	}()

	for _, artifact := range artifacts {
		// Create new instance of the artifact for the goroutine
		// See: http://golang.org/doc/effective_go.html#channels
		artifact := artifact

		p.Spawn(func() {
			// Show a nice message that we're starting to upload the file
			logger.Info("Uploading \"%s\" %d bytes", artifact.Path, artifact.FileSize)

			// Upload the artifact and then set the state depending
			// on whether or not it passed. We'll retry the upload
			// a couple of times before giving up. Keep the error in
			// a local variable so concurrent uploads don't race on
			// a shared one.
			uploadErr := retry.Do(func(s *retry.Stats) error {
				err := uploader.Upload(artifact)
				if err != nil {
					logger.Warn("%s (%s)", err, s)
				}

				return err
			}, &retry.Config{Maximum: 10, Interval: 1 * time.Second})

			var state string

			// Did the upload eventually fail?
			if uploadErr != nil {
				logger.Error("Error uploading artifact \"%s\": %s", artifact.Path, uploadErr)

				// Track the error that was raised
				p.Lock()
				errors = append(errors, uploadErr)
				p.Unlock()

				state = "error"
			} else {
				state = "finished"
			}

			artifactsStates[artifact.ID] = state
		})
	}

	// Wait for the pool to finish
	p.Wait()

	// Wait for the statuses to finish uploading
	stateUploaderWaitGroup.Wait()

	if len(errors) > 0 {
		logger.Fatal("There were errors with uploading some of the artifacts")
	}

	return nil
}