Example #1
func (r *Reporter) PublishArtifacts(cmdCnf client.ConfigCmd, a adapter.Adapter, clientLog *client.Log) {
	r.runWithDeadline(r.deadline, func() {
		if r.bucket == nil {
			return
		}

		if len(cmdCnf.Artifacts) == 0 {
			return
		}

		matches, err := a.CollectArtifacts(cmdCnf.Artifacts, clientLog)
		if err != nil {
			clientLog.Writeln(fmt.Sprintf("[artifactstore] ERROR filtering artifacts: " + err.Error()))
			return
		}

		var wg sync.WaitGroup
		for _, artifact := range matches {
			wg.Add(1)
			go func(artifact string) {
				defer wg.Done()

				log.Printf("[artifactstore] Uploading: %s", artifact)
				fileBaseName := filepath.Base(artifact)

				f, err := os.Open(artifact)
				if err != nil {
					clientLog.Writeln(fmt.Sprintf("[artifactstore] Error opening file for streaming %s: %s", artifact, err))
					return
				}
				defer f.Close()

				stat, err := f.Stat()
				if err != nil {
					clientLog.Writeln(fmt.Sprintf("[artifactstore] Error stat'ing file for streaming %s: %s", artifact, err))
					return
				}

				sAfct, err := r.bucket.NewStreamedArtifact(fileBaseName, stat.Size())
				if err != nil {
					clientLog.Writeln(fmt.Sprintf("[artifactstore] Error creating streaming artifact for %s: %s", artifact, err))
					return
				}

				// TODO: If possible, avoid reading the entire contents of the file into memory and pass the
				// file io.Reader directly to http.Post.
				//
				// It is done this way because bytes.NewReader() ensures that the Content-Length header is set
				// to a correct value; otherwise it is left blank. Alternatively, we could remove this
				// requirement from the server, where Content-Length is verified before starting the upload to S3.
				contents, err := ioutil.ReadAll(f)
				if err != nil {
					clientLog.Writeln(fmt.Sprintf("[artifactstore] Error reading file for streaming %s: %s", artifact, err))
					return
				}

				if err := sAfct.UploadArtifact(bytes.NewReader(contents)); err != nil {
					// TODO: retry if this is not a terminal error
					clientLog.Writeln(fmt.Sprintf("[artifactstore] Error uploading contents of %s: %s", artifact, err))
					return
				}

				clientLog.Writeln(fmt.Sprintf("[artifactstore] Successfully uploaded artifact %s to %s", artifact, sAfct.GetContentURL()))
			}(artifact)
		}

		wg.Wait()
	})
}
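
The TODO in Example #1 buffers the whole file only so that the Content-Length header comes out correct. A minimal sketch of the streaming alternative, assuming a plain HTTP POST endpoint rather than the bucket's UploadArtifact API (streamUpload, uploadURL, and the content type are placeholders, not part of the original code): net/http fills in Content-Length automatically only for in-memory readers, but the value can be set explicitly on the request, so an *os.File can be streamed without reading it into memory.

package artifactstore

import (
	"fmt"
	"net/http"
	"os"
)

// streamUpload POSTs a file without buffering it in memory.
// uploadURL is a hypothetical endpoint; the original code uploads
// through bucket.NewStreamedArtifact instead.
func streamUpload(uploadURL, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	stat, err := f.Stat()
	if err != nil {
		return err
	}

	req, err := http.NewRequest("POST", uploadURL, f)
	if err != nil {
		return err
	}
	// net/http only fills in Content-Length automatically for bytes.Reader,
	// bytes.Buffer, and strings.Reader bodies; for an *os.File it must be
	// set explicitly, otherwise the header is omitted.
	req.ContentLength = stat.Size()
	req.Header.Set("Content-Type", "application/octet-stream")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 300 {
		return fmt.Errorf("upload failed: %s", resp.Status)
	}
	return nil
}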
Example #2
// If we were running in an lxc container, the artifacts are already grouped,
// but they need to be moved out of the container and into the actual
// artifact destination. Because we pass the Jenkins environment variables
// through to the commands inside the container, the artifacts should be in
// the location we would normally expect, just nested within the mounted
// filesystem.
func (r *Reporter) PublishArtifacts(cmdCnf client.ConfigCmd, a adapter.Adapter, clientLog *client.Log) {
	if a.GetRootFs() == "/" {
		log.Printf("[reporter] RootFs is /, no need to move artifacts")
		return
	}

	artifactSource := path.Join(a.GetRootFs(), r.artifactDestination)
	log.Printf("[reporter] Moving artifacts from %s to: %s\n", artifactSource, r.artifactDestination)
	cmd := exec.Command("mkdir", "-p", artifactDestination)
	err := cmd.Run()
	if err != nil {
		log.Printf("[reporter] Failed to create artifact destination")
	}
	cmd = exec.Command("cp", "-f", "-r", path.Join(artifactSource, "."), r.artifactDestination)
	err = cmd.Run()
	if err != nil {
		log.Printf("[reporter] Failed to push artifacts; possibly the source artifact folder did not exist")
	}
}
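
A variation on Example #2, sketched under the assumption that only the directory creation changes: os.MkdirAll replaces the external `mkdir -p` call and surfaces the underlying error directly, while the recursive copy still shells out to cp. The moveArtifacts helper and its parameters are hypothetical stand-ins for a.GetRootFs() and r.artifactDestination.

package reporter

import (
	"log"
	"os"
	"os/exec"
	"path"
)

// moveArtifacts copies everything under rootFs/dest into dest on the host.
// rootFs and dest are hypothetical parameters standing in for a.GetRootFs()
// and r.artifactDestination from Example #2.
func moveArtifacts(rootFs, dest string) error {
	if rootFs == "/" {
		// The command ran directly on the host; nothing to move.
		return nil
	}
	source := path.Join(rootFs, dest)
	log.Printf("[reporter] Moving artifacts from %s to: %s", source, dest)

	// os.MkdirAll is a no-op if dest already exists and returns the
	// underlying error, unlike the external `mkdir -p` invocation.
	if err := os.MkdirAll(dest, 0755); err != nil {
		return err
	}

	// The recursive copy still uses cp, as in the original; CombinedOutput
	// captures cp's stderr so the failure reason can be logged.
	out, err := exec.Command("cp", "-f", "-r", path.Join(source, "."), dest).CombinedOutput()
	if err != nil {
		log.Printf("[reporter] Failed to push artifacts: %s: %s", err, out)
	}
	return err
}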
Example #3
func (r *Reporter) PublishArtifacts(cmd client.ConfigCmd, a adapter.Adapter, clientLog *client.Log) {
	if len(cmd.Artifacts) == 0 {
		clientLog.Writeln("==> Skipping artifact collection")
		return
	}

	clientLog.Writeln(fmt.Sprintf("==> Collecting artifacts matching %s", cmd.Artifacts))

	matches, err := a.CollectArtifacts(cmd.Artifacts, clientLog)
	if err != nil {
		clientLog.Writeln(fmt.Sprintf("==> ERROR: " + err.Error()))
		return
	}

	for _, artifact := range matches {
		clientLog.Writeln(fmt.Sprintf("==> Found: %s", artifact))
	}

	r.pushArtifacts(matches)
}