func TestS3ProviderUpload(t *testing.T) {
	opts := NewOptions()
	s3p := newS3Provider(opts, getPanicLogger())
	s3p.overrideConn = testS3
	s3p.overrideAuth = aws.Auth{
		AccessKey: "whatever",
		SecretKey: "whatever",
		Token:     "whatever",
	}

	in := make(chan *artifact.Artifact)
	out := make(chan *artifact.Artifact)
	done := make(chan bool)

	go s3p.Upload("test-0", opts, in, out, done)

	go func() {
		for _, p := range testArtifactPaths {
			if !p.Valid {
				continue
			}

			a := artifact.New("bucket", p.Path, "linux/foo", &artifact.Options{
				Perm:     s3.PublicRead,
				RepoSlug: "owner/foo",
			})

			in <- a
			fmt.Printf("---> Fed artifact: %#v\n", a)
		}
		close(in)
	}()

	accum := []*artifact.Artifact{}
	for {
		select {
		case <-time.After(5 * time.Second):
			t.Fatalf("took too long oh derp")
		case a := <-out:
			accum = append(accum, a)
		case <-done:
			if len(accum) == 0 {
				t.Fatalf("nothing uploaded")
			}
			return
		}
	}
}

func TestArtifactsUpload(t *testing.T) {
	opts := NewOptions()
	log := getPanicLogger()
	ap := newArtifactsProvider(opts, log)
	ap.overrideClient = &nullPutter{} // stub client so the test performs no real uploads

	in := make(chan *artifact.Artifact)
	out := make(chan *artifact.Artifact)
	done := make(chan bool)

	go ap.Upload("test-0", opts, in, out, done)

	go func() {
		for _, p := range testArtifactPaths {
			if !p.Valid {
				continue
			}

			a := artifact.New("bucket", p.Path, "linux/foo", &artifact.Options{
				Perm:     s3.PublicRead,
				RepoSlug: "owner/foo",
			})

			in <- a
			fmt.Printf("---> Fed artifact: %#v\n", a)
		}
		close(in)
	}()

	accum := []*artifact.Artifact{}
	for {
		select {
		case <-time.After(5 * time.Second):
			t.Fatalf("took too long oh derp")
		case a := <-out:
			accum = append(accum, a)
		case <-done:
			if len(accum) == 0 {
				t.Fatalf("nothing uploaded")
			}
			return
		}
	}
}
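
TestArtifactsUpload swaps the provider's client for a nullPutter so the test never performs real uploads. The stub's definition is not part of this listing; the snippet below is only a minimal sketch of the idea, assuming the client is consumed through a single-method interface (the interface name and the PutArtifact method are assumptions, not this package's verified API).

// Hypothetical sketch of a no-op client stub; the real client interface in
// the upload package may differ.
type artifactPutter interface {
	PutArtifact(a *artifact.Artifact) error
}

type nullPutter struct{}

// PutArtifact accepts every artifact and reports success without doing any I/O.
func (np *nullPutter) PutArtifact(a *artifact.Artifact) error {
	return nil
}
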
Example #3

// artifactFeederLoop walks the files under path, builds an artifact for each
// file and each configured target path, and feeds them onto the artifacts
// channel while enforcing the configured maximum total upload size.
func (u *uploader) artifactFeederLoop(path *path.Path, artifacts chan *artifact.Artifact) error {
	to, from, root := path.To, path.From, path.Root
	// When the source path is a directory, walk relative to the directory itself.
	if path.IsDir() {
		root = filepath.Join(root, from)
	}

	artifactOpts := &artifact.Options{
		Perm:        u.Opts.Perm,
		RepoSlug:    u.Opts.RepoSlug,
		BuildNumber: u.Opts.BuildNumber,
		BuildID:     u.Opts.BuildID,
		JobNumber:   u.Opts.JobNumber,
		JobID:       u.Opts.JobID,
	}

	filepath.Walk(path.Fullpath(), func(f string, info os.FileInfo, err error) error {
		// Only files are uploaded; skip directory entries.
		if info != nil && info.IsDir() {
			return nil
		}

		// Derive the path relative to the upload root; it becomes the default destination.
		relPath := strings.Replace(strings.Replace(f, root, "", -1), root+"/", "", -1)
		destination := relPath
		if len(to) > 0 {
			if path.IsDir() {
				destination = filepath.Join(to, relPath)
			} else {
				destination = to
			}
		}

		// Queue one artifact per configured target path, tracking the running
		// total upload size under a lock.
		for _, targetPath := range u.Opts.TargetPaths {
			err := func() error {
				u.curSize.Lock()
				defer u.curSize.Unlock()

				a := artifact.New(path, targetPath, destination, artifactOpts)

				size, err := a.Size()
				if err != nil {
					return err
				}

				u.curSize.Current += size
				logFields := logrus.Fields{
					"current_size":     humanize.Bytes(u.curSize.Current),
					"max_size":         humanize.Bytes(u.Opts.MaxSize),
					"percent_max_size": pctMax(size, u.Opts.MaxSize),
					"artifact":         relPath,
					"artifact_size":    humanize.Bytes(size),
				}

				// Refuse to queue an artifact that would push the total past the
				// configured maximum size.
				if u.curSize.Current > u.Opts.MaxSize {
					msg := "max-size would be exceeded"
					u.log.WithFields(logFields).Error(msg)
					return fmt.Errorf(msg)
				}

				u.log.WithFields(logFields).Debug("queueing artifact")
				artifacts <- a
				return nil
			}()
			if err != nil {
				return err
			}
		}
		return nil
	})

	return nil
}
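
The pctMax helper used in the log fields above is not shown in this listing. A plausible one-line sketch, assuming it simply reports an artifact's size as a percentage of the configured maximum (an assumption, not the verified implementation):

// pctMax (sketch): the artifact size expressed as a percentage of maxSize.
// Hypothetical reconstruction of the helper referenced in artifactFeederLoop.
func pctMax(size, maxSize uint64) float64 {
	return 100.0 * (float64(size) / float64(maxSize))
}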