Example #1
// Run an ogletest test that checks expectations for parallel calls to
// symlink(2).
func RunSymlinkInParallelTest(
	ctx context.Context,
	dir string) {
	// Ensure that we get parallelism for this test.
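	// (The inner GOMAXPROCS call sets the limit to NumCPU and returns the
	// previous value, which the deferred outer call restores when the test
	// returns.)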
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU()))

	// Try for a while to see if anything breaks.
	const duration = 500 * time.Millisecond
	startTime := time.Now()
	for time.Since(startTime) < duration {
		filename := path.Join(dir, "foo")

		// Set up a function that creates the symlink, ignoring EEXIST errors.
		worker := func(id byte) (err error) {
			err = os.Symlink("blah", filename)

			if os.IsExist(err) {
				err = nil
			}

			if err != nil {
				err = fmt.Errorf("Worker %d: Symlink: %v", id, err)
				return
			}

			return
		}

		// Run several workers in parallel.
		const numWorkers = 16
		b := syncutil.NewBundle(ctx)
		for i := 0; i < numWorkers; i++ {
			id := byte(i)
			b.Add(func(ctx context.Context) (err error) {
				err = worker(id)
				return
			})
		}

		err := b.Join()
		AssertEq(nil, err)

		// The symlink should have been created exactly once.
		entries, err := ReadDirPicky(dir)
		AssertEq(nil, err)
		AssertEq(1, len(entries))
		AssertEq("foo", entries[0].Name())

		// Delete the symlink.
		err = os.Remove(filename)
		AssertEq(nil, err)
	}
}
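The examples on this page all share the same skeleton: create a bundle from a context, Add one function per concurrent operation, and Join to wait for all of them, receiving the first non-nil error. Here is a minimal, self-contained sketch of that skeleton, assuming the github.com/jacobsa/syncutil package used above (the worker body is illustrative only):

package main

import (
	"context"
	"fmt"

	"github.com/jacobsa/syncutil"
)

func main() {
	b := syncutil.NewBundle(context.Background())

	// Add one operation per worker; each receives the bundle's context.
	for i := 0; i < 4; i++ {
		id := i // capture the loop variable, as the examples above do
		b.Add(func(ctx context.Context) error {
			fmt.Printf("worker %d running\n", id)
			return nil
		})
	}

	// Join blocks until every operation has returned and yields the first
	// non-nil error, if any.
	if err := b.Join(); err != nil {
		fmt.Println("error:", err)
	}
}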
Example #2
// Run workers until SIGINT is received. Return a slice of results.
func runWorkers(
	ctx context.Context,
	o *gcs.Object,
	bucket gcs.Bucket) (results []result, err error) {
	b := syncutil.NewBundle(ctx)

	// Set up a channel that is closed upon SIGINT.
	stop := make(chan struct{})
	go func() {
		c := make(chan os.Signal, 1)
		signal.Notify(c, os.Interrupt)

		<-c
		log.Printf("SIGINT received. Stopping soon...")
		close(stop)
	}()

	// Start several workers making random reads.
	var wg sync.WaitGroup
	resultChan := make(chan result)
	for i := 0; i < *fWorkers; i++ {
		wg.Add(1)
		b.Add(func(ctx context.Context) (err error) {
			defer wg.Done()
			err = makeReads(ctx, o, bucket, resultChan, stop)
			if err != nil {
				err = fmt.Errorf("makeReads: %v", err)
				return
			}

			return
		})
	}

	go func() {
		wg.Wait()
		close(resultChan)
	}()

	// Accumulate results.
	b.Add(func(ctx context.Context) (err error) {
		for r := range resultChan {
			results = append(results, r)
		}

		return
	})

	err = b.Join()
	return
}
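The close-after-wait idiom above is what lets the accumulator's range loop terminate: a plain goroutine, deliberately not a bundle operation since it cannot fail, closes resultChan once every producer has called wg.Done(). The same shape using only the standard library (names illustrative):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const numProducers = 3
	results := make(chan int)

	var wg sync.WaitGroup
	for i := 0; i < numProducers; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			results <- id // each producer sends, then signals completion
		}(i)
	}

	// Close the channel only after every producer has finished, so the
	// consuming range loop below can terminate.
	go func() {
		wg.Wait()
		close(results)
	}()

	for r := range results {
		fmt.Println("got", r)
	}
}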
Example #3
// Delete all objects from the supplied bucket. Results are undefined if the
// bucket is being concurrently updated.
func DeleteAllObjects(
	ctx context.Context,
	bucket gcs.Bucket) error {
	bundle := syncutil.NewBundle(ctx)

	// List all of the objects in the bucket.
	objects := make(chan *gcs.Object, 100)
	bundle.Add(func(ctx context.Context) error {
		defer close(objects)
		return ListPrefix(ctx, bucket, "", objects)
	})

	// Strip everything but the name.
	objectNames := make(chan string, 10e3)
	bundle.Add(func(ctx context.Context) (err error) {
		defer close(objectNames)
		for o := range objects {
			select {
			case <-ctx.Done():
				err = ctx.Err()
				return

			case objectNames <- o.Name:
			}
		}

		return
	})

	// Delete the objects in parallel.
	const parallelism = 64
	for i := 0; i < parallelism; i++ {
		bundle.Add(func(ctx context.Context) error {
			for objectName := range objectNames {
				err := bucket.DeleteObject(
					ctx,
					&gcs.DeleteObjectRequest{
						Name: objectName,
					})

				if err != nil {
					return err
				}
			}

			return nil
		})
	}

	return bundle.Join()
}
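DeleteAllObjects forms a three-stage pipeline: a lister fills a channel (closed via defer so downstream range loops terminate), a second stage strips objects down to their names, and a pool of 64 deleters drains those names. Each blocking send selects on ctx.Done() so a stage unwinds if the bundle's context is cancelled after another operation fails. Below is a stripped-down two-stage sketch of the same shape, with fmt.Println standing in for the real DeleteObject call:

package main

import (
	"context"
	"fmt"

	"github.com/jacobsa/syncutil"
)

func main() {
	b := syncutil.NewBundle(context.Background())

	// Stage 1: feed names into a channel, closing it when done so the
	// consumers' range loops terminate. The select unblocks the send if
	// the context is cancelled because some other operation failed.
	names := make(chan string, 10)
	b.Add(func(ctx context.Context) error {
		defer close(names)
		for _, n := range []string{"a", "b", "c"} {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case names <- n:
			}
		}
		return nil
	})

	// Stage 2: a small pool of consumers drains the channel in parallel.
	for i := 0; i < 2; i++ {
		b.Add(func(ctx context.Context) error {
			for n := range names {
				fmt.Println("would delete", n)
			}
			return nil
		})
	}

	if err := b.Join(); err != nil {
		fmt.Println("error:", err)
	}
}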
Example #4
func (d *dirInode) lookUpChildDir(
	ctx context.Context,
	name string) (result LookUpResult, err error) {
	b := syncutil.NewBundle(ctx)

	// Stat the placeholder object.
	b.Add(func(ctx context.Context) (err error) {
		result.FullName = d.Name() + name + "/"
		result.Object, err = statObjectMayNotExist(ctx, d.bucket, result.FullName)
		if err != nil {
			err = fmt.Errorf("statObjectMayNotExist: %v", err)
			return
		}

		return
	})

	// If implicit directories are enabled, find out whether the child name is
	// implicitly defined.
	if d.implicitDirs {
		b.Add(func(ctx context.Context) (err error) {
			result.ImplicitDir, err = objectNamePrefixNonEmpty(
				ctx,
				d.bucket,
				d.Name()+name+"/")

			if err != nil {
				err = fmt.Errorf("objectNamePrefixNonEmpty: %v", err)
				return
			}

			return
		})
	}

	// Wait for both.
	err = b.Join()
	return
}
Example #5
// Create multiple objects with some parallelism, with contents according to
// the supplied map from name to contents.
func CreateObjects(
	ctx context.Context,
	bucket gcs.Bucket,
	input map[string][]byte) (err error) {
	bundle := syncutil.NewBundle(ctx)

	// Feed records of names and contents into a channel.
	type record struct {
		name     string
		contents []byte
	}

	recordChan := make(chan record, len(input))
	for name, contents := range input {
		recordChan <- record{name, contents}
	}

	close(recordChan)

	// Create the objects in parallel.
	const parallelism = 64
	for i := 0; i < parallelism; i++ {
		bundle.Add(func(ctx context.Context) (err error) {
			for r := range recordChan {
				_, err = CreateObject(
					ctx, bucket,
					r.name,
					r.contents)

				if err != nil {
					return
				}
			}

			return
		})
	}

	err = bundle.Join()
	return
}
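Unlike the pipeline above, CreateObjects needs no producer goroutine: len(input) is known up front, so the channel is buffered large enough to hold every record, filled, and closed before any worker starts. A standalone sketch of that pre-filled work-queue idiom:

package main

import (
	"fmt"
	"sync"
)

func main() {
	input := map[string][]byte{"a": nil, "b": nil, "c": nil}

	// Buffering to len(input) means filling the channel can never block,
	// so it can be loaded and closed before any consumer exists.
	work := make(chan string, len(input))
	for name := range input {
		work <- name
	}
	close(work)

	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for name := range work {
				fmt.Println("would create", name)
			}
		}()
	}

	wg.Wait()
}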
Example #6
// Run an ogletest test that checks expectations for parallel calls to open(2)
// with O_CREAT.
func RunCreateInParallelTest_NoTruncate(
	ctx context.Context,
	dir string) {
	// Ensure that we get parallelism for this test.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU()))

	// Try for a while to see if anything breaks.
	const duration = 500 * time.Millisecond
	startTime := time.Now()
	for time.Since(startTime) < duration {
		filename := path.Join(dir, "foo")

		// Set up a function that opens the file with O_CREATE and then appends a
		// byte to it.
		worker := func(id byte) (err error) {
			f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
			if err != nil {
				err = fmt.Errorf("Worker %d: Open: %v", id, err)
				return
			}

			defer f.Close()

			_, err = f.Write([]byte{id})
			if err != nil {
				err = fmt.Errorf("Worker %d: Write: %v", id, err)
				return
			}

			return
		}

		// Run several workers in parallel.
		const numWorkers = 16
		b := syncutil.NewBundle(ctx)
		for i := 0; i < numWorkers; i++ {
			id := byte(i)
			b.Add(func(ctx context.Context) (err error) {
				err = worker(id)
				return
			})
		}

		err := b.Join()
		AssertEq(nil, err)

		// Read the contents of the file. We should see each worker's ID once.
		contents, err := ioutil.ReadFile(filename)
		AssertEq(nil, err)

		idsSeen := make(map[byte]struct{})
		for _, id := range contents {
			AssertLt(id, numWorkers)

			if _, ok := idsSeen[id]; ok {
				AddFailure("Duplicate ID: %d", id)
			}

			idsSeen[id] = struct{}{}
		}

		AssertEq(numWorkers, len(idsSeen))

		// Delete the file.
		err = os.Remove(filename)
		AssertEq(nil, err)
	}
}
Example #7
// Run an ogletest test that checks expectations for parallel calls to open(2)
// with O_CREAT|O_EXCL.
func RunCreateInParallelTest_Exclusive(
	ctx context.Context,
	dir string) {
	// Ensure that we get parallelism for this test.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU()))

	// Try for a while to see if anything breaks.
	const duration = 500 * time.Millisecond
	startTime := time.Now()
	for time.Since(startTime) < duration {
		filename := path.Join(dir, "foo")

		// Set up a function that opens the file with O_CREATE and O_EXCL, and then
		// appends a byte to it if it was successfully opened.
		var openCount uint64
		worker := func(id byte) (err error) {
			f, err := os.OpenFile(
				filename,
				os.O_CREATE|os.O_EXCL|os.O_WRONLY|os.O_APPEND,
				0600)

			// If we failed to open due to the file already existing, just leave.
			if os.IsExist(err) {
				err = nil
				return
			}

			// Propagate other errors.
			if err != nil {
				err = fmt.Errorf("Worker %d: Open: %v", id, err)
				return
			}

			atomic.AddUint64(&openCount, 1)
			defer f.Close()

			_, err = f.Write([]byte{id})
			if err != nil {
				err = fmt.Errorf("Worker %d: Write: %v", id, err)
				return
			}

			return
		}

		// Run several workers in parallel.
		const numWorkers = 16
		b := syncutil.NewBundle(ctx)
		for i := 0; i < numWorkers; i++ {
			id := byte(i)
			b.Add(func(ctx context.Context) (err error) {
				err = worker(id)
				return
			})
		}

		err := b.Join()
		AssertEq(nil, err)

		// Exactly one worker should have opened successfully.
		AssertEq(1, openCount)

		// Read the contents of the file. It should contain that one worker's ID.
		contents, err := ioutil.ReadFile(filename)
		AssertEq(nil, err)

		AssertEq(1, len(contents))
		AssertLt(contents[0], numWorkers)

		// Delete the file.
		err = os.Remove(filename)
		AssertEq(nil, err)
	}
}
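The test counts successful opens with sync/atomic rather than a mutex: the workers race on openCount, but the plain read in the assertion afterwards is safe because b.Join() returns only after every write. A minimal standalone version of the idiom, with a WaitGroup playing the role of the bundle:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var openCount uint64 // hypothetical counter, mirroring the test above
	var wg sync.WaitGroup

	for i := 0; i < 16; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Racing goroutines must mutate the counter atomically.
			atomic.AddUint64(&openCount, 1)
		}()
	}

	// wg.Wait() plays the role of b.Join(): it orders all of the atomic
	// writes before the plain read below, so no atomic load is needed.
	wg.Wait()
	fmt.Println("count:", openCount)
}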
Example #8
func (t *StressTest) TruncateFileManyTimesInParallel() {
	// Ensure that we get parallelism for this test.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU()))

	// Create a file.
	f, err := os.Create(path.Join(t.Dir, "foo"))
	AssertEq(nil, err)
	defer f.Close()

	// Set up a function that repeatedly truncates the file to random lengths,
	// writing the final size to a channel.
	worker := func(finalSize chan<- int64) (err error) {
		const desiredDuration = 500 * time.Millisecond

		var size int64
		startTime := time.Now()
		for time.Since(startTime) < desiredDuration {
			for i := 0; i < 10; i++ {
				size = rand.Int63n(1 << 14)
				err = f.Truncate(size)
				if err != nil {
					return
				}
			}
		}

		finalSize <- size
		return
	}

	// Run several workers.
	b := syncutil.NewBundle(t.ctx)

	const numWorkers = 16
	finalSizes := make(chan int64, numWorkers)

	for i := 0; i < numWorkers; i++ {
		b.Add(func(ctx context.Context) (err error) {
			err = worker(finalSizes)
			return
		})
	}

	err = b.Join()
	AssertEq(nil, err)

	close(finalSizes)

	// The final size should be consistent.
	fi, err := f.Stat()
	AssertEq(nil, err)

	var found bool
	for s := range finalSizes {
		if s == fi.Size() {
			found = true
			break
		}
	}

	ExpectTrue(found, "Unexpected size: %d", fi.Size())
}
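Note the capacity of finalSizes: it equals numWorkers, so each worker's single send completes immediately even though nothing receives from the channel until after b.Join(). An unbuffered channel would deadlock. A reduced sketch of why the buffer size matters:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const numWorkers = 4

	// Capacity numWorkers guarantees each worker's single send completes
	// immediately, even though nothing receives until all workers exit.
	// An unbuffered channel would deadlock here.
	finalSizes := make(chan int64, numWorkers)

	var wg sync.WaitGroup
	for i := 0; i < numWorkers; i++ {
		wg.Add(1)
		go func(id int64) {
			defer wg.Done()
			finalSizes <- id * 100 // stand-in for the final truncated size
		}(int64(i))
	}

	wg.Wait()
	close(finalSizes)

	for s := range finalSizes {
		fmt.Println("final size:", s)
	}
}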
Example #9
// LOCKS_REQUIRED(d)
func (d *dirInode) LookUpChild(
	ctx context.Context,
	name string) (result LookUpResult, err error) {
	// Consult the cache about the type of the child. This may save us work
	// below.
	now := d.clock.Now()
	cacheSaysFile := d.cache.IsFile(now, name)
	cacheSaysDir := d.cache.IsDir(now, name)

	// Is this a conflict marker name?
	if strings.HasSuffix(name, ConflictingFileNameSuffix) {
		result, err = d.lookUpConflicting(ctx, name)
		return
	}

	// Stat the child as a file, unless the cache has told us it's a directory
	// but not a file.
	b := syncutil.NewBundle(ctx)

	var fileResult LookUpResult
	if !(cacheSaysDir && !cacheSaysFile) {
		b.Add(func(ctx context.Context) (err error) {
			fileResult, err = d.lookUpChildFile(ctx, name)
			return
		})
	}

	// Stat the child as a directory, unless the cache has told us it's a file
	// but not a directory.
	var dirResult LookUpResult
	if !(cacheSaysFile && !cacheSaysDir) {
		b.Add(func(ctx context.Context) (err error) {
			dirResult, err = d.lookUpChildDir(ctx, name)
			return
		})
	}

	// Wait for both.
	err = b.Join()
	if err != nil {
		return
	}

	// Prefer directories over files.
	switch {
	case dirResult.Exists():
		result = dirResult
	case fileResult.Exists():
		result = fileResult
	}

	// Update the cache.
	now = d.clock.Now()
	if fileResult.Exists() {
		d.cache.NoteFile(now, name)
	}

	if dirResult.Exists() {
		d.cache.NoteDir(now, name)
	}

	return
}
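As in lookUpChildDir earlier, the two operations write to distinct variables (fileResult and dirResult) captured by their closures, so no locking is needed: b.Join() establishes a happens-before edge between those writes and the reads that follow it. A minimal illustration, again assuming github.com/jacobsa/syncutil (the string values are placeholders):

package main

import (
	"context"
	"fmt"

	"github.com/jacobsa/syncutil"
)

func main() {
	b := syncutil.NewBundle(context.Background())

	// Each operation writes only its own variable, so no mutex is needed.
	var fileResult, dirResult string
	b.Add(func(ctx context.Context) error {
		fileResult = "stat as file: not found"
		return nil
	})
	b.Add(func(ctx context.Context) error {
		dirResult = "stat as dir: found"
		return nil
	})

	// Join orders both writes before these reads, making them race-free.
	if err := b.Join(); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(fileResult, "/", dirResult)
}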
Example #10
// Given a list of child names that appear to be directories according to
// d.bucket.ListObjects (which always behaves as if implicit directories are
// enabled), filter out the ones for which a placeholder object does not
// actually exist. If implicit directories are enabled, simply return them all.
//
// LOCKS_REQUIRED(d)
func (d *dirInode) filterMissingChildDirs(
	ctx context.Context,
	in []string) (out []string, err error) {
	// Do we need to do anything?
	if d.implicitDirs {
		out = in
		return
	}

	b := syncutil.NewBundle(ctx)

	// First add any names that we already know are directories according to our
	// cache, removing them from the input.
	now := d.clock.Now()
	var tmp []string
	for _, name := range in {
		if d.cache.IsDir(now, name) {
			out = append(out, name)
		} else {
			tmp = append(tmp, name)
		}
	}

	in = tmp

	// Feed names into a channel.
	unfiltered := make(chan string, 100)
	b.Add(func(ctx context.Context) (err error) {
		defer close(unfiltered)

		for _, name := range in {
			select {
			case <-ctx.Done():
				err = ctx.Err()
				return

			case unfiltered <- name:
			}
		}

		return
	})

	// Stat the placeholder object for each, filtering out placeholders that are
	// not found. Use some parallelism.
	const statWorkers = 32
	filtered := make(chan string, 100)
	var wg sync.WaitGroup
	for i := 0; i < statWorkers; i++ {
		wg.Add(1)
		b.Add(func(ctx context.Context) (err error) {
			defer wg.Done()
			err = filterMissingChildDirNames(
				ctx,
				d.bucket,
				d.Name(),
				unfiltered,
				filtered)

			return
		})
	}

	go func() {
		wg.Wait()
		close(filtered)
	}()

	// Accumulate into a slice.
	var filteredSlice []string
	b.Add(func(ctx context.Context) (err error) {
		for name := range filtered {
			filteredSlice = append(filteredSlice, name)
		}

		return
	})

	// Wait for everything to complete.
	err = b.Join()

	// Update the cache with everything we learned.
	now = d.clock.Now()
	for _, name := range filteredSlice {
		d.cache.NoteDir(now, name)
	}

	// Return everything we learned.
	out = append(out, filteredSlice...)

	return
}
Example #11
func run() (err error) {
	runtime.GOMAXPROCS(4)

	// Grab the bucket.
	bucket, err := getBucket()
	if err != nil {
		err = fmt.Errorf("getBucket: %v", err)
		return
	}

	b := syncutil.NewBundle(context.Background())

	// Create objects.
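	// (Buffer sized for roughly the number of records in flight:
	// createRateHz records per second, held for verifyAtAge, with a
	// factor of two of slack.)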
	toVerify := make(chan record, 2*createRateHz*(verifyAtAge/time.Second))
	b.Add(func(ctx context.Context) (err error) {
		defer close(toVerify)
		err = createObjects(ctx, bucket, toVerify)
		if err != nil {
			err = fmt.Errorf("createObjects: %v", err)
			return
		}

		return
	})

	// Verify them after a while.
	toVerifyAgain := make(
		chan record,
		2*createRateHz*(verifyAgainAtAge/time.Second))

	b.Add(func(ctx context.Context) (err error) {
		defer close(toVerifyAgain)
		err = verifyObjects(
			ctx,
			bucket,
			verifyAtAge,
			toVerify,
			toVerifyAgain)

		if err != nil {
			err = fmt.Errorf("verifyObjects: %v", err)
			return
		}

		return
	})

	// And again.
	andOnceMore := make(
		chan record,
		2*createRateHz*(andAgainAtAge/time.Second))

	b.Add(func(ctx context.Context) (err error) {
		defer close(andOnceMore)
		err = verifyObjects(
			ctx,
			bucket,
			verifyAgainAtAge,
			toVerifyAgain,
			andOnceMore)

		if err != nil {
			err = fmt.Errorf("verifyObjects: %v", err)
			return
		}

		return
	})

	// And again.
	b.Add(func(ctx context.Context) (err error) {
		err = verifyObjects(
			ctx,
			bucket,
			verifyAgainAtAge,
			andOnceMore,
			nil)

		if err != nil {
			err = fmt.Errorf("verifyObjects: %v", err)
			return
		}

		return
	})

	err = b.Join()
	return
}
Example #12
func verifyObjects(
	ctx context.Context,
	bucket gcs.Bucket,
	verifyAfter time.Duration,
	in <-chan record,
	out chan<- record) (err error) {
	// Set up a worker function.
	worker := func(ctx context.Context) (err error) {
		for r := range in {
			name := fmt.Sprintf("%s%x", objectNamePrefix, r.sha1)

			// Wait until it is time.
			wakeTime := r.creationTime.Add(verifyAfter)
			select {
			case <-ctx.Done():
				err = ctx.Err()
				return

			case <-time.After(wakeTime.Sub(time.Now())):
			}

			// Attempt to read the object.
			var contents []byte
			contents, err = gcsutil.ReadObject(ctx, bucket, name)
			if err != nil {
				err = fmt.Errorf("ReadObject(%q): %v", name, err)
				return
			}

			// Check the contents.
			actual := sha1.Sum(contents)
			if actual != r.sha1 {
				err = fmt.Errorf(
					"SHA1 mismatch for %q: %x vs. %x",
					name,
					actual,
					r.sha1)

				return
			}

			log.Printf("Verified object %q.", name)

			// Pass on the record if we've been asked to.
			if out != nil {
				select {
				case <-ctx.Done():
					err = ctx.Err()
					return

				case out <- r:
				}
			}
		}

		return
	}

	// Run a bunch of workers.
	b := syncutil.NewBundle(ctx)
	for i := 0; i < perStageParallelism; i++ {
		b.Add(worker)
	}

	err = b.Join()
	return
}
Example #13
func createObjects(
	ctx context.Context,
	bucket gcs.Bucket,
	out chan<- record) (err error) {
	throttle := time.Tick(time.Second / createRateHz)

	// Set up a worker function.
	worker := func(ctx context.Context) (err error) {
		randSrc := rand.New(rand.NewSource(makeSeed()))
		for {
			// Choose a random size (every once in a while making sure we see the
			// max), and generate content of that size.
			const maxSize = 1 << 24
			var size int
			if randSrc.Int31n(100) == 0 {
				size = maxSize
			} else {
				size = int(randSrc.Int31n(maxSize + 1))
			}

			content := randBytes(randSrc, size)

			// Compute hashes and checksums.
			sha1 := sha1.Sum(content)
			crc32c := *gcsutil.CRC32C(content)

			// Wait for permission to proceed.
			select {
			case <-ctx.Done():
				err = ctx.Err()
				return

			case <-throttle:
			}

			// Create an object.
			req := &gcs.CreateObjectRequest{
				Name:     fmt.Sprintf("%s%x", objectNamePrefix, sha1),
				Contents: bytes.NewReader(content),
				CRC32C:   &crc32c,

				Metadata: map[string]string{
					"expected_sha1":   fmt.Sprintf("%x", sha1),
					"expected_crc32c": fmt.Sprintf("%#08x", crc32c),
				},
			}

			var o *gcs.Object
			o, err = bucket.CreateObject(ctx, req)
			if err != nil {
				err = fmt.Errorf("CreateObject(%q): %v", req.Name, err)
				return
			}

			log.Printf("Created object %q.", req.Name)

			// Check the object.
			if o.Name != req.Name {
				err = fmt.Errorf(
					"Name mismatch: %q vs. %q",
					o.Name,
					req.Name)

				return
			}

			if o.CRC32C != crc32c {
				err = fmt.Errorf(
					"Object %q CRC mismatch: %#08x vs. %#08x",
					o.Name,
					o.CRC32C,
					crc32c)

				return
			}

			// Write out the record.
			r := record{
				sha1:         sha1,
				creationTime: time.Now(),
			}

			select {
			case <-ctx.Done():
				err = ctx.Err()
				return

			case out <- r:
			}
		}
	}

	// Run a bunch of workers.
	b := syncutil.NewBundle(ctx)
	for i := 0; i < perStageParallelism; i++ {
		b.Add(worker)
	}

	err = b.Join()
	return
}
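All createObjects workers block on a single channel from time.Tick, so the aggregate creation rate stays near createRateHz no matter how large perStageParallelism is. A standalone sketch of channel-based throttling:

package main

import (
	"fmt"
	"time"
)

func main() {
	// One shared tick channel bounds aggregate throughput: a worker may
	// proceed only after receiving a tick. (time.Tick leaks its ticker,
	// which is fine for a long-lived worker pool or a short demo.)
	throttle := time.Tick(100 * time.Millisecond)

	for i := 0; i < 5; i++ {
		<-throttle
		fmt.Println("request", i, "at", time.Now().Format("15:04:05.000"))
	}
}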
Example #14
func createFiles(
	dir string,
	numFiles int) (files []*os.File, err error) {
	b := syncutil.NewBundle(context.Background())

	// Create files in parallel, and write them to a channel.
	const parallelism = 128

	var counter uint64
	fileChan := make(chan *os.File)
	var wg sync.WaitGroup

	for i := 0; i < parallelism; i++ {
		wg.Add(1)
		b.Add(func(ctx context.Context) (err error) {
			defer wg.Done()
			for {
				// Should we create another?
				count := atomic.AddUint64(&counter, 1)
				if count > uint64(numFiles) {
					return
				}

				// Create it.
				var f *os.File
				f, err = fsutil.AnonymousFile(dir)
				if err != nil {
					err = fmt.Errorf("AnonymousFile: %v", err)
					return
				}

				// Write it to the channel.
				select {
				case fileChan <- f:
				case <-ctx.Done():
					err = ctx.Err()
					return
				}
			}
		})
	}

	go func() {
		wg.Wait()
		close(fileChan)
	}()

	// Accumulate into the slice.
	b.Add(func(ctx context.Context) (err error) {
		for f := range fileChan {
			files = append(files, f)
		}

		return
	})

	err = b.Join()
	if err != nil {
		closeAll(files)
		files = nil
	}

	return
}
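createFiles distributes work without a feeder stage: each worker claims the next unit by atomically incrementing a shared counter and exits once the quota is exceeded. A standalone sketch of that counter-claim idiom (names illustrative):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	const numFiles = 10
	var counter uint64
	var wg sync.WaitGroup

	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for {
				// Claim the next unit of work; quit once the quota is
				// exhausted. No feeder goroutine or channel is needed.
				n := atomic.AddUint64(&counter, 1)
				if n > numFiles {
					return
				}
				fmt.Printf("worker %d handles item %d\n", id, n)
			}
		}(i)
	}

	wg.Wait()
}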
Example #15
func garbageCollectOnce(
	ctx context.Context,
	tmpObjectPrefix string,
	bucket gcs.Bucket) (objectsDeleted uint64, err error) {
	const stalenessThreshold = 30 * time.Minute
	b := syncutil.NewBundle(ctx)

	// List all objects with the temporary prefix.
	objects := make(chan *gcs.Object, 100)
	b.Add(func(ctx context.Context) (err error) {
		defer close(objects)
		err = gcsutil.ListPrefix(ctx, bucket, tmpObjectPrefix, objects)
		if err != nil {
			err = fmt.Errorf("ListPrefix: %v", err)
			return
		}

		return
	})

	// Filter to the names of objects that are stale.
	now := time.Now()
	staleNames := make(chan string, 100)
	b.Add(func(ctx context.Context) (err error) {
		defer close(staleNames)
		for o := range objects {
			if now.Sub(o.Updated) < stalenessThreshold {
				continue
			}

			select {
			case <-ctx.Done():
				err = ctx.Err()
				return

			case staleNames <- o.Name:
			}
		}

		return
	})

	// Delete those objects.
	b.Add(func(ctx context.Context) (err error) {
		for name := range staleNames {
			err = bucket.DeleteObject(
				ctx,
				&gcs.DeleteObjectRequest{
					Name: name,
				})

			if err != nil {
				err = fmt.Errorf("DeleteObject(%q): %v", name, err)
				return
			}

			atomic.AddUint64(&objectsDeleted, 1)
		}

		return
	})

	err = b.Join()
	return
}