Example #1
func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
	// Include a buffer so that slow client connections don't affect
	// transfer performance.
	progressChan := make(chan progress.Progress, 100)

	writesDone := make(chan struct{})

	ctx, cancelFunc := context.WithCancel(ctx)

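	// Forward progress from progressChan to the client's output stream;
	// writesDone is closed once the channel has been drained.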
	go func() {
		writeDistributionProgress(cancelFunc, outStream, progressChan)
		close(writesDone)
	}()

	imagePullConfig := &distribution.ImagePullConfig{
		MetaHeaders:      metaHeaders,
		AuthConfig:       authConfig,
		ProgressOutput:   progress.ChanOutput(progressChan),
		RegistryService:  daemon.RegistryService,
		ImageEventLogger: daemon.LogImageEvent,
		MetadataStore:    daemon.distributionMetadataStore,
		ImageStore:       daemon.imageStore,
		ReferenceStore:   daemon.referenceStore,
		DownloadManager:  daemon.downloadManager,
	}

	err := distribution.Pull(ctx, ref, imagePullConfig)
	close(progressChan)
	<-writesDone
	return err
}
Example #2
func TestCancelledUpload(t *testing.T) {
	lum := NewLayerUploadManager(maxUploadConcurrency, func(m *LayerUploadManager) { m.waitDuration = time.Millisecond })

	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})

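	// Drain and discard progress output so the upload never blocks on the
	// unbuffered progress channel.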
	go func() {
		for range progressChan {
		}
		close(progressDone)
	}()

	ctx, cancel := context.WithCancel(context.Background())

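	// Cancel the context shortly after the upload begins.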
	go func() {
		<-time.After(time.Millisecond)
		cancel()
	}()

	descriptors := uploadDescriptors(nil)
	err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan))
	if err != context.Canceled {
		t.Fatal("expected upload to be cancelled")
	}

	close(progressChan)
	<-progressDone
}
Example #3
func (pm *Manager) pull(ctx context.Context, ref reference.Named, config *distribution.ImagePullConfig, outStream io.Writer) error {
	if outStream != nil {
		// Include a buffer so that slow client connections don't affect
		// transfer performance.
		progressChan := make(chan progress.Progress, 100)

		writesDone := make(chan struct{})

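		// On return, close the progress channel and wait for the writer
		// goroutine below to finish flushing output to the client.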
		defer func() {
			close(progressChan)
			<-writesDone
		}()

		var cancelFunc context.CancelFunc
		ctx, cancelFunc = context.WithCancel(ctx)

		go func() {
			progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
			close(writesDone)
		}()

		config.ProgressOutput = progress.ChanOutput(progressChan)
	} else {
		config.ProgressOutput = progress.DiscardOutput()
	}
	return distribution.Pull(ctx, ref, config)
}
Example #4
func TestSuccessfulUpload(t *testing.T) {
	lum := NewLayerUploadManager(maxUploadConcurrency, func(m *LayerUploadManager) { m.waitDuration = time.Millisecond })

	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})
	receivedProgress := make(map[string]int64)

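	// Record the latest progress value reported for each upload ID.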
	go func() {
		for p := range progressChan {
			receivedProgress[p.ID] = p.Current
		}
		close(progressDone)
	}()

	var currentUploads int32
	descriptors := uploadDescriptors(&currentUploads)

	err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan))
	if err != nil {
		t.Fatalf("upload error: %v", err)
	}

	close(progressChan)
	<-progressDone
}
Example #5
func TestCancelledDownload(t *testing.T) {
	ldm := NewLayerDownloadManager(&mockLayerStore{make(map[layer.ChainID]*mockLayer)}, maxDownloadConcurrency)

	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})

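	// Discard progress output so the download never blocks on the unbuffered
	// progress channel.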
	go func() {
		for range progressChan {
		}
		close(progressDone)
	}()

	ctx, cancel := context.WithCancel(context.Background())

	go func() {
		<-time.After(time.Millisecond)
		cancel()
	}()

	descriptors := downloadDescriptors(nil)
	_, _, err := ldm.Download(ctx, *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan))
	if err != context.Canceled {
		t.Fatal("expected download to be cancelled")
	}

	close(progressChan)
	<-progressDone
}
Example #6
// PushImage initiates a push operation on the repository referenced by ref.
func (daemon *Daemon) PushImage(ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
	// Include a buffer so that slow client connections don't affect
	// transfer performance.
	progressChan := make(chan progress.Progress, 100)

	writesDone := make(chan struct{})

	ctx, cancelFunc := context.WithCancel(context.Background())

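	// Forward progress from progressChan to the client's output stream;
	// writesDone is closed once the channel has been drained.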
	go func() {
		writeDistributionProgress(cancelFunc, outStream, progressChan)
		close(writesDone)
	}()

	imagePushConfig := &distribution.ImagePushConfig{
		MetaHeaders:     metaHeaders,
		AuthConfig:      authConfig,
		ProgressOutput:  progress.ChanOutput(progressChan),
		RegistryService: daemon.RegistryService,
		EventsService:   daemon.EventsService,
		MetadataStore:   daemon.distributionMetadataStore,
		LayerStore:      daemon.layerStore,
		ImageStore:      daemon.imageStore,
		TagStore:        daemon.tagStore,
		TrustKey:        daemon.trustKey,
		UploadManager:   daemon.uploadManager,
	}

	err := distribution.Push(ctx, ref, imagePushConfig)
	close(progressChan)
	<-writesDone
	return err
}
Example #7
func TestTransfer(t *testing.T) {
	makeXferFunc := func(id string) DoFunc {
		return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
			select {
			case <-start:
			default:
				t.Fatalf("transfer function not started even though concurrency limit not reached")
			}

			xfer := NewTransfer()
			go func() {
				for i := 0; i <= 10; i++ {
					progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10}
					time.Sleep(10 * time.Millisecond)
				}
				close(progressChan)
			}()
			return xfer
		}
	}

	tm := NewTransferManager(5)
	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})
	receivedProgress := make(map[string]int64)

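	// Verify that each ID's progress values arrive in strictly increasing order.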
	go func() {
		for p := range progressChan {
			val, present := receivedProgress[p.ID]
			if present && p.Current <= val {
				t.Fatalf("got unexpected progress value: %d (expected %d)", p.Current, val+1)
			}
			receivedProgress[p.ID] = p.Current
		}
		close(progressDone)
	}()

	// Start a few transfers
	ids := []string{"id1", "id2", "id3"}
	xfers := make([]Transfer, len(ids))
	watchers := make([]*Watcher, len(ids))
	for i, id := range ids {
		xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan))
	}

	for i, xfer := range xfers {
		<-xfer.Done()
		xfer.Release(watchers[i])
	}
	close(progressChan)
	<-progressDone

	for _, id := range ids {
		if receivedProgress[id] != 10 {
			t.Fatalf("final progress value %d instead of 10", receivedProgress[id])
		}
	}
}
Example #8
func TestConcurrencyLimit(t *testing.T) {
	concurrencyLimit := 3
	var runningJobs int32

	makeXferFunc := func(id string) DoFunc {
		return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
			xfer := NewTransfer()
			go func() {
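				// Once started, check that the number of concurrently running
				// jobs never exceeds the configured limit.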
				<-start
				totalJobs := atomic.AddInt32(&runningJobs, 1)
				if int(totalJobs) > concurrencyLimit {
					t.Fatalf("too many jobs running")
				}
				for i := 0; i <= 10; i++ {
					progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10}
					time.Sleep(10 * time.Millisecond)
				}
				atomic.AddInt32(&runningJobs, -1)
				close(progressChan)
			}()
			return xfer
		}
	}

	tm := NewTransferManager(concurrencyLimit)
	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})
	receivedProgress := make(map[string]int64)

	go func() {
		for p := range progressChan {
			receivedProgress[p.ID] = p.Current
		}
		close(progressDone)
	}()

	// Start more transfers than the concurrency limit
	ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"}
	xfers := make([]Transfer, len(ids))
	watchers := make([]*Watcher, len(ids))
	for i, id := range ids {
		xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan))
	}

	for i, xfer := range xfers {
		<-xfer.Done()
		xfer.Release(watchers[i])
	}
	close(progressChan)
	<-progressDone

	for _, id := range ids {
		if receivedProgress[id] != 10 {
			t.Fatalf("final progress value %d instead of 10", receivedProgress[id])
		}
	}
}
Example #9
func TestWatchFinishedTransfer(t *testing.T) {
	makeXferFunc := func(id string) DoFunc {
		return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
			xfer := NewTransfer()
			go func() {
				// Finish immediately
				close(progressChan)
			}()
			return xfer
		}
	}

	tm := NewTransferManager(5)

	// Start a transfer
	watchers := make([]*Watcher, 3)
	var xfer Transfer
	xfer, watchers[0] = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(make(chan progress.Progress)))

	// Give it a watcher immediately
	watchers[1] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress)))

	// Wait for the transfer to complete
	<-xfer.Done()

	// Set up another watcher
	watchers[2] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress)))

	// Release the watchers
	for _, w := range watchers {
		xfer.Release(w)
	}

	// Now that all watchers have been released, Released() should
	// return a closed channel.
	<-xfer.Released()
}
Example #10
// PushImage initiates a push operation on the repository named by image, using the given tag when one is provided.
func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
	ref, err := reference.ParseNamed(image)
	if err != nil {
		return err
	}
	if tag != "" {
		// Push by digest is not supported, so only a tag may be specified.
		ref, err = reference.WithTag(ref, tag)
		if err != nil {
			return err
		}
	}

	// Include a buffer so that slow client connections don't affect
	// transfer performance.
	progressChan := make(chan progress.Progress, 100)

	writesDone := make(chan struct{})

	ctx, cancelFunc := context.WithCancel(ctx)

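	// Forward progress from progressChan to the client's output stream;
	// writesDone is closed once the channel has been drained.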
	go func() {
		writeDistributionProgress(cancelFunc, outStream, progressChan)
		close(writesDone)
	}()

	imagePushConfig := &distribution.ImagePushConfig{
		Config: distribution.Config{
			MetaHeaders:      metaHeaders,
			AuthConfig:       authConfig,
			ProgressOutput:   progress.ChanOutput(progressChan),
			RegistryService:  daemon.RegistryService,
			ImageEventLogger: daemon.LogImageEvent,
			MetadataStore:    daemon.distributionMetadataStore,
			ImageStore:       distribution.NewImageConfigStoreFromStore(daemon.imageStore),
			ReferenceStore:   daemon.referenceStore,
		},
		ConfigMediaType: schema2.MediaTypeImageConfig,
		LayerStore:      distribution.NewLayerProviderFromStore(daemon.layerStore),
		TrustKey:        daemon.trustKey,
		UploadManager:   daemon.uploadManager,
	}

	err = distribution.Push(ctx, ref, imagePushConfig)
	close(progressChan)
	<-writesDone
	return err
}
Example #11
func TestSuccessfulUpload(t *testing.T) {
	lum := NewLayerUploadManager(maxUploadConcurrency)

	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})
	receivedProgress := make(map[string]int64)

	go func() {
		for p := range progressChan {
			receivedProgress[p.ID] = p.Current
		}
		close(progressDone)
	}()

	var currentUploads int32
	descriptors := uploadDescriptors(&currentUploads)

	digests, err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan))
	if err != nil {
		t.Fatalf("upload error: %v", err)
	}

	close(progressChan)
	<-progressDone

	if len(digests) != len(expectedDigests) {
		t.Fatal("wrong number of keys in digests map")
	}

	for key, val := range expectedDigests {
		if digests[key] != val {
			t.Fatalf("mismatch in digest array for key %v (expected %v, got %v)", key, val, digests[key])
		}
		if receivedProgress[key.String()] != 10 {
			t.Fatalf("missing or wrong progress output for %v", key)
		}
	}
}
Example #12
func TestDuplicateTransfer(t *testing.T) {
	ready := make(chan struct{})

	var xferFuncCalls int32

	makeXferFunc := func(id string) DoFunc {
		return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
			atomic.AddInt32(&xferFuncCalls, 1)
			xfer := NewTransfer()
			go func() {
				defer func() {
					close(progressChan)
				}()
				<-ready
				for i := int64(0); ; i++ {
					select {
					case <-time.After(10 * time.Millisecond):
					case <-xfer.Context().Done():
						return
					}
					progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10}
				}
			}()
			return xfer
		}
	}

	tm := NewTransferManager(5)

	type transferInfo struct {
		xfer                  Transfer
		watcher               *Watcher
		progressChan          chan progress.Progress
		progressDone          chan struct{}
		receivedFirstProgress chan struct{}
	}

	progressConsumer := func(t transferInfo) {
		first := true
		for range t.progressChan {
			if first {
				close(t.receivedFirstProgress)
			}
			first = false
		}
		close(t.progressDone)
	}

	// Try to start multiple transfers with the same ID
	transfers := make([]transferInfo, 5)
	for i := range transfers {
		t := &transfers[i]
		t.progressChan = make(chan progress.Progress)
		t.progressDone = make(chan struct{})
		t.receivedFirstProgress = make(chan struct{})
		t.xfer, t.watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(t.progressChan))
		go progressConsumer(*t)
	}

	// Allow the transfer goroutine to proceed.
	close(ready)

	// Confirm that each watcher gets progress output.
	for _, t := range transfers {
		<-t.receivedFirstProgress
	}

	// Confirm that the transfer function was called exactly once.
	if atomic.LoadInt32(&xferFuncCalls) != 1 {
		t.Fatal("transfer function wasn't called exactly once")
	}

	// Release one watcher every 5ms
	for _, t := range transfers {
		t.xfer.Release(t.watcher)
		<-time.After(5 * time.Millisecond)
	}

	for _, t := range transfers {
		// Now that all watchers have been released, Released() should
		// return a closed channel.
		<-t.xfer.Released()
		// Done() should return a closed channel because the xfer func returned
		// due to cancellation.
		<-t.xfer.Done()
	}

	for _, t := range transfers {
		close(t.progressChan)
		<-t.progressDone
	}
}
Example #13
func TestWatchRelease(t *testing.T) {
	ready := make(chan struct{})

	makeXferFunc := func(id string) DoFunc {
		return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
			xfer := NewTransfer()
			go func() {
				defer func() {
					close(progressChan)
				}()
				<-ready
				for i := int64(0); ; i++ {
					select {
					case <-time.After(10 * time.Millisecond):
					case <-xfer.Context().Done():
						return
					}
					progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10}
				}
			}()
			return xfer
		}
	}

	tm := NewTransferManager(5)

	type watcherInfo struct {
		watcher               *Watcher
		progressChan          chan progress.Progress
		progressDone          chan struct{}
		receivedFirstProgress chan struct{}
	}

	progressConsumer := func(w watcherInfo) {
		first := true
		for range w.progressChan {
			if first {
				close(w.receivedFirstProgress)
			}
			first = false
		}
		close(w.progressDone)
	}

	// Start a transfer
	watchers := make([]watcherInfo, 5)
	var xfer Transfer
	watchers[0].progressChan = make(chan progress.Progress)
	watchers[0].progressDone = make(chan struct{})
	watchers[0].receivedFirstProgress = make(chan struct{})
	xfer, watchers[0].watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(watchers[0].progressChan))
	go progressConsumer(watchers[0])

	// Give it multiple watchers
	for i := 1; i != len(watchers); i++ {
		watchers[i].progressChan = make(chan progress.Progress)
		watchers[i].progressDone = make(chan struct{})
		watchers[i].receivedFirstProgress = make(chan struct{})
		watchers[i].watcher = xfer.Watch(progress.ChanOutput(watchers[i].progressChan))
		go progressConsumer(watchers[i])
	}

	// Now that the watchers are set up, allow the transfer goroutine to
	// proceed.
	close(ready)

	// Confirm that each watcher gets progress output.
	for _, w := range watchers {
		<-w.receivedFirstProgress
	}

	// Release one watcher every 5ms
	for _, w := range watchers {
		xfer.Release(w.watcher)
		<-time.After(5 * time.Millisecond)
	}

	// Now that all watchers have been released, Released() should
	// return a closed channel.
	<-xfer.Released()

	// Done() should return a closed channel because the xfer func returned
	// due to cancellation.
	<-xfer.Done()

	for _, w := range watchers {
		close(w.progressChan)
		<-w.progressDone
	}
}
Example #14
// Push pushes a plugin to the store.
func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, outStream io.Writer) error {
	p, err := pm.config.Store.GetV2Plugin(name)
	if err != nil {
		return err
	}

	ref, err := reference.ParseNamed(p.Name())
	if err != nil {
		return errors.Wrapf(err, "plugin has invalid name %v for push", p.Name())
	}

	var po progress.Output
	if outStream != nil {
		// Include a buffer so that slow client connections don't affect
		// transfer performance.
		progressChan := make(chan progress.Progress, 100)

		writesDone := make(chan struct{})

		defer func() {
			close(progressChan)
			<-writesDone
		}()

		var cancelFunc context.CancelFunc
		ctx, cancelFunc = context.WithCancel(ctx)

		go func() {
			progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
			close(writesDone)
		}()

		po = progress.ChanOutput(progressChan)
	} else {
		po = progress.DiscardOutput()
	}

	// TODO: replace these with manager
	is := &pluginConfigStore{
		pm:     pm,
		plugin: p,
	}
	ls := &pluginLayerProvider{
		pm:     pm,
		plugin: p,
	}
	rs := &pluginReference{
		name:     ref,
		pluginID: p.Config,
	}

	uploadManager := xfer.NewLayerUploadManager(3)

	imagePushConfig := &distribution.ImagePushConfig{
		Config: distribution.Config{
			MetaHeaders:      metaHeader,
			AuthConfig:       authConfig,
			ProgressOutput:   po,
			RegistryService:  pm.config.RegistryService,
			ReferenceStore:   rs,
			ImageEventLogger: pm.config.LogPluginEvent,
			ImageStore:       is,
			RequireSchema2:   true,
		},
		ConfigMediaType: schema2.MediaTypePluginConfig,
		LayerStore:      ls,
		UploadManager:   uploadManager,
	}

	return distribution.Push(ctx, ref, imagePushConfig)
}
Example #15
// makeDownloadFunc returns a func used by xfer.TransferManager to download a layer
func (ldm *LayerDownloader) makeDownloadFunc(layer *ImageWithMeta, ic *ImageC, parentDownload *downloadTransfer, layers []*ImageWithMeta) xfer.DoFunc {
	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) xfer.Transfer {

		d := &downloadTransfer{
			Transfer: xfer.NewTransfer(),
			layer:    layer,
		}

		go func() {

			defer func() {
				close(progressChan)

				// remove layer from cache if there was an error attempting to download
				if d.err != nil {
					LayerCache().Remove(layer.ID)
				}

			}()

			progressOutput := progress.ChanOutput(progressChan)

			// wait for TransferManager to give the go-ahead
			select {
			case <-start:
			default:
				progress.Update(progressOutput, layer.String(), "Waiting")
				<-start
			}

			if parentDownload != nil {
				// bail if parent download failed or was cancelled
				select {
				case <-parentDownload.Done():
					if err := parentDownload.result(); err != nil {
						d.err = err
						return
					}
				default:
				}
			}

			// fetch blob
			diffID, err := FetchImageBlob(d.Transfer.Context(), ic.Options, layer, progressOutput)
			if err != nil {
				d.err = fmt.Errorf("%s/%s returned %s", ic.Image, layer.ID, err)
				return
			}

			layer.DiffID = diffID

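			// The download itself is done; mark the transfer inactive so it no
			// longer counts toward the concurrency limit while the blob is
			// written to the storage layer.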
			close(inactive)

			if parentDownload != nil {
				select {
				case <-d.Transfer.Context().Done():
					d.err = errors.New("layer download cancelled")
					return
				default:
					<-parentDownload.Done() // block until parent download completes
				}

				if err := parentDownload.result(); err != nil {
					d.err = err
					return
				}
			}

			// is this the leaf layer?
			imageLayer := layer.ID == layers[0].ID

			// if this is the leaf layer, we are done and can now create the image config
			if imageLayer {
				imageConfig, err := ic.CreateImageConfig(layers)
				if err != nil {
					d.err = err
					return
				}
				// cache and persist the image
				cache.ImageCache().Add(&imageConfig)
				cache.ImageCache().Save()

				// place calculated ImageID in struct
				ic.ImageID = imageConfig.ImageID

				if err = updateRepositoryCache(ic); err != nil {
					d.err = err
					return
				}

			}

			ldm.m.Lock()
			defer ldm.m.Unlock()

			// Write blob to the storage layer
			if err := ic.WriteImageBlob(layer, progressOutput, imageLayer); err != nil {
				d.err = err
				return
			}

			// mark the layer as finished downloading
			LayerCache().Commit(layer)

			ldm.unregisterDownload(layer)

		}()

		return d
	}
}
Example #16
// makeDownloadFunc returns a function that performs the layer download and
// registration. If parentDownload is non-nil, it waits for that download to
// complete before the registration step, and registers the downloaded data
// on top of parentDownload's resulting layer. Otherwise, it registers the
// layer on top of the ChainID given by parentLayer.
func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer) DoFunc {
	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
		d := &downloadTransfer{
			Transfer:   NewTransfer(),
			layerStore: ldm.layerStore,
		}

		go func() {
			defer func() {
				close(progressChan)
			}()

			progressOutput := progress.ChanOutput(progressChan)

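			// Wait for the TransferManager to give the go-ahead; report "Waiting"
			// while queued behind the concurrency limit.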
			select {
			case <-start:
			default:
				progress.Update(progressOutput, descriptor.ID(), "Waiting")
				<-start
			}

			if parentDownload != nil {
				// Did the parent download already fail or get
				// cancelled?
				select {
				case <-parentDownload.Done():
					_, err := parentDownload.result()
					if err != nil {
						d.err = err
						return
					}
				default:
				}
			}

			var (
				downloadReader io.ReadCloser
				size           int64
				err            error
				retries        int
			)

			defer descriptor.Close()

			for {
				downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput)
				if err == nil {
					break
				}

				// If an error was returned because the context
				// was cancelled, we shouldn't retry.
				select {
				case <-d.Transfer.Context().Done():
					d.err = err
					return
				default:
				}

				retries++
				if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts {
					logrus.Errorf("Download failed: %v", err)
					d.err = err
					return
				}

				logrus.Errorf("Download failed, retrying: %v", err)
				delay := retries * 5
				ticker := time.NewTicker(time.Second)

			selectLoop:
				for {
					progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1])
					select {
					case <-ticker.C:
						delay--
						if delay == 0 {
							ticker.Stop()
							break selectLoop
						}
					case <-d.Transfer.Context().Done():
						ticker.Stop()
						d.err = errors.New("download cancelled during retry delay")
						return
					}

				}
			}

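			// The network download is complete; mark the transfer inactive so it
			// no longer counts toward the concurrency limit during registration.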
			close(inactive)

			if parentDownload != nil {
				select {
				case <-d.Transfer.Context().Done():
					d.err = errors.New("layer registration cancelled")
					downloadReader.Close()
					return
				case <-parentDownload.Done():
				}

				l, err := parentDownload.result()
				if err != nil {
					d.err = err
					downloadReader.Close()
					return
				}
				parentLayer = l.ChainID()
			}

			reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting")
			defer reader.Close()

			inflatedLayerData, err := archive.DecompressStream(reader)
			if err != nil {
				d.err = fmt.Errorf("could not get decompression stream: %v", err)
				return
			}

			var src distribution.Descriptor
			if fs, ok := descriptor.(distribution.Describable); ok {
				src = fs.Descriptor()
			}
			if ds, ok := d.layerStore.(layer.DescribableStore); ok {
				d.layer, err = ds.RegisterWithDescriptor(inflatedLayerData, parentLayer, src)
			} else {
				d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer)
			}
			if err != nil {
				select {
				case <-d.Transfer.Context().Done():
					d.err = errors.New("layer registration cancelled")
				default:
					d.err = fmt.Errorf("failed to register layer: %v", err)
				}
				return
			}

			progress.Update(progressOutput, descriptor.ID(), "Pull complete")
			withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered)
			if hasRegistered {
				withRegistered.Registered(d.layer.DiffID())
			}

			// Wait for the transfer to be released by all watchers, then release
			// the layer reference taken during registration.
			go func() {
				<-d.Transfer.Released()
				if d.layer != nil {
					layer.ReleaseAndLog(d.layerStore, d.layer)
				}
			}()
		}()

		return d
	}
}
Example #17
func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc {
	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
		u := &uploadTransfer{
			Transfer: NewTransfer(),
		}

		go func() {
			defer func() {
				close(progressChan)
			}()

			progressOutput := progress.ChanOutput(progressChan)

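			// Wait for the TransferManager to give the go-ahead; report "Waiting"
			// while the upload is queued behind the concurrency limit.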
			select {
			case <-start:
			default:
				progress.Update(progressOutput, descriptor.ID(), "Waiting")
				<-start
			}

			retries := 0
			for {
				remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput)
				if err == nil {
					u.remoteDescriptor = remoteDescriptor
					break
				}

				// If an error was returned because the context
				// was cancelled, we shouldn't retry.
				select {
				case <-u.Transfer.Context().Done():
					u.err = err
					return
				default:
				}

				retries++
				if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts {
					logrus.Errorf("Upload failed: %v", err)
					u.err = err
					return
				}

				logrus.Errorf("Upload failed, retrying: %v", err)
				delay := retries * 5
				ticker := time.NewTicker(time.Second)

			selectLoop:
				for {
					progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1])
					select {
					case <-ticker.C:
						delay--
						if delay == 0 {
							ticker.Stop()
							break selectLoop
						}
					case <-u.Transfer.Context().Done():
						ticker.Stop()
						u.err = errors.New("upload cancelled during retry delay")
						return
					}
				}
			}
		}()

		return u
	}
}
Example #18
func TestSuccessfulDownload(t *testing.T) {
	// TODO Windows: Fix this unit test
	if runtime.GOOS == "windows" {
		t.Skip("Needs fixing on Windows")
	}
	layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)}
	ldm := NewLayerDownloadManager(layerStore, maxDownloadConcurrency)

	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})
	receivedProgress := make(map[string]progress.Progress)

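	// Capture the last progress message seen for each layer ID so the final
	// status can be checked after the download completes.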
	go func() {
		for p := range progressChan {
			receivedProgress[p.ID] = p
		}
		close(progressDone)
	}()

	var currentDownloads int32
	descriptors := downloadDescriptors(&currentDownloads)

	firstDescriptor := descriptors[0].(*mockDownloadDescriptor)

	// Pre-register the first layer to simulate an already-existing layer
	l, err := layerStore.Register(firstDescriptor.mockTarStream(), "")
	if err != nil {
		t.Fatal(err)
	}
	firstDescriptor.diffID = l.DiffID()

	rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan))
	if err != nil {
		t.Fatalf("download error: %v", err)
	}

	releaseFunc()

	close(progressChan)
	<-progressDone

	if len(rootFS.DiffIDs) != len(descriptors) {
		t.Fatal("got wrong number of diffIDs in rootfs")
	}

	for i, d := range descriptors {
		descriptor := d.(*mockDownloadDescriptor)

		if descriptor.diffID != "" {
			if receivedProgress[d.ID()].Action != "Already exists" {
				t.Fatalf("did not get 'Already exists' message for %v", d.ID())
			}
		} else if receivedProgress[d.ID()].Action != "Pull complete" {
			t.Fatalf("did not get 'Pull complete' message for %v", d.ID())
		}

		if rootFS.DiffIDs[i] != descriptor.expectedDiffID {
			t.Fatalf("rootFS item %d has the wrong diffID (expected: %v got: %v)", i, descriptor.expectedDiffID, rootFS.DiffIDs[i])
		}

		if descriptor.diffID == "" && descriptor.registeredDiffID != rootFS.DiffIDs[i] {
			t.Fatal("diffID mismatch between rootFS and Registered callback")
		}
	}
}