// Upload is called to perform the upload.
func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
	if u.currentUploads != nil {
		defer atomic.AddInt32(u.currentUploads, -1)

		if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency {
			return distribution.Descriptor{}, errors.New("concurrency limit exceeded")
		}
	}

	// Sleep a bit to simulate a time-consuming upload.
	for i := int64(0); i <= 10; i++ {
		select {
		case <-ctx.Done():
			return distribution.Descriptor{}, ctx.Err()
		case <-time.After(10 * time.Millisecond):
			progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10})
		}
	}

	if u.simulateRetries != 0 {
		u.simulateRetries--
		return distribution.Descriptor{}, errors.New("simulating retry")
	}

	return distribution.Descriptor{}, nil
}
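// The following is a minimal, self-contained sketch (not part of the original
// test file) of the concurrency-check pattern used above: increment a shared
// counter on entry, schedule the matching decrement with defer, and fail if
// the observed value ever exceeds the allowed maximum. The names
// limitedWorker and maxConcurrency are illustrative assumptions, not
// identifiers from the real package.
package main

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

const maxConcurrency = 3

// limitedWorker mimics the mock descriptor's guard: any call that observes
// more than maxConcurrency concurrent invocations reports an error.
func limitedWorker(current *int32) error {
	defer atomic.AddInt32(current, -1)

	if atomic.AddInt32(current, 1) > maxConcurrency {
		return errors.New("concurrency limit exceeded")
	}

	time.Sleep(10 * time.Millisecond) // simulate work
	return nil
}

func main() {
	var current int32
	var wg sync.WaitGroup

	// Spawn exactly maxConcurrency workers, so the limit is never exceeded
	// and no errors are reported.
	for i := 0; i < maxConcurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := limitedWorker(&current); err != nil {
				fmt.Println("unexpected:", err)
			}
		}()
	}
	wg.Wait()
}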
// Download is called to perform the download.
func (d *mockDownloadDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	if d.currentDownloads != nil {
		defer atomic.AddInt32(d.currentDownloads, -1)

		if atomic.AddInt32(d.currentDownloads, 1) > maxDownloadConcurrency {
			return nil, 0, errors.New("concurrency limit exceeded")
		}
	}

	// Sleep a bit to simulate a time-consuming download.
	for i := int64(0); i <= 10; i++ {
		select {
		case <-ctx.Done():
			return nil, 0, ctx.Err()
		case <-time.After(10 * time.Millisecond):
			progressOutput.WriteProgress(progress.Progress{ID: d.ID(), Action: "Downloading", Current: i, Total: 10})
		}
	}

	if d.simulateRetries != 0 {
		d.simulateRetries--
		return nil, 0, errors.New("simulating retry")
	}

	return d.mockTarStream(), 0, nil
}
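// A minimal sketch (assumed names, not from the original package) of the
// cancellation pattern both mock descriptors use: each step races a timer
// against ctx.Done(), so a cancelled context stops the simulated work
// promptly instead of sleeping through the whole loop.
package main

import (
	"context"
	"fmt"
	"time"
)

// simulateWork reports progress for each of total+1 steps, aborting as soon
// as the context is cancelled.
func simulateWork(ctx context.Context, total int64, report func(current, total int64)) error {
	for i := int64(0); i <= total; i++ {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(10 * time.Millisecond):
			report(i, total)
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 35*time.Millisecond)
	defer cancel()

	err := simulateWork(ctx, 10, func(current, total int64) {
		fmt.Printf("progress %d/%d\n", current, total)
	})
	fmt.Println("finished with:", err) // context deadline exceeded after a few steps
}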
// Upload is called to perform the upload.
func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error) {
	if u.currentUploads != nil {
		defer atomic.AddInt32(u.currentUploads, -1)

		if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency {
			return "", errors.New("concurrency limit exceeded")
		}
	}

	// Sleep a bit to simulate a time-consuming upload.
	for i := int64(0); i <= 10; i++ {
		select {
		case <-ctx.Done():
			return "", ctx.Err()
		case <-time.After(10 * time.Millisecond):
			progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10})
		}
	}

	if u.simulateRetries != 0 {
		u.simulateRetries--
		return "", errors.New("simulating retry")
	}

	// For the mock implementation, use SHA256(DiffID) as the returned
	// digest.
	return digest.FromBytes([]byte(u.diffID.String()))
}
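// A small illustrative sketch (hypothetical names, not the real transfer
// manager) of how a simulateRetries-style countdown is meant to be consumed:
// the operation fails a fixed number of times, and a retrying caller keeps
// attempting until it succeeds or gives up.
package main

import (
	"errors"
	"fmt"
)

type flakyOp struct {
	remainingFailures int
}

// run fails until remainingFailures reaches zero, mirroring the
// simulateRetries-- pattern in the mock descriptors.
func (f *flakyOp) run() error {
	if f.remainingFailures != 0 {
		f.remainingFailures--
		return errors.New("simulating retry")
	}
	return nil
}

// withRetries retries op.run up to maxAttempts times, returning the last
// error if every attempt fails.
func withRetries(op *flakyOp, maxAttempts int) error {
	var err error
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		if err = op.run(); err == nil {
			fmt.Printf("succeeded on attempt %d\n", attempt)
			return nil
		}
	}
	return err
}

func main() {
	op := &flakyOp{remainingFailures: 2}
	if err := withRetries(op, 5); err != nil {
		fmt.Println("gave up:", err)
	}
}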
// Watch adds a watcher to the transfer. The supplied progress.Output receives
// progress updates until the transfer finishes.
func (t *transfer) Watch(progressOutput progress.Output) *Watcher {
	t.mu.Lock()
	defer t.mu.Unlock()

	w := &Watcher{
		releaseChan: make(chan struct{}),
		signalChan:  make(chan struct{}),
		running:     make(chan struct{}),
	}

	if t.broadcastDone {
		close(w.running)
		return w
	}

	t.watchers[w.releaseChan] = w

	go func() {
		defer func() {
			close(w.running)
		}()
		done := false
		for {
			t.mu.Lock()
			hasLastProgress := t.hasLastProgress
			lastProgress := t.lastProgress
			t.mu.Unlock()

			// This might write the last progress item a
			// second time (since channel closure also gets
			// us here), but that's fine.
			if hasLastProgress {
				progressOutput.WriteProgress(lastProgress)
			}

			if done {
				return
			}

			select {
			case <-w.signalChan:
			case <-w.releaseChan:
				done = true
				// Since the watcher is going to detach, make
				// sure the broadcaster is caught up so we
				// don't miss anything.
				select {
				case t.broadcastSyncChan <- struct{}{}:
				case <-t.running:
				}
			case <-t.running:
				done = true
			}
		}
	}()

	return w
}
// Watch adds a watcher to the transfer. The supplied progress.Output receives
// progress updates until the transfer finishes.
func (t *transfer) Watch(progressOutput progress.Output) *Watcher {
	t.mu.Lock()
	defer t.mu.Unlock()

	w := &Watcher{
		releaseChan: make(chan struct{}),
		signalChan:  make(chan struct{}),
		running:     make(chan struct{}),
	}

	t.watchers[w.releaseChan] = w

	if t.broadcastDone {
		close(w.running)
		return w
	}

	go func() {
		defer func() {
			close(w.running)
		}()
		var (
			done           bool
			lastWritten    progress.Progress
			hasLastWritten bool
		)
		for {
			t.mu.Lock()
			hasLastProgress := t.hasLastProgress
			lastProgress := t.lastProgress
			t.mu.Unlock()

			// Make sure we don't write the last progress item
			// twice.
			if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) {
				progressOutput.WriteProgress(lastProgress)
				lastWritten = lastProgress
				hasLastWritten = true
			}

			if done {
				return
			}

			select {
			case <-w.signalChan:
			case <-w.releaseChan:
				done = true
				// Since the watcher is going to detach, make
				// sure the broadcaster is caught up so we
				// don't miss anything.
				select {
				case t.broadcastSyncChan <- struct{}{}:
				case <-t.running:
				}
			case <-t.running:
				done = true
			}
		}
	}()

	return w
}
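// A simplified, self-contained sketch of the watcher pattern above
// (illustrative only; the miniBroadcaster/miniWatcher types and their fields
// are assumptions, not the real xfer package). A broadcaster records the
// latest progress value and pokes each watcher through a non-blocking send on
// its signal channel; each watcher goroutine re-reads the latest value when
// signalled and exits once the broadcaster's running channel is closed.
package main

import (
	"fmt"
	"sync"
	"time"
)

type miniWatcher struct {
	signal chan struct{} // poked when new progress is available
	done   chan struct{} // closed when the watcher goroutine exits
}

type miniBroadcaster struct {
	mu       sync.Mutex
	latest   int
	hasValue bool
	watchers []*miniWatcher
	running  chan struct{} // closed when the transfer finishes
}

func newMiniBroadcaster() *miniBroadcaster {
	return &miniBroadcaster{running: make(chan struct{})}
}

// watch registers a watcher that prints every value it observes.
func (b *miniBroadcaster) watch(name string) *miniWatcher {
	w := &miniWatcher{signal: make(chan struct{}, 1), done: make(chan struct{})}
	b.mu.Lock()
	b.watchers = append(b.watchers, w)
	b.mu.Unlock()

	go func() {
		defer close(w.done)
		for {
			select {
			case <-w.signal:
			case <-b.running:
				return
			}
			b.mu.Lock()
			v, ok := b.latest, b.hasValue
			b.mu.Unlock()
			if ok {
				fmt.Printf("%s saw %d\n", name, v)
			}
		}
	}()
	return w
}

// publish records a new value and pokes every watcher without blocking.
func (b *miniBroadcaster) publish(v int) {
	b.mu.Lock()
	b.latest, b.hasValue = v, true
	watchers := append([]*miniWatcher(nil), b.watchers...)
	b.mu.Unlock()

	for _, w := range watchers {
		select {
		case w.signal <- struct{}{}:
		default: // watcher already has a pending wake-up
		}
	}
}

func main() {
	b := newMiniBroadcaster()
	w := b.watch("watcher-1")

	for i := 1; i <= 3; i++ {
		b.publish(i)
		time.Sleep(20 * time.Millisecond)
	}

	close(b.running) // transfer finished
	<-w.done
}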