func (sto *condStorage) EnumerateBlobs(dest chan<- blobref.SizedBlobRef, after string, limit int, wait time.Duration) error {
	if sto.read != nil {
		rsto := blobserver.MaybeWrapContext(sto.read, sto.ctx)
		return rsto.EnumerateBlobs(dest, after, limit, wait)
	}
	return errors.New("cond: Read not configured")
}

func (sto *replicaStorage) RemoveBlobs(blobs []*blobref.BlobRef) error {
	errch := make(chan error, buffered)
	removeFrom := func(s blobserver.Storage) {
		s = blobserver.MaybeWrapContext(s, sto.ctx)
		errch <- s.RemoveBlobs(blobs)
	}
	for _, replica := range sto.replicas {
		go removeFrom(replica)
	}
	var reterr error
	nSuccess := 0
	// Collect exactly one result per replica.
	for _ = range sto.replicas {
		if err := <-errch; err != nil {
			reterr = err
		} else {
			nSuccess++
		}
	}
	if nSuccess > 0 {
		// TODO: decide on the return value. for now this is best
		// effort and we return nil if any of the blobservers said
		// success. maybe a bit weird, though.
		return nil
	}
	return reterr
}

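// The buffered constant that sizes errch above isn't shown in this section;
// it is assumed to be a small package-level constant, roughly like the
// declaration below (the value 8 is an assumption, not taken from this code):
const buffered = 8
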
func (sto *condStorage) StatBlobs(dest chan<- blobref.SizedBlobRef, blobs []*blobref.BlobRef, wait time.Duration) error {
	if sto.read != nil {
		rsto := blobserver.MaybeWrapContext(sto.read, sto.ctx)
		return rsto.StatBlobs(dest, blobs, wait)
	}
	return errors.New("cond: Read not configured")
}

func (sto *condStorage) RemoveBlobs(blobs []*blobref.BlobRef) error {
	if sto.remove != nil {
		rsto := blobserver.MaybeWrapContext(sto.remove, sto.ctx)
		return rsto.RemoveBlobs(blobs)
	}
	return errors.New("cond: Remove not configured")
}

func (sto *condStorage) FetchStreaming(b *blobref.BlobRef) (file io.ReadCloser, size int64, err error) {
	if sto.read != nil {
		rsto := blobserver.MaybeWrapContext(sto.read, sto.ctx)
		return rsto.FetchStreaming(b)
	}
	err = errors.New("cond: Read not configured")
	return
}

// wrappedReplicas returns sto.replicas, with each replica wrapped in the
// storage context when one is set.
func (sto *replicaStorage) wrappedReplicas() []blobserver.Storage {
	if sto.ctx == nil {
		return sto.replicas
	}
	w := make([]blobserver.Storage, len(sto.replicas))
	for i, r := range sto.replicas {
		w[i] = blobserver.MaybeWrapContext(r, sto.ctx)
	}
	return w
}

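// ReceiveBlob below sends per-replica upload results over a channel of
// sizedBlobAndError values. That type isn't shown in this section; judging
// from its use (sizedBlobAndError{sb, err}, res.sb, res.err), it is assumed
// to be a small helper pairing a result with its error, roughly:
type sizedBlobAndError struct {
	sb  blobref.SizedBlobRef
	err error
}
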
// ReceiveBlob streams the incoming blob to every replica concurrently and
// reports success once minWritesForSuccess replicas have stored it at the
// expected size.
// TODO-GO: s/xxgo/_/ once Go issue 1802 is fixed
func (sto *replicaStorage) ReceiveBlob(b *blobref.BlobRef, source io.Reader) (xxgo blobref.SizedBlobRef, err error) {
	nReplicas := len(sto.replicas)
	rpipe, wpipe, writer := make([]*io.PipeReader, nReplicas), make([]*io.PipeWriter, nReplicas), make([]io.Writer, nReplicas)
	for idx := range sto.replicas {
		rpipe[idx], wpipe[idx] = io.Pipe()
		writer[idx] = wpipe[idx]
		// TODO: deal with slow/hung clients. this scheme of pipes +
		// multiwriter (even with a bufio.Writer thrown in) isn't
		// sufficient to guarantee forward progress. perhaps something
		// like &MoveOrDieWriter{Writer: wpipe[idx], HeartbeatSec: 10}
	}
	upResult := make(chan sizedBlobAndError, nReplicas)
	uploadToReplica := func(source io.Reader, s blobserver.Storage) {
		s = blobserver.MaybeWrapContext(s, sto.ctx)
		sb, err := s.ReceiveBlob(b, source)
		if err != nil {
			// Drain the rest of this replica's pipe so the
			// MultiWriter copy below isn't blocked by the failure.
			io.Copy(ioutil.Discard, source)
		}
		upResult <- sizedBlobAndError{sb, err}
	}
	for idx, replica := range sto.wrappedReplicas() {
		go uploadToReplica(rpipe[idx], replica)
	}
	size, err := io.Copy(io.MultiWriter(writer...), source)
	if err != nil {
		return
	}
	for idx := range sto.replicas {
		wpipe[idx].Close()
	}
	nSuccess, nFailures := 0, 0
	for _ = range sto.replicas {
		res := <-upResult
		switch {
		case res.err == nil && res.sb.Size == size:
			nSuccess++
			if nSuccess == sto.minWritesForSuccess {
				sto.GetBlobHub().NotifyBlobReceived(b)
				return res.sb, nil
			}
		case res.err == nil:
			nFailures++
			err = fmt.Errorf("replica: upload shard reported size %d, expected %d", res.sb.Size, size)
		default:
			nFailures++
			err = res.err
		}
	}
	if nFailures > 0 {
		log.Printf("replica: receiving blob, %d successes, %d failures; last error = %v", nSuccess, nFailures, err)
	}
	return
}

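// The TODO in ReceiveBlob above suggests guarding each pipe with something
// like a MoveOrDieWriter so a hung replica can't stall the MultiWriter copy
// forever. No such type exists in this section; the sketch below is purely
// illustrative of that idea (name, fields, and timeout behavior are
// assumptions, and abandoning a stuck Write still leaves its goroutine
// running until the underlying writer returns):
type moveOrDieWriter struct {
	w       io.Writer
	timeout time.Duration
}

func (m *moveOrDieWriter) Write(p []byte) (int, error) {
	type writeResult struct {
		n   int
		err error
	}
	ch := make(chan writeResult, 1) // buffered so the goroutine never blocks on send
	go func() {
		n, err := m.w.Write(p)
		ch <- writeResult{n, err}
	}()
	select {
	case res := <-ch:
		return res.n, res.err
	case <-time.After(m.timeout):
		return 0, errors.New("replica: write to replica timed out")
	}
}
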
func (sto *condStorage) ReceiveBlob(b *blobref.BlobRef, source io.Reader) (sb blobref.SizedBlobRef, err error) {
	destSto, overRead, err := sto.storageForReceive(source)
	if err != nil {
		return
	}
	if len(overRead) > 0 {
		// Replay any bytes storageForReceive already consumed from
		// source so the destination sees the full blob.
		source = io.MultiReader(bytes.NewBuffer(overRead), source)
	}
	destSto = blobserver.MaybeWrapContext(destSto, sto.ctx)
	return destSto.ReceiveBlob(b, source)
}