Example #1
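Bounded-parallel stat of blobs on the local filesystem: a gate caps the number of concurrent os.Stat calls, and a buffered error channel collects one result per blob.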
func (ds *DiskStorage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
	if len(blobs) == 0 {
		return nil
	}

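	// statSend stats a single blob on disk and, if it exists as a regular
	// file, sends its size to dest. A missing blob is not an error; it is
	// simply not reported.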
	statSend := func(ref blob.Ref) error {
		fi, err := os.Stat(ds.blobPath(ds.partition, ref))
		switch {
		case err == nil && fi.Mode().IsRegular():
			dest <- blob.SizedRef{Ref: ref, Size: fi.Size()}
			return nil
		case err != nil && !os.IsNotExist(err):
			return err
		}
		return nil
	}

	if len(blobs) == 1 {
		return statSend(blobs[0])
	}

	errc := make(chan error, len(blobs))

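	// Fan out, allowing at most maxParallelStats stats in flight at once.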
	gt := gate.New(maxParallelStats)
	for _, ref := range blobs {
		gt.Start()
		go func(ref blob.Ref) {
			defer gt.Done()
			errc <- statSend(ref)
		}(ref)
	}
	for range blobs {
		if err := <-errc; err != nil {
			return err
		}
	}
	return nil
}
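Because errc is buffered with capacity len(blobs), the early return on the first error cannot strand the remaining goroutines: each can still complete its errc send without blocking.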
Example #2
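The same StatBlobs interface implemented against S3: each blob is statted with a remote request, statGate caps the number of requests in flight, and any failure other than os.ErrNotExist is reported.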

package s3

import (
	"fmt"
	"os"

	"camlistore.org/pkg/blob"
	"camlistore.org/pkg/gate"
)

var statGate = gate.New(20) // arbitrary cap on concurrent S3 stat requests

func (sto *s3Storage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
	errc := make(chan error, len(blobs))
	for _, br := range blobs {
		statGate.Start()
		go func(br blob.Ref) {
			defer statGate.Done()
			size, err := sto.s3Client.Stat(br.String(), sto.bucket)
			switch err {
			case nil:
				dest <- blob.SizedRef{Ref: br, Size: size}
				errc <- nil
			case os.ErrNotExist:
				errc <- nil
			default:
				errc <- fmt.Errorf("error statting %v: %v", br, err)
			}
		}(br)
	}
	for range blobs {
		if err := <-errc; err != nil {
			return err
		}
	}
	return nil
}
Example #3
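Rolling-checksum file chunking with concurrent uploads: chunks are cut on rollsum boundaries and uploaded as they are produced, with a gate bounding the number of in-flight uploads and a one-slot channel capturing the first upload error.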
func writeFileChunks(bs blobserver.StatReceiver, file *Builder, r io.Reader) (n int64, spans []span, outerr error) {
	src := &noteEOFReader{r: r}
	bufr := bufio.NewReaderSize(src, bufioReaderSize)
	spans = []span{} // the tree of spans, cut on interesting rollsum boundaries
	rs := rollsum.New()
	var last int64
	var buf bytes.Buffer
	blobSize := 0 // size of the next blob being built; should equal buf.Len()

	const chunksInFlight = 32 // at ~64 KB chunks, this is ~2MB memory per file
	gatec := gate.New(chunksInFlight)
	firsterrc := make(chan error, 1)

	// uploadLastSpan runs in the same goroutine as the loop below and is
	// responsible for starting the upload of buf's contents. It returns false
	// if an error has been seen and the loop below should stop.
	uploadLastSpan := func() bool {
		chunk := buf.String()
		buf.Reset()
		br := blob.SHA1FromString(chunk)
		spans[len(spans)-1].br = br
		select {
		case outerr = <-firsterrc:
			return false
		default:
			// No error seen so far, continue.
		}
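		// Acquire an upload slot; this blocks once chunksInFlight uploads are pending.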
		gatec.Start()
		go func() {
			defer gatec.Done()
			if _, err := uploadString(bs, br, chunk); err != nil {
				select {
				case firsterrc <- err:
				default:
				}
			}
		}()
		return true
	}

	for {
		c, err := bufr.ReadByte()
		if err == io.EOF {
			if n != last {
				spans = append(spans, span{from: last, to: n})
				if !uploadLastSpan() {
					return
				}
			}
			break
		}
		if err != nil {
			return 0, nil, err
		}

		buf.WriteByte(c)
		n++
		blobSize++
		rs.Roll(c)

		var bits int
		onRollSplit := rs.OnSplit()
		switch {
		case blobSize == maxBlobSize:
			bits = 20 // arbitrary node weight; 1<<20 == 1MB
		case src.sawEOF:
			// Don't split. End is coming soon enough.
			continue
		case onRollSplit && n > firstChunkSize && blobSize > tooSmallThreshold:
			bits = rs.Bits()
		case n == firstChunkSize:
			bits = 18 // 1 << 18 == 256KB
		default:
			// Don't split.
			continue
		}
		blobSize = 0

		// Take any spans from the end of the spans slice that
		// have a smaller 'bits' score and make them children
		// of this node.
		var children []span
		childrenFrom := len(spans)
		for childrenFrom > 0 && spans[childrenFrom-1].bits < bits {
			childrenFrom--
		}
		if nCopy := len(spans) - childrenFrom; nCopy > 0 {
			children = make([]span, nCopy)
			copy(children, spans[childrenFrom:])
			spans = spans[:childrenFrom]
		}

		spans = append(spans, span{from: last, to: n, bits: bits, children: children})
		last = n
		if !uploadLastSpan() {
			return
		}
	}

	// An upload error may already have been copied into outerr by uploadLastSpan.
	if outerr != nil {
		return 0, nil, outerr
	}

	// Wait for all uploads to finish, one way or another, and then
	// see if any generated errors.
	// Once this loop is done, we own all the tokens in gatec, so nobody
	// else can have one outstanding.
	for i := 0; i < chunksInFlight; i++ {
		gatec.Start()
	}
	select {
	case err := <-firsterrc:
		return 0, nil, err
	default:
	}

	return n, spans, nil
}
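All three examples share one pattern: gate.New(n) builds a counting semaphore, Start acquires a slot (blocking once n are held), and Done releases it. Below is a minimal self-contained sketch of that pattern; it uses a buffered channel in place of camlistore.org/pkg/gate, whose semantics are assumed here for illustration rather than taken from its source.

package main

import (
	"fmt"
	"time"
)

// gate is a minimal counting semaphore modeled on how the examples above use
// camlistore.org/pkg/gate: Start blocks once max slots are held, Done frees one.
// (An assumed equivalent for illustration, not the real package.)
type gate chan struct{}

func newGate(max int) gate { return make(gate, max) }
func (g gate) Start()      { g <- struct{}{} }
func (g gate) Done()       { <-g }

func main() {
	const jobs = 10
	g := newGate(3) // at most 3 workers run concurrently
	errc := make(chan error, jobs)
	for i := 0; i < jobs; i++ {
		g.Start()
		go func() {
			defer g.Done()
			time.Sleep(10 * time.Millisecond) // stand-in for a Stat or upload
			errc <- nil
		}()
	}
	// Drain exactly one result per job, as the examples above do.
	for i := 0; i < jobs; i++ {
		if err := <-errc; err != nil {
			fmt.Println("error:", err)
		}
	}
	fmt.Println("all jobs done")
}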