Example #1
File: gc.go Project: postfix/golib-1
// ctx will be canceled on failure
func (c *Collector) markItem(ctx *context.Context, it Item, isRoot bool) error {
	if !isRoot {
		marked, err := c.Marker.IsMarked(it)
		if err != nil {
			return err
		}
		if marked {
			return nil
		}
	}
	if err := c.Marker.Mark(it); err != nil {
		return err
	}

	ch := make(chan Item, buffered)
	var grp syncutil.Group
	grp.Go(func() error {
		return c.ItemEnumerator.EnumerateItem(ctx, it, ch)
	})
	grp.Go(func() error {
		for it := range ch {
			if err := c.markItem(ctx, it, false); err != nil {
				return err
			}
		}
		return nil
	})
	if err := grp.Err(); err != nil {
		ctx.Cancel()
		return err
	}
	return nil
}
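
The pattern above — one goroutine feeding a channel while a second drains it, with the Group collecting the first error — recurs throughout these examples. Below is a minimal, self-contained sketch of that shape; it assumes the camlistore.org/pkg/syncutil package whose Group.Go and Group.Err methods appear in every example here, and the Item type and enumerate function are hypothetical stand-ins.

// Minimal sketch (not from the examples): producer/consumer with syncutil.Group.
// Assumes camlistore.org/pkg/syncutil; Item and enumerate are hypothetical.
package main

import (
	"fmt"

	"camlistore.org/pkg/syncutil"
)

type Item string

// enumerate is a hypothetical producer. It must close ch when done so the
// consumer's range loop terminates.
func enumerate(ch chan<- Item) error {
	defer close(ch)
	for _, it := range []Item{"a", "b", "c"} {
		ch <- it
	}
	return nil
}

func processAll() error {
	ch := make(chan Item, 8)
	var grp syncutil.Group
	grp.Go(func() error { return enumerate(ch) })
	grp.Go(func() error {
		for it := range ch {
			fmt.Println("processing", it)
		}
		return nil
	})
	// Err waits for both goroutines and returns the first non-nil error.
	// In markItem above, a cancelable context additionally keeps the producer
	// from blocking forever if the consumer bails out early.
	return grp.Err()
}

func main() {
	if err := processAll(); err != nil {
		fmt.Println("error:", err)
	}
}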
Example #2
func (dr *DescribeRequest) describeReally(br blob.Ref, depth int) {
	mime, size, err := dr.sh.index.GetBlobMIMEType(br)
	if err == os.ErrNotExist {
		return
	}
	if err != nil {
		dr.addError(br, err)
		return
	}

	// TODO: convert all this in terms of
	// DescribedBlob/DescribedPermanode/DescribedFile, not json
	// maps.  Then add JSON marshalers to those types. Add tests.
	des := dr.describedBlob(br)
	des.setMIMEType(mime)
	des.Size = size

	switch des.CamliType {
	case "permanode":
		des.Permanode = new(DescribedPermanode)
		dr.populatePermanodeFields(des.Permanode, br, dr.sh.owner, depth)
	case "file":
		var err error
		des.File, err = dr.sh.index.GetFileInfo(br)
		if err != nil {
			if os.IsNotExist(err) {
				log.Printf("index.GetFileInfo(file %s) failed; index stale?", br)
			} else {
				dr.addError(br, err)
			}
			return
		}
		if des.File.IsImage() {
			des.Image, err = dr.sh.index.GetImageInfo(br)
			if err != nil {
				if os.IsNotExist(err) {
					log.Printf("index.GetImageInfo(file %s) failed; index stale?", br)
				} else {
					dr.addError(br, err)
				}
			}
		}
	case "directory":
		var g syncutil.Group
		g.Go(func() (err error) {
			des.Dir, err = dr.sh.index.GetFileInfo(br)
			if os.IsNotExist(err) {
				log.Printf("index.GetFileInfo(directory %s) failed; index stale?", br)
			}
			return
		})
		g.Go(func() (err error) {
			des.DirChildren, err = dr.getDirMembers(br, depth)
			return
		})
		if err := g.Err(); err != nil {
			dr.addError(br, err)
		}
	}
}
Example #3
func testEnumerate(t *testing.T, sto blobserver.Storage, wantUnsorted []blob.SizedRef, opts ...interface{}) {
	var after string
	var n = 1000
	for _, opt := range opts {
		switch v := opt.(type) {
		case string:
			after = v
		case int:
			n = v
		default:
			panic("bad option of type " + fmt.Sprint("%T", v))
		}
	}

	want := append([]blob.SizedRef(nil), wantUnsorted...)
	sort.Sort(blob.SizedByRef(want))

	sbc := make(chan blob.SizedRef, 10)

	var got []blob.SizedRef
	var grp syncutil.Group
	sawEnd := make(chan bool, 1)
	grp.Go(func() error {
		if err := sto.EnumerateBlobs(context.New(), sbc, after, n); err != nil {
			return fmt.Errorf("EnumerateBlobs(%q, %d): %v", after, n)
		}
		return nil
	})
	grp.Go(func() error {
		for sb := range sbc {
			if !sb.Valid() {
				return fmt.Errorf("invalid blobref %#v received in enumerate", sb)
			}
			got = append(got, sb)
		}
		sawEnd <- true
		return nil

	})
	grp.Go(func() error {
		select {
		case <-sawEnd:
			return nil
		case <-time.After(10 * time.Second):
			return errors.New("timeout waiting for EnumerateBlobs to close its channel")
		}

	})
	if err := grp.Err(); err != nil {
		t.Fatalf("Enumerate error: %v", err)
		return
	}
	if len(got) == 0 && len(want) == 0 {
		return
	}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("Enumerate mismatch. Got %d; want %d.\n Got: %v\nWant: %v\n",
			len(got), len(want), got, want)
	}
}
Example #4
func (s *Storage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
	// TODO: use cache
	var grp syncutil.Group
	gate := syncutil.NewGate(20) // arbitrary cap
	for i := range blobs {
		br := blobs[i]
		gate.Start()
		grp.Go(func() error {
			defer gate.Done()
			size, exists, err := s.client.StatObject(
				&googlestorage.Object{Bucket: s.bucket, Key: s.dirPrefix + br.String()})
			if err != nil {
				return err
			}
			if !exists {
				return nil
			}
			if size > constants.MaxBlobSize {
				return fmt.Errorf("blob %s stat size too large (%d)", br, size)
			}
			dest <- blob.SizedRef{Ref: br, Size: uint32(size)}
			return nil
		})
	}
	return grp.Err()
}
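
Most of the remaining examples pair the Group with a syncutil.Gate to bound how many goroutines run at once. The sketch below shows that fan-out shape in isolation; it again assumes camlistore.org/pkg/syncutil (NewGate, Start, and Done as used above), and statOne is a hypothetical stand-in for a per-blob lookup such as StatObject.

// Minimal sketch (not from the examples): Gate-bounded fan-out with syncutil.Group.
// Assumes camlistore.org/pkg/syncutil; statOne is a hypothetical lookup.
package main

import (
	"fmt"

	"camlistore.org/pkg/syncutil"
)

func statOne(name string) error {
	fmt.Println("stat", name)
	return nil
}

func statAll(names []string) error {
	gate := syncutil.NewGate(20) // at most 20 lookups in flight
	var grp syncutil.Group
	for _, name := range names {
		name := name // per-iteration copy for the closure
		gate.Start() // blocks while 20 goroutines are already running
		grp.Go(func() error {
			defer gate.Done()
			return statOne(name)
		})
	}
	// Err waits for every goroutine and returns the first non-nil error.
	return grp.Err()
}

func main() {
	if err := statAll([]string{"a", "b", "c"}); err != nil {
		fmt.Println("error:", err)
	}
}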
Example #5
File: stat.go Project: sfrdmn/camlistore
func (sto *s3Storage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) (err error) {
	if faultStat.FailErr(&err) {
		return
	}
	// TODO: use sto.cache
	var wg syncutil.Group
	for _, br := range blobs {
		br := br
		statGate.Start()
		wg.Go(func() error {
			defer statGate.Done()

			size, err := sto.s3Client.Stat(br.String(), sto.bucket)
			if err == nil {
				dest <- blob.SizedRef{Ref: br, Size: uint32(size)}
				return nil
			}
			if err == os.ErrNotExist {
				return nil
			}
			return fmt.Errorf("error statting %v: %v", br, err)
		})
	}
	return wg.Err()
}
Example #6
func (h *memHub) NotifyBlobReceived(sb blob.SizedRef) error {
	h.mu.RLock()
	defer h.mu.RUnlock()

	br := sb.Ref

	// Synchronous hooks. If error, prevents notifying other
	// subscribers.
	var grp syncutil.Group
	for i := range h.hooks {
		hook := h.hooks[i]
		grp.Go(func() error { return hook(sb) })
	}
	if err := grp.Err(); err != nil {
		return err
	}

	// Global listeners
	for ch := range h.listeners {
		ch := ch
		go func() { ch <- br }()
	}

	// Blob-specific listeners
	for ch := range h.blobListeners[br] {
		ch := ch
		go func() { ch <- br }()
	}
	return nil
}
Example #7
func (ds *DiskStorage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
	if len(blobs) == 0 {
		return nil
	}

	statSend := func(ref blob.Ref) error {
		fi, err := os.Stat(ds.blobPath(ds.partition, ref))
		switch {
		case err == nil && fi.Mode().IsRegular():
			dest <- blob.SizedRef{Ref: ref, Size: fi.Size()}
			return nil
		case err != nil && !os.IsNotExist(err):
			return err
		}
		return nil
	}

	if len(blobs) == 1 {
		return statSend(blobs[0])
	}

	var wg syncutil.Group
	for _, ref := range blobs {
		ref := ref
		statGate.Start()
		wg.Go(func() error {
			defer statGate.Done()
			return statSend(ref)
		})
	}
	return wg.Err()
}
Example #8
func (s *storage) RemoveBlobs(blobs []blob.Ref) error {
	// Plan:
	//  -- delete from small (if it's there)
	//  -- if in big, update the meta index to note that it's there, but deleted.
	//  -- fetch big's zip file (constructed from a ReaderAt that is all dummy zeros +
	//     the zip's TOC only, relying on big being a SubFetcher, and keeping info in
	//     the meta about the offset of the TOC+total size of each big's zip)
	//  -- iterate over the zip's blobs (at some point). If all are marked deleted, actually RemoveBlob
	//     on big to delete the full zip and then delete all the meta rows.
	var (
		mu       sync.Mutex
		unpacked []blob.Ref
		packed   []blob.Ref
		large    = map[blob.Ref]bool{} // the large blobs that packed are in
	)
	var grp syncutil.Group
	delGate := syncutil.NewGate(removeLookups)
	for _, br := range blobs {
		br := br
		delGate.Start()
		grp.Go(func() error {
			defer delGate.Done()
			m, err := s.getMetaRow(br)
			if err != nil {
				return err
			}
			mu.Lock()
			defer mu.Unlock()
			if m.isPacked() {
				packed = append(packed, br)
				large[m.largeRef] = true
			} else {
				unpacked = append(unpacked, br)
			}
			return nil
		})
	}
	if err := grp.Err(); err != nil {
		return err
	}
	if len(unpacked) > 0 {
		grp.Go(func() error {
			return s.small.RemoveBlobs(unpacked)
		})
	}
	if len(packed) > 0 {
		grp.Go(func() error {
			bm := s.meta.BeginBatch()
			now := time.Now()
			for zipRef := range large {
				bm.Set("d:"+zipRef.String(), fmt.Sprint(now.Unix()))
			}
			for _, br := range packed {
				bm.Delete("b:" + br.String())
			}
			return s.meta.CommitBatch(bm)
		})
	}
	return grp.Err()
}
Example #9
File: deploy.go Project: Micrap/camlistore
// setFirewall adds the firewall rules needed for ports 80 & 443 to the default network.
func (d *Deployer) setFirewall(ctx *context.Context, computeService *compute.Service) error {
	defaultNet, err := computeService.Networks.Get(d.Conf.Project, "default").Do()
	if err != nil {
		return fmt.Errorf("error getting default network: %v", err)
	}

	needRules := map[string]compute.Firewall{
		"default-allow-http": compute.Firewall{
			Name:         "default-allow-http",
			SourceRanges: []string{"0.0.0.0/0"},
			SourceTags:   []string{"http-server"},
			Allowed:      []*compute.FirewallAllowed{{"tcp", []string{"80"}}},
			Network:      defaultNet.SelfLink,
		},
		"default-allow-https": compute.Firewall{
			Name:         "default-allow-https",
			SourceRanges: []string{"0.0.0.0/0"},
			SourceTags:   []string{"https-server"},
			Allowed:      []*compute.FirewallAllowed{{"tcp", []string{"443"}}},
			Network:      defaultNet.SelfLink,
		},
	}

	rules, err := computeService.Firewalls.List(d.Conf.Project).Do()
	if err != nil {
		return fmt.Errorf("error listing rules: %v", err)
	}
	for _, it := range rules.Items {
		delete(needRules, it.Name)
	}
	if len(needRules) == 0 {
		return nil
	}

	if Verbose {
		log.Printf("Need to create rules: %v", needRules)
	}
	var wg syncutil.Group
	for name, rule := range needRules {
		if ctx.IsCanceled() {
			return context.ErrCanceled
		}
		name, rule := name, rule
		wg.Go(func() error {
			if Verbose {
				log.Printf("Creating rule %s", name)
			}
			r, err := computeService.Firewalls.Insert(d.Conf.Project, &rule).Do()
			if err != nil {
				return fmt.Errorf("error creating rule %s: %v", name, err)
			}
			if Verbose {
				log.Printf("Created rule %s: %+v", name, r)
			}
			return nil
		})
	}
	return wg.Err()
}
Example #10
File: mut.go Project: rayleyva/camlistore
// &RenameRequest{Header:fuse.Header{Conn:(*fuse.Conn)(0xc210048180), ID:0x2, Node:0x8, Uid:0xf0d4, Gid:0x1388, Pid:0x5edb}, NewDir:0x8, OldName:"1", NewName:"2"}
func (n *mutDir) Rename(req *fuse.RenameRequest, newDir fuse.Node, intr fuse.Intr) fuse.Error {
	n2, ok := newDir.(*mutDir)
	if !ok {
		log.Printf("*mutDir newDir node isn't a *mutDir; is a %T; can't handle. returning EIO.", newDir)
		return fuse.EIO
	}

	var wg syncutil.Group
	wg.Go(n.populate)
	wg.Go(n2.populate)
	if err := wg.Err(); err != nil {
		log.Printf("*mutDir.Rename src dir populate = %v", err)
		return fuse.EIO
	}

	n.mu.Lock()
	target, ok := n.children[req.OldName]
	n.mu.Unlock()
	if !ok {
		log.Printf("*mutDir.Rename src name %q isn't known", req.OldName)
		return fuse.ENOENT
	}

	now := time.Now()

	// Add a camliPath:name attribute to the dest permanode before unlinking it from
	// the source.
	claim := schema.NewSetAttributeClaim(n2.permanode, "camliPath:"+req.NewName, target.permanodeString())
	claim.SetClaimDate(now)
	_, err := n.fs.client.UploadAndSignBlob(claim)
	if err != nil {
		log.Printf("Upload rename link error: %v", err)
		return fuse.EIO
	}

	delClaim := schema.NewDelAttributeClaim(n.permanode, "camliPath:"+req.OldName, "")
	delClaim.SetClaimDate(now)
	_, err = n.fs.client.UploadAndSignBlob(delClaim)
	if err != nil {
		log.Printf("Upload rename src unlink error: %v", err)
		return fuse.EIO
	}

	// TODO(bradfitz): this locking would be racy, if the kernel
	// doesn't do it properly. (It should) Let's just trust the
	// kernel for now. Later we can verify and remove this
	// comment.
	n.mu.Lock()
	if n.children[req.OldName] != target {
		panic("Race.")
	}
	delete(n.children, req.OldName)
	n.mu.Unlock()
	n2.mu.Lock()
	n2.children[req.NewName] = target
	n2.mu.Unlock()

	return nil
}
Example #11
func (up *Uploader) Close() error {
	var grp syncutil.Group
	if up.haveCache != nil {
		grp.Go(up.haveCache.Close)
	}
	grp.Go(up.Client.Close)
	return grp.Err()
}
Example #12
File: mut.go Project: rayleyva/camlistore
func (n *mutDir) creat(name string, typ nodeType) (fuse.Node, error) {
	// Create a Permanode for the file/directory.
	pr, err := n.fs.client.UploadNewPermanode()
	if err != nil {
		return nil, err
	}

	var grp syncutil.Group
	grp.Go(func() (err error) {
		// Add a camliPath:name attribute to the directory permanode.
		claim := schema.NewSetAttributeClaim(n.permanode, "camliPath:"+name, pr.BlobRef.String())
		_, err = n.fs.client.UploadAndSignBlob(claim)
		return
	})
	if stupidMacExtendedAttributeName(name) {
		grp.Go(func() (err error) {
			// The name is a Mac extended-attribute artifact, so mark the new
			// permanode hidden by setting camliDefVis to "hide".
			claim := schema.NewSetAttributeClaim(pr.BlobRef, "camliDefVis", "hide")
			_, err = n.fs.client.UploadAndSignBlob(claim)
			return
		})
	}
	if err := grp.Err(); err != nil {
		return nil, err
	}

	// Add a child node to this node.
	var child mutFileOrDir
	switch typ {
	case dirType:
		child = &mutDir{
			fs:        n.fs,
			permanode: pr.BlobRef,
			parent:    n,
			name:      name,
		}
	case fileType, symlinkType:
		child = &mutFile{
			fs:        n.fs,
			permanode: pr.BlobRef,
			parent:    n,
			name:      name,
		}
	default:
		panic("bogus creat type")
	}
	n.mu.Lock()
	if n.children == nil {
		n.children = make(map[string]mutFileOrDir)
	}
	n.children[name] = child
	n.mu.Unlock()

	return child, nil
}
Example #13
func (c *LogicalConstraint) blobMatches(s *search, br blob.Ref, bm BlobMeta) (bool, error) {
	switch c.Op {
	case "and", "xor":
		if c.A == nil || c.B == nil {
			return false, errors.New("In LogicalConstraint, need both A and B set")
		}
		var g syncutil.Group
		var av, bv bool
		g.Go(func() (err error) {
			av, err = c.A.blobMatches(s, br, bm)
			return
		})
		g.Go(func() (err error) {
			bv, err = c.B.blobMatches(s, br, bm)
			return
		})
		if err := g.Err(); err != nil {
			return false, err
		}
		switch c.Op {
		case "and":
			return av && bv, nil
		case "xor":
			return av != bv, nil
		default:
			panic("unreachable")
		}
	case "or":
		if c.A == nil || c.B == nil {
			return false, errors.New("In LogicalConstraint, need both A and B set")
		}
		av, err := c.A.blobMatches(s, br, bm)
		if err != nil {
			return false, err
		}
		if av {
			// Short-circuit.
			return true, nil
		}
		return c.B.blobMatches(s, br, bm)
	case "not":
		if c.A == nil {
			return false, errors.New("In LogicalConstraint, need to set A")
		}
		if c.B != nil {
			return false, errors.New("In LogicalConstraint, can't specify B with Op \"not\"")
		}
		v, err := c.A.blobMatches(s, br, bm)
		return !v, err
	default:
		return false, fmt.Errorf("In LogicalConstraint, unknown operation %q", c.Op)
	}
}
Example #14
func (sto *s3Storage) RemoveBlobs(blobs []blob.Ref) error {
	var wg syncutil.Group

	for _, blob := range blobs {
		blob := blob
		removeGate.Start()
		wg.Go(func() error {
			defer removeGate.Done()
			return sto.s3Client.Delete(sto.bucket, blob.String())
		})
	}
	return wg.Err()

}
Example #15
File: deploy.go Project: Micrap/camlistore
// projectHasInstance checks all the possible zones for an existing instance in the project.
// It returns the name of the zone of the first instance it finds, if any.
func (d *Deployer) projectHasInstance() (zone string, err error) {
	s, err := compute.New(d.Client)
	if err != nil {
		return "", err
	}
	// TODO(mpl): make use of the handler's cached zones.
	zl, err := compute.NewZonesService(s).List(d.Conf.Project).Do()
	if err != nil {
		return "", fmt.Errorf("could not get a list of zones: %v", err)
	}
	computeService, _ := compute.New(d.Client)
	var zoneOnce sync.Once
	var grp syncutil.Group
	errc := make(chan error, 1)
	zonec := make(chan string, 1)
	timeout := time.NewTimer(30 * time.Second)
	defer timeout.Stop()
	for _, z := range zl.Items {
		z := z
		grp.Go(func() error {
			list, err := computeService.Instances.List(d.Conf.Project, z.Name).Do()
			if err != nil {
				return fmt.Errorf("could not list existing instances: %v", err)
			}
			if len(list.Items) > 0 {
				zoneOnce.Do(func() {
					zonec <- z.Name
				})
			}
			return nil
		})
	}
	go func() {
		errc <- grp.Err()
	}()
	// We block until either an instance is found in a zone, or all the instance
	// listing is done. Or we time out.
	select {
	case err = <-errc:
		return "", err
	case zone = <-zonec:
		// We voluntarily ignore any listing error if we found at least one instance
		// because that's what we primarily want to report about.
		return zone, nil
	case <-timeout.C:
		return "", errors.New("timed out")
	}
}
Example #16
// SetAttrs sets multiple attributes. The provided keyval should be an even number of alternating key/value pairs to set.
func (o *Object) SetAttrs(keyval ...string) error {
	if len(keyval)%2 == 1 {
		panic("importer.SetAttrs: odd argument count")
	}

	g := syncutil.Group{}
	for i := 0; i < len(keyval); i += 2 {
		key, val := keyval[i], keyval[i+1]
		if val != o.Attr(key) {
			g.Go(func() error {
				return o.SetAttr(key, val)
			})
		}
	}
	return g.Err()
}
Example #17
func (s *Storage) RemoveBlobs(blobs []blob.Ref) error {
	if s.cache != nil {
		s.cache.RemoveBlobs(blobs)
	}
	gate := syncutil.NewGate(50) // arbitrary
	var grp syncutil.Group
	for i := range blobs {
		gate.Start()
		br := blobs[i]
		grp.Go(func() error {
			defer gate.Done()
			return s.client.DeleteObject(&googlestorage.Object{Bucket: s.bucket, Key: s.dirPrefix + br.String()})
		})
	}
	return grp.Err()
}
Example #18
func (m *mongoStorage) RemoveBlobs(blobs []blob.Ref) error {
	var wg syncutil.Group

	for _, blob := range blobs {
		blob := blob
		removeGate.Start()
		wg.Go(func() error {
			defer removeGate.Done()
			err := m.c.Remove(bson.M{"key": blob.String()})
			if err == mgo.ErrNotFound {
				return nil
			}
			return err
		})
	}
	return wg.Err()

}
Example #19
File: roots.go Project: sfrdmn/camlistore
func (n *rootsDir) Mkdir(req *fuse.MkdirRequest, intr fs.Intr) (fs.Node, fuse.Error) {
	if n.isRO() {
		return nil, fuse.EPERM
	}

	name := req.Name

	// Create a Permanode for the root.
	pr, err := n.fs.client.UploadNewPermanode()
	if err != nil {
		log.Printf("rootsDir.Create(%q): %v", name, err)
		return nil, fuse.EIO
	}

	var grp syncutil.Group
	// Add a camliRoot attribute to the root permanode.
	grp.Go(func() (err error) {
		claim := schema.NewSetAttributeClaim(pr.BlobRef, "camliRoot", name)
		_, err = n.fs.client.UploadAndSignBlob(claim)
		return
	})
	// Set the title of the root permanode to the root name.
	grp.Go(func() (err error) {
		claim := schema.NewSetAttributeClaim(pr.BlobRef, "title", name)
		_, err = n.fs.client.UploadAndSignBlob(claim)
		return
	})
	if err := grp.Err(); err != nil {
		log.Printf("rootsDir.Create(%q): %v", name, err)
		return nil, fuse.EIO
	}

	nod := &mutDir{
		fs:        n.fs,
		permanode: pr.BlobRef,
		name:      name,
		xattrs:    map[string][]byte{},
	}
	n.mu.Lock()
	n.m[name] = pr.BlobRef
	n.mu.Unlock()

	return nod, nil
}
Example #20
func (h *memHub) NotifyBlobReceived(sb blob.SizedRef) error {
	h.mu.RLock()
	defer h.mu.RUnlock()

	br := sb.Ref

	// Callback channels to notify, nil until non-empty
	var notify []chan<- blob.Ref

	// Append global listeners
	for ch := range h.listeners {
		notify = append(notify, ch)
	}

	// Append blob-specific listeners
	if h.blobListeners != nil {
		if set, ok := h.blobListeners[br]; ok {
			for ch := range set {
				notify = append(notify, ch)
			}
		}
	}

	if len(notify) > 0 {
		// Run in a separate Goroutine so NotifyBlobReceived doesn't block
		// callers if callbacks are slow.
		go func() {
			for _, ch := range notify {
				ch <- br
			}
		}()
	}

	var grp syncutil.Group
	for i := range h.hooks {
		hook := h.hooks[i]
		grp.Go(func() error { return hook(sb) })
	}

	return grp.Err()
}
Example #21
func storePhoto(p photo) (string, error) {
	srcFile := localPathOf(p)

	f, err := os.Open(srcFile)
	if err != nil {
		return "", err
	}
	defer f.Close()

	fileRef, err := schema.WriteFileFromReader(camliClient, p.Id+"."+p.Extension, f)
	if err != nil {
		return "", err
	}

	res, err := camliClient.UploadNewPermanode()
	if err != nil {
		return "", err
	}
	perma := res.BlobRef

	p.Description = cleanHTML(p.Description)

	claims := []*schema.Builder{}
	claims = append(claims, schema.NewSetAttributeClaim(perma, "camliContent", fileRef.String()))
	claims = append(claims, schema.NewSetAttributeClaim(perma, "title", mkTitle(p.Description)))
	claims = append(claims, schema.NewSetAttributeClaim(perma, "description", p.Description))
	for _, t := range p.Tags {
		claims = append(claims, schema.NewAddAttributeClaim(perma, "tag", t))
	}
	if p.Cat == "Public" {
		claims = append(claims, schema.NewSetAttributeClaim(perma, "camliAccess", "public"))
	}

	grp := syncutil.Group{}
	for _, claimBuilder := range claims {
		claim := claimBuilder.Blob()
		grp.Go(func() error {
			_, err := camliClient.UploadAndSignBlob(claim)
			return err
		})
	}

	return perma.String(), grp.Err()
}
Example #22
func (s *storage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) (err error) {
	var wg syncutil.Group

	for _, br := range blobs {
		br := br
		statGate.Start()
		wg.Go(func() error {
			defer statGate.Done()

			m, err := s.meta(br)
			if err == nil {
				dest <- m.SizedRef(br)
				return nil
			}
			if err == os.ErrNotExist {
				return nil
			}
			return err
		})
	}
	return wg.Err()
}
Example #23
func (s *storage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
	if len(blobs) == 0 {
		return nil
	}

	var (
		grp        syncutil.Group
		trySmallMu sync.Mutex
		trySmall   []blob.Ref
	)
	statGate := syncutil.NewGate(50) // arbitrary
	for _, br := range blobs {
		br := br
		statGate.Start()
		grp.Go(func() error {
			defer statGate.Done()
			m, err := s.getMetaRow(br)
			if err != nil {
				return err
			}
			if m.exists {
				dest <- blob.SizedRef{Ref: br, Size: m.size}
			} else {
				trySmallMu.Lock()
				trySmall = append(trySmall, br)
				trySmallMu.Unlock()
			}
			return nil
		})
	}
	if err := grp.Err(); err != nil {
		return err
	}
	if len(trySmall) == 0 {
		return nil
	}
	return s.small.StatBlobs(dest, trySmall)
}
Example #24
// RemoveBlobs removes the blobs from the index and pads their data with zero bytes.
func (s *storage) RemoveBlobs(blobs []blob.Ref) error {
	batch := s.index.BeginBatch()
	var wg syncutil.Group
	for _, br := range blobs {
		br := br
		removeGate.Start()
		batch.Delete(br.String())
		wg.Go(func() error {
			defer removeGate.Done()
			if err := s.delete(br); err != nil {
				return err
			}
			return nil
		})
	}
	err1 := wg.Err()
	err2 := s.index.CommitBatch(batch)
	if err1 != nil {
		return err1
	}
	return err2
}
Example #25
func (sto *swiftStorage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
	var wg syncutil.Group

	for _, br := range blobs {
		br := br
		statGate.Start()
		wg.Go(func() error {
			defer statGate.Done()

			obj, _, err := sto.client.Object(sto.container, br.String())
			if err == nil {
				dest <- blob.SizedRef{Ref: br, Size: uint32(obj.Bytes)}
				return nil
			}
			if err == swift.ObjectNotFound {
				return nil
			}
			return fmt.Errorf("error statting %v: %v", br, err)
		})
	}
	return wg.Err()
}
Example #26
File: deploy.go Project: Micrap/camlistore
// getInstalledTLS returns the TLS certificate and key stored on Google Cloud Storage for the
// instance defined in d.Conf.
func (d *Deployer) getInstalledTLS() (certPEM, keyPEM []byte, err error) {
	ctx := cloud.NewContext(d.Conf.Project, d.Client)
	getFile := func(name string) ([]byte, error) {
		sr, err := cloudstorage.NewReader(ctx, d.Conf.bucketBase(),
			path.Join(configDir, name))
		if err != nil {
			return nil, err
		}
		defer sr.Close()
		return ioutil.ReadAll(sr)
	}
	var grp syncutil.Group
	grp.Go(func() (err error) {
		certPEM, err = getFile(certFilename)
		return
	})
	grp.Go(func() (err error) {
		keyPEM, err = getFile(keyFilename)
		return
	})
	err = grp.Err()
	return
}
Example #27
func (s *storage) zipPartsInUse(br blob.Ref) ([]blob.Ref, error) {
	var (
		mu    sync.Mutex
		inUse []blob.Ref
	)
	var grp syncutil.Group
	gate := syncutil.NewGate(20) // arbitrary constant
	err := s.foreachZipBlob(br, func(bap BlobAndPos) error {
		gate.Start()
		grp.Go(func() error {
			defer gate.Done()
			mr, err := s.getMetaRow(bap.Ref)
			if err != nil {
				return err
			}
			if mr.isPacked() {
				mu.Lock()
				inUse = append(inUse, mr.largeRef)
				mu.Unlock()
			}
			return nil
		})
		return nil
	})
	if os.IsNotExist(err) {
		// An already-deleted blob from large isn't considered
		// to be in-use.
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	if err := grp.Err(); err != nil {
		return nil, err
	}
	return inUse, nil
}
Example #28
func (r *run) importTweetsFromZip(userID string, zr *zip.Reader) error {
	log.Printf("Processing zip file with %d files", len(zr.File))

	tweetsNode, err := r.getTopLevelNode("tweets")
	if err != nil {
		return err
	}

	var (
		gate = syncutil.NewGate(tweetsAtOnce)
		grp  syncutil.Group
	)
	total := 0
	for _, zf := range zr.File {
		if !(strings.HasPrefix(zf.Name, "data/js/tweets/2") && strings.HasSuffix(zf.Name, ".js")) {
			continue
		}
		tweets, err := tweetsFromZipFile(zf)
		if err != nil {
			return fmt.Errorf("error reading tweets from %s: %v", zf.Name, err)
		}

		for i := range tweets {
			total++
			tweet := tweets[i]
			gate.Start()
			grp.Go(func() error {
				defer gate.Done()
				_, err := r.importTweet(tweetsNode, tweet, false)
				return err
			})
		}
	}
	err = grp.Err()
	log.Printf("zip import of tweets: %d total, err = %v", total, err)
	return err
}
Example #29
func CheckEnumerate(sto blobserver.Storage, wantUnsorted []blob.SizedRef, opts ...interface{}) error {
	var after string
	var n = 1000
	for _, opt := range opts {
		switch v := opt.(type) {
		case string:
			after = v
		case int:
			n = v
		default:
			panic("bad option of type " + fmt.Sprintf("%T", v))
		}
	}

	want := append([]blob.SizedRef(nil), wantUnsorted...)
	sort.Sort(blob.SizedByRef(want))

	sbc := make(chan blob.SizedRef, 10)

	var got []blob.SizedRef
	var grp syncutil.Group
	sawEnd := make(chan bool, 1)
	grp.Go(func() error {
		ctx := context.New()
		defer ctx.Cancel()
		if err := sto.EnumerateBlobs(ctx, sbc, after, n); err != nil {
			return fmt.Errorf("EnumerateBlobs(%q, %d): %v", after, n, err)
		}
		return nil
	})
	grp.Go(func() error {
		var lastRef blob.Ref
		for sb := range sbc {
			if !sb.Valid() {
				return fmt.Errorf("invalid blobref %#v received in enumerate", sb)
			}
			got = append(got, sb)
			if lastRef.Valid() && sb.Ref.Less(lastRef) {
				return fmt.Errorf("blobs appearing out of order")
			}
			lastRef = sb.Ref
		}
		sawEnd <- true
		return nil

	})
	grp.Go(func() error {
		select {
		case <-sawEnd:
			return nil
		case <-time.After(10 * time.Second):
			return errors.New("timeout waiting for EnumerateBlobs to close its channel")
		}

	})
	if err := grp.Err(); err != nil {
		return fmt.Errorf("Enumerate error: %v", err)
	}
	if len(got) == 0 && len(want) == 0 {
		return nil
	}
	var gotSet = map[blob.SizedRef]bool{}
	for _, sb := range got {
		if gotSet[sb] {
			return fmt.Errorf("duplicate blob %v returned in enumerate", sb)
		}
		gotSet[sb] = true
	}

	if !reflect.DeepEqual(got, want) {
		return fmt.Errorf("Enumerate mismatch. Got %d; want %d.\n Got: %v\nWant: %v\n",
			len(got), len(want), got, want)
	}
	return nil
}
Example #30
func (dr *DescribeRequest) describeReally(br blob.Ref, depth int) {
	meta, err := dr.sh.index.GetBlobMeta(br)
	if err == os.ErrNotExist {
		return
	}
	if err != nil {
		dr.addError(br, err)
		return
	}

	// TODO: convert all this in terms of
	// DescribedBlob/DescribedPermanode/DescribedFile, not json
	// maps.  Then add JSON marshalers to those types. Add tests.
	des := dr.describedBlob(br)
	if meta.CamliType != "" {
		des.setMIMEType("application/json; camliType=" + meta.CamliType)
	}
	des.Size = int64(meta.Size)

	switch des.CamliType {
	case "permanode":
		des.Permanode = new(DescribedPermanode)
		dr.populatePermanodeFields(des.Permanode, br, dr.sh.owner, depth)
	case "file":
		fi, err := dr.sh.index.GetFileInfo(br)
		if err != nil {
			if os.IsNotExist(err) {
				log.Printf("index.GetFileInfo(file %s) failed; index stale?", br)
			} else {
				dr.addError(br, err)
			}
			return
		}
		des.File = &fi
		if des.File.IsImage() && !skipImageInfoLookup(des.File) {
			imgInfo, err := dr.sh.index.GetImageInfo(br)
			if err != nil {
				if os.IsNotExist(err) {
					log.Printf("index.GetImageInfo(file %s) failed; index stale?", br)
				} else {
					dr.addError(br, err)
				}
			} else {
				des.Image = &imgInfo
			}
		}
	case "directory":
		var g syncutil.Group
		g.Go(func() (err error) {
			fi, err := dr.sh.index.GetFileInfo(br)
			if os.IsNotExist(err) {
				log.Printf("index.GetFileInfo(directory %s) failed; index stale?", br)
			}
			if err == nil {
				des.Dir = &fi
			}
			return
		})
		g.Go(func() (err error) {
			des.DirChildren, err = dr.getDirMembers(br, depth)
			return
		})
		if err := g.Err(); err != nil {
			dr.addError(br, err)
		}
	}
}