Code example #1
File: camput.go  Project: ipeet/camlistore
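This version uploads a regular file, symlink, or directory tree rooted at filename. Directory children are uploaded concurrently by a bounded pool of goroutines, and a stat cache lets files that were already uploaded be skipped.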
func (up *Uploader) UploadFile(filename string, rollSplits bool) (respr *client.PutResult, outerr os.Error) {
	up.getUploadToken()
	defer up.releaseUploadToken()

	fi, err := os.Lstat(filename)
	if err != nil {
		return nil, err
	}

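	// Fast path: if the stat cache already holds a PutResult for this
	// file's current os.FileInfo, reuse it. Otherwise defer recording
	// the fresh result; the named return values make it visible there.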
	if up.statCache != nil && fi.IsRegular() {
		cachedRes, err := up.statCache.CachedPutResult(up.pwd, filename, fi)
		if err == nil {
			cachelog.Printf("Cache HIT on %q -> %v", filename, cachedRes)
			return cachedRes, nil
		}
		defer func() {
			if respr != nil && outerr == nil {
				up.statCache.AddCachedPutResult(up.pwd, filename, fi, respr)
			}
		}()
	}

	m := schema.NewCommonFileMap(filename, fi)

	switch {
	case fi.IsRegular():
		m["camliType"] = "file"

		file, err := os.Open(filename)
		if err != nil {
			return nil, err
		}
		defer file.Close()

		statReceiver := up.altStatReceiver
		if statReceiver == nil {
			// TODO(bradfitz): just make Client be a
			// StatReceiver? move remote's ReceiveBlob ->
			// Upload wrapper into Client itself?
			statReceiver = remote.NewFromClient(up.Client)
		}

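		// Pick how the file map is written: with rollSplits, chunk
		// boundaries are chosen by a rolling checksum.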
		schemaWriteFileMap := schema.WriteFileMap
		if rollSplits {
			schemaWriteFileMap = schema.WriteFileMapRolling
		}
		blobref, err := schemaWriteFileMap(statReceiver, m, io.LimitReader(file, fi.Size))
		if err != nil {
			return nil, err
		}
		// TODO(bradfitz): taking a PutResult here is kinda
		// gross.  should instead make a blobserver.Storage
		// wrapper type that can track some of this?  or that
		// updates the client stats directly or something.
		{
			json, _ := schema.MapToCamliJson(m)
			pr := &client.PutResult{BlobRef: blobref, Size: int64(len(json)), Skipped: false}
			return pr, nil
		}
	case fi.IsSymlink():
		if err = schema.PopulateSymlinkMap(m, filename); err != nil {
			return nil, err
		}
	case fi.IsDirectory():
		ss := new(schema.StaticSet)
		dir, err := os.Open(filename)
		if err != nil {
			return nil, err
		}
		dirNames, err := dir.Readdirnames(-1)
		if err != nil {
			return nil, err
		}
		dir.Close()
		sort.Strings(dirNames)

		// Temporarily give up our upload token while we
		// process all our children.  The defer function makes
		// sure we re-acquire it (keeping balance in the
		// world) before we return.
		up.releaseUploadToken()
		tokenTookBack := false
		defer func() {
			if !tokenTookBack {
				up.getUploadToken()
			}
		}()

		rate := make(chan bool, 100) // max outstanding goroutines, further limited by filecapc
		type nameResult struct {
			name   string
			putres *client.PutResult
			err    os.Error
		}

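		// Fan out one goroutine per child (throttled by rate), then
		// collect exactly len(dirNames) results in the loop below.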
		resc := make(chan nameResult, buffered)
		go func() {
			for _, name := range dirNames {
				rate <- true
				go func(dirEntName string) {
					pr, err := up.UploadFile(filename+"/"+dirEntName, rollSplits)
					if pr == nil && err == nil {
						log.Fatalf("nil/nil from up.UploadFile on %q", filename+"/"+dirEntName)
					}
					resc <- nameResult{dirEntName, pr, err}
					<-rate
				}(name)
			}
		}()
		resm := make(map[string]*client.PutResult)
		var entUploadErr os.Error
		for _ = range dirNames {
			r := <-resc
			if r.err != nil {
				entUploadErr = fmt.Errorf("error uploading %s: %v", r.name, r.err)
				continue
			}
			resm[r.name] = r.putres
		}
		if entUploadErr != nil {
			return nil, entUploadErr
		}
		for _, name := range dirNames {
			ss.Add(resm[name].BlobRef)
		}

		// Re-acquire the upload token that we temporarily yielded up above.
		up.getUploadToken()
		tokenTookBack = true

		sspr, err := up.UploadMap(ss.Map())
		if err != nil {
			return nil, err
		}
		schema.PopulateDirectoryMap(m, sspr.BlobRef)
	case fi.IsBlock():
		fallthrough
	case fi.IsChar():
		fallthrough
	case fi.IsSocket():
		fallthrough
	case fi.IsFifo():
		fallthrough
	default:
		return nil, schema.ErrUnimplemented
	}

	mappr, err := up.UploadMap(m)
	if err == nil {
		vlog.Printf("Uploaded %q, %s for %s", m["camliType"], mappr.BlobRef, filename)
	} else {
		vlog.Printf("Error uploading map %v: %v", m, err)
	}
	return mappr, err
}
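
The parallel directory case above combines two throttles: the uploader's token, which is yielded while children are processed and re-acquired before returning, and the buffered rate channel acting as a counting semaphore. A minimal standalone sketch of that semaphore pattern, independent of Camlistore (the names and the limit of 4 are illustrative):

package main

import (
	"fmt"
	"sync"
)

func main() {
	sem := make(chan bool, 4) // channel capacity bounds in-flight goroutines, like rate above
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		sem <- true // acquire a slot; blocks while 4 workers are running
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot when done
			fmt.Println("processing entry", n)
		}(i)
	}
	wg.Wait() // the original instead counts results off a result channel
}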
Code example #2
File: camput.go  Project: marsch/camlistore
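An earlier revision of the same function from another fork: regular files are uploaded as a single blob (smart chunking is still a TODO), and directory entries are walked sequentially rather than in parallel.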
func (up *Uploader) UploadFile(filename string) (*client.PutResult, os.Error) {
	fi, err := os.Lstat(filename)
	if err != nil {
		return nil, err
	}

	m := schema.NewCommonFileMap(filename, fi)

	switch {
	case fi.IsRegular():
		// Put the blob of the file itself.  (TODO: smart boundary chunking)
		// For now we just store it as one range.
		blobpr, err := up.UploadFileBlob(filename)
		if err != nil {
			return nil, err
		}
		parts := []schema.ContentPart{{BlobRef: blobpr.BlobRef, Size: blobpr.Size}}
		if blobpr.Size != fi.Size {
			// TODO: handle races of file changing while reading it
			// after the stat.
		}
		if err = schema.PopulateRegularFileMap(m, fi, parts); err != nil {
			return nil, err
		}
	case fi.IsSymlink():
		if err = schema.PopulateSymlinkMap(m, filename); err != nil {
			return nil, err
		}
	case fi.IsDirectory():
		ss := new(schema.StaticSet)
		dir, err := os.Open(filename, os.O_RDONLY, 0)
		if err != nil {
			return nil, err
		}
		dirNames, err := dir.Readdirnames(-1)
		if err != nil {
			return nil, err
		}
		dir.Close()
		sort.SortStrings(dirNames)
		// TODO: process dirName entries in parallel
		for _, dirEntName := range dirNames {
			pr, err := up.UploadFile(filename + "/" + dirEntName)
			if err != nil {
				return nil, err
			}
			ss.Add(pr.BlobRef)
		}
		sspr, err := up.UploadMap(ss.Map())
		if err != nil {
			return nil, err
		}
		schema.PopulateDirectoryMap(m, sspr.BlobRef)
	case fi.IsBlock():
		fallthrough
	case fi.IsChar():
		fallthrough
	case fi.IsSocket():
		fallthrough
	case fi.IsFifo():
		fallthrough
	default:
		return nil, schema.UnimplementedError
	}

	mappr, err := up.UploadMap(m)
	return mappr, err
}
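
Both revisions predate Go 1: os.Error later became the built-in error type, the three-argument os.Open was split into os.Open and os.OpenFile, sort.SortStrings became sort.Strings, and the fi.IsRegular()-style predicates moved onto os.FileMode. For reference, a minimal sketch of the same file-type dispatch against the modern API (the path is illustrative):

package main

import (
	"fmt"
	"os"
)

func main() {
	fi, err := os.Lstat("some/path") // Lstat, so symlinks are not followed
	if err != nil {
		fmt.Println(err)
		return
	}
	switch mode := fi.Mode(); {
	case mode.IsRegular():
		fmt.Println("regular file")
	case mode&os.ModeSymlink != 0:
		fmt.Println("symlink")
	case mode.IsDir():
		fmt.Println("directory")
	default:
		fmt.Println("unsupported file type") // block/char device, socket, FIFO, ...
	}
}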