Example no. 1
0
// readIndex calls x.Read with the index's backing file.
//
// The backing file is opened from fs, decompressed with gzip, and handed
// to x.Read. If the file does not exist, the error is wrapped in
// *errIndexNotExist so callers can distinguish "index not built" from a
// real failure. The named result lets the deferred Close report its error
// when nothing else failed first.
func readIndex(fs rwvfs.FileSystem, name string, x persistedIndex) (err error) {
	vlog.Printf("%s: reading index...", name)
	var f vfs.ReadSeekCloser
	f, err = fs.Open(fmt.Sprintf(indexFilename, name))
	if err != nil {
		vlog.Printf("%s: failed to read index: %s.", name, err)
		if os.IsNotExist(err) {
			return &errIndexNotExist{name: name, err: err}
		}
		return err
	}
	// Close the backing file on all paths; propagate its error only if no
	// earlier error takes precedence.
	defer func() {
		err2 := f.Close()
		if err == nil {
			err = err2
		}
	}()

	r, err := gzip.NewReader(f)
	if err != nil {
		return err
	}

	if err := x.Read(r); err != nil {
		// BUG FIX: the gzip reader was never closed on this path.
		// Close it best-effort; x.Read's error is the one reported.
		r.Close()
		return err
	}
	// Closing the gzip reader also verifies the stream checksum, so a
	// Close error after a successful Read still matters.
	if err := r.Close(); err != nil {
		return err
	}
	vlog.Printf("%s: done reading index.", name)
	return nil
}
Example no. 2
0
// addFile adds a file to the index if possible and returns the file set file
// and the file's AST if it was successfully parsed as a Go file. If addFile
// failed (that is, if the file was not added), it returns file == nil.
//
// NOTE: the named result "ast" shadows the ast package within the body, so
// the package itself cannot be referenced after the signature.
func (x *Indexer) addFile(f vfs.ReadSeekCloser, filename string, goFile bool) (file *token.File, ast *ast.File) {
	// The file is always closed, whether or not it ends up indexed.
	defer f.Close()

	// The file set's base offset and x.sources size must be in lock-step;
	// this permits the direct mapping of suffix array lookup results to
	// corresponding Pos values.
	//
	// When a file is added to the file set, its offset base increases by
	// the size of the file + 1; and the initial base offset is 1. Add an
	// extra byte to the sources here.
	x.sources.WriteByte(0)

	// If the sources length doesn't match the file set base at this point
	// the file set implementation changed or we have another error.
	base := x.fset.Base()
	if x.sources.Len() != base {
		panic("internal error: file base incorrect")
	}

	// append file contents (src) to x.sources
	if _, err := x.sources.ReadFrom(f); err == nil {
		// src aliases the bytes just appended to x.sources; positions in
		// it correspond to fset offsets starting at base.
		src := x.sources.Bytes()[base:]

		if goFile {
			// parse the file and in the process add it to the file set
			if ast, err = parser.ParseFile(x.fset, filename, src, parser.ParseComments); err == nil {
				file = x.fset.File(ast.Pos()) // ast.Pos() is inside the file
				return
			}
			// file has parse errors, and the AST may be incorrect -
			// set lines information explicitly and index as ordinary
			// text file (cannot fall through to the text case below
			// because the file has already been added to the file set
			// by the parser)
			file = x.fset.File(token.Pos(base)) // token.Pos(base) is inside the file
			file.SetLinesForContent(src)
			ast = nil
			return
		}

		if util.IsText(src) {
			// only add the file to the file set (for the full text index)
			file = x.fset.AddFile(filename, x.fset.Base(), len(src))
			file.SetLinesForContent(src)
			return
		}
	}

	// discard possibly added data (read failure, or binary non-Go file);
	// -1 to remove added byte 0 since no file was added
	x.sources.Truncate(base - 1)
	return
}
Example no. 3
0
// walkFiles recursively descends path, calling walkFn.
// It closes the input file after it's done with it, so the caller shouldn't.
func walkFiles(fs http.FileSystem, path string, info os.FileInfo, file vfs.ReadSeekCloser, walkFn WalkFilesFunc) error {
	// Visit this node first, then release its file no matter what walkFn said.
	walkErr := walkFn(path, info, file, nil)
	file.Close()
	switch {
	case walkErr == filepath.SkipDir && info.IsDir():
		return nil // caller asked to skip this directory's contents
	case walkErr != nil:
		return walkErr
	case !info.IsDir():
		return nil // a plain file has nothing to descend into
	}

	names, dirErr := readDirNames(fs, path)
	if dirErr != nil {
		// Surface the directory-listing failure through the callback.
		return walkFn(path, info, nil, dirErr)
	}

	for _, name := range names {
		childPath := pathpkg.Join(path, name)
		child, childInfo, statErr := openStat(fs, childPath)
		if statErr != nil {
			if cbErr := walkFn(childPath, nil, nil, statErr); cbErr != nil && cbErr != filepath.SkipDir {
				return cbErr
			}
			continue
		}
		// The recursive call closes child; do not close it here.
		if descErr := walkFiles(fs, childPath, childInfo, child, walkFn); descErr != nil {
			if !childInfo.IsDir() || descErr != filepath.SkipDir {
				return descErr
			}
		}
	}
	return nil
}
Example no. 4
0
// testOpen exercises fs.Open (full reads) and OpenFetcher (ranged partial
// reads) against a single file, checking both the bytes returned and the
// HTTP byte ranges actually requested.
func testOpen(t *testing.T, fs rwvfs.FileSystem) {
	const path = "testOpen"

	// Deterministic, poorly-compressible payload. The loops run i,j in
	// [0,255) (byte 255 never appears); the [10:] slice misaligns the data
	// so range arithmetic isn't trivially aligned.
	var buf bytes.Buffer
	for i := uint8(0); i < 255; i++ {
		for j := uint8(0); j < 255; j++ {
			buf.Write([]byte{i, j})
		}
	}
	fullData := []byte(base64.StdEncoding.EncodeToString(buf.Bytes()))[10:]
	fullLen := int64(len(fullData))
	createFile(t, fs, path, fullData)

	{
		// Full reads.
		f, err := fs.Open(path)
		if err != nil {
			t.Fatal(err)
		}
		b, err := ioutil.ReadAll(f)
		if err != nil {
			t.Fatal(err)
		}
		if err := f.Close(); err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(b, fullData) {
			t.Errorf("full read: got %q, want %q", b, fullData)
		}
	}

	{
		// Partial reads. Route HTTP traffic through a transport that
		// records the Range headers sent.
		rrt := &rangeRecordingTransport{}
		fs.(*S3FS).config.Client = &http.Client{Transport: rrt}

		var f vfs.ReadSeekCloser

		// {start, end} byte ranges to fetch and then read back.
		cases := [][2]int64{
			{0, 0},
			{0, 1},
			{0, 2},
			{1, 1},
			{0, 3},
			{1, 3},
			{2, 3},
			{0, 2},
			{0, 3},
			{3, 4},
			{0, fullLen / 2},
			{1, fullLen / 2},
			{fullLen / 3, fullLen / 2},
			{0, fullLen - 1},
			{1, fullLen - 1},
			{fullLen / 2, fullLen/2 + 1333},
			{fullLen / 2, fullLen/2 + 1},
			{fullLen / 2, fullLen/2 + 2},
			{fullLen / 2, fullLen / 2},
			{fullLen - 10, fullLen - 1},
		}
		for _, autofetch := range []bool{false, true} {
			for _, reuse := range []bool{false, true} {
				for i, c := range cases {
					// Open a fresh fetcher unless reusing the previous
					// one across cases.
					if !reuse || i == 0 {
						var err error
						f, err = fs.(rwvfs.FetcherOpener).OpenFetcher(path)
						if err != nil {
							t.Fatal(err)
						}
					}

					// BUG FIX: pass the loop variable, not the constant
					// true — otherwise the autofetch=false iterations
					// never actually disable autofetching, and the
					// checkOnlyReadRange assertion below tests nothing.
					f.(interface {
						SetAutofetch(bool)
					}).SetAutofetch(autofetch)

					rrt.reset()

					start, end := c[0], c[1]
					label := fmt.Sprintf("range %d-%d (autofetch=%v, reuse=%v)", start, end, autofetch, reuse)

					fetchEnd := end
					if autofetch {
						// Short fetch: force the later read to trigger
						// an automatic fetch of the remainder.
						fetchEnd = (start + end) / 2
						if fetchEnd < start {
							fetchEnd = end
						}
					}
					if err := f.(rwvfs.Fetcher).Fetch(start, fetchEnd); err != nil {
						t.Error(err)
						continue
					}

					n, err := f.Seek(start, 0)
					if err != nil {
						t.Errorf("%s: %s", label, err)
						continue
					}
					if n != start {
						// BUG FIX: include label, as all sibling errors do.
						t.Errorf("%s: got post-Seek offset %d, want %d", label, n, start)
					}
					b, err := ioutil.ReadAll(io.LimitReader(f, end-start))
					if err != nil {
						t.Errorf("%s: ReadAll: %s", label, err)
						continue
					}

					// trunc shortens long payloads in failure messages.
					trunc := func(b []byte) string {
						if len(b) > 75 {
							return string(b[:75]) + "..." + string(b[len(b)-5:]) + fmt.Sprintf(" (%d bytes total)", len(b))
						}
						return string(b)
					}
					if want := fullData[start:end]; !bytes.Equal(b, want) {
						t.Errorf("%s: full read: got %q, want %q", label, trunc(b), trunc(want))
						continue
					}

					if start != end && !reuse {
						if len(rrt.readRanges) == 0 {
							t.Errorf("%s: no read ranges, want range %d-%d", label, start, end)
						}
					}
					if !autofetch {
						// Without autofetch, only the explicitly fetched
						// range may have been requested.
						if err := rrt.checkOnlyReadRange(start, end); err != nil {
							t.Errorf("%s: %s", label, err)
						}
					}

					if !reuse || i == len(cases)-1 {
						if err := f.Close(); err != nil {
							t.Fatal(err)
						}
					}
				}
			}
		}
	}
}
Example no. 5
0
// main implements a small CLI over an HTTP-backed VFS with four
// subcommands: cat, ls, put, and rm.
func main() {
	log.SetFlags(0)
	flag.Parse()

	if flag.NArg() != 2 {
		log.Fatal("error: usage: httpvfs-client [opts] <cat|ls|put|rm> <path>")
	}
	cmd := flag.Arg(0)
	target := path.Clean(flag.Arg(1))

	baseURL, err := url.Parse(*urlStr)
	if err != nil {
		log.Fatal(err)
	}

	fsys := rwvfs.HTTP(baseURL, nil)

	switch strings.ToLower(cmd) {
	case "cat":
		if *startByte != 0 && *endByte < *startByte {
			log.Fatal("error: -end-byte must be greater than -start-byte")
		}

		// A nonzero start byte requires a fetcher-capable handle so we
		// can range-fetch before reading.
		var f vfs.ReadSeekCloser
		var openErr error
		if *startByte != 0 {
			f, openErr = fsys.(rwvfs.FetcherOpener).OpenFetcher(target)
		} else {
			f, openErr = fsys.Open(target)
		}
		if openErr != nil {
			log.Fatal(openErr)
		}
		defer func() {
			if cerr := f.Close(); cerr != nil {
				log.Fatal(cerr)
			}
		}()

		if *startByte != 0 {
			if err := f.(rwvfs.Fetcher).Fetch(int64(*startByte), int64(*endByte)); err != nil {
				log.Fatalf("Fetch bytes=%d-%d: %s", *startByte, *endByte, err)
			}
			if _, err := f.Seek(int64(*startByte), 0); err != nil {
				log.Fatalln("Seek:", err)
			}
		}

		// Bound the copy when an explicit end byte was given.
		rdr := io.Reader(f)
		if *endByte != -1 {
			rdr = io.LimitReader(f, int64(*endByte-*startByte))
		}

		if _, err := io.Copy(os.Stdout, rdr); err != nil {
			log.Fatalln("Copy:", err)
		}

	case "ls":
		entries, err := fsys.ReadDir(target)
		if err != nil {
			log.Fatal(err)
		}

		// Column width: longest name, plus one for the "/" suffix on dirs.
		widest := 0
		for _, fi := range entries {
			if n := len(fi.Name()); n > widest {
				widest = n
			}
		}
		widest++ // account for "/" suffix on dirs

		for _, fi := range entries {
			name := fi.Name()
			if fi.IsDir() {
				name += "/"
			}
			mtime := fi.ModTime().Round(time.Second)
			fmt.Printf("%-*s   %s   %d\n", widest, name, mtime, fi.Size())
		}

	case "put":
		out, err := fsys.Create(target)
		if err != nil {
			log.Fatal(err)
		}
		defer func() {
			if cerr := out.Close(); cerr != nil {
				log.Fatal(cerr)
			}
		}()
		log.Println("(reading file data on stdin...)")
		if _, err := io.Copy(out, os.Stdin); err != nil {
			log.Fatal(err)
		}

	case "rm":
		if err := fsys.Remove(target); err != nil {
			log.Fatal(err)
		}

	default:
		log.Fatal("error: invalid op (see -h)")
	}
}