Пример #1
0
// NewKeyValue returns a KeyValue implementation on top of a
// github.com/cznic/kv file.
func NewKeyValue(cfg jsonconfig.Obj) (sorted.KeyValue, error) {
	file := cfg.RequiredString("file")
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	// Open the DB file if it already exists; create it otherwise.
	open, verb := kv.Open, "opening"
	if _, err := os.Stat(file); os.IsNotExist(err) {
		open, verb = kv.Create, "creating"
	}
	// Guard the DB with a sidecar ".lock" file next to it.
	opts := &kv.Options{
		Locker: func(dbname string) (io.Closer, error) {
			lockFile := dbname + ".lock"
			closer, err := lock.Lock(lockFile)
			if err != nil {
				return nil, fmt.Errorf("failed to acquire lock on %s: %v", lockFile, err)
			}
			return closer, nil
		},
	}
	db, err := open(file, opts)
	if err != nil {
		return nil, fmt.Errorf("error %s %s: %v", verb, file, err)
	}
	return &kvis{
		db:   db,
		opts: opts,
		path: file,
	}, nil
}
Пример #2
0
// Open opens the named kv DB file for reading/writing. It
// creates the file if it does not exist yet.
func Open(dbFile string, opts *kv.Options) (*kv.DB, error) {
	// Pick create vs. open depending on whether the file exists.
	openOrCreate, verb := kv.Open, "opening"
	if _, err := os.Stat(dbFile); os.IsNotExist(err) {
		openOrCreate, verb = kv.Create, "creating"
	}
	if opts == nil {
		opts = &kv.Options{}
	}
	if opts.Locker == nil {
		// Default locker: a sidecar ".lock" file next to the DB file.
		opts.Locker = func(name string) (io.Closer, error) {
			lockFile := name + ".lock"
			closer, err := lock.Lock(lockFile)
			if err != nil {
				return nil, fmt.Errorf("failed to acquire lock on %s: %v", lockFile, err)
			}
			return closer, nil
		}
	}
	// Setting CAMLI_KV_VERIFY to a true value turns on kv's internal
	// consistency checks around open and close.
	if verify, _ := strconv.ParseBool(os.Getenv("CAMLI_KV_VERIFY")); verify {
		opts.VerifyDbBeforeOpen = true
		opts.VerifyDbAfterOpen = true
		opts.VerifyDbBeforeClose = true
		opts.VerifyDbAfterClose = true
	}
	db, err := openOrCreate(dbFile, opts)
	if err != nil {
		return nil, fmt.Errorf("error %s %s: %v", verb, dbFile, err)
	}
	return db, nil
}
Пример #3
0
// Open opens the named kv DB file for reading/writing. It
// creates the file if it does not exist yet.
func Open(filePath string, opts *kv.Options) (*kv.DB, error) {
	// TODO(mpl): use it in index pkg and such
	createOpen := kv.Open
	if _, err := os.Stat(filePath); os.IsNotExist(err) {
		createOpen = kv.Create
	}
	if opts == nil {
		opts = &kv.Options{}
	}
	if opts.Locker == nil {
		// Lock on the name kv hands to the Locker rather than
		// capturing filePath from the enclosing scope; this matches
		// the Locker closures used elsewhere in this codebase.
		// (kv presumably passes the DB file path here, so the lock
		// file ends up next to the DB — same result either way.)
		opts.Locker = func(fullPath string) (io.Closer, error) {
			return lock.Lock(fullPath + ".lock")
		}
	}
	return createOpen(filePath, opts)
}
Пример #4
0
// NewStorage returns an index.Storage backed by the kv file at file,
// plus a Closer that closes the underlying DB.
func NewStorage(file string) (index.Storage, io.Closer, error) {
	// Create the DB file only when it doesn't exist yet.
	open := kv.Open
	if _, err := os.Stat(file); os.IsNotExist(err) {
		open = kv.Create
	}
	opts := &kv.Options{
		Locker: func(dbname string) (io.Closer, error) {
			return lock.Lock(dbname + ".lock")
		},
	}
	db, err := open(file, opts)
	if err != nil {
		return nil, nil, err
	}
	// Wrap db so the returned Closer exposes only io.Closer.
	return &kvis{db: db}, struct{ io.Closer }{db}, nil
}
Пример #5
0
// openForWrite will create or open pack file n for writes, create a lock
// visible external to the process and seek to the end of the file ready for
// appending new data.
// This function is not thread safe, s.mu should be locked by the caller.
func (s *storage) openForWrite(n int) error {
	fn := s.filename(n)
	l, err := lock.Lock(fn + ".lock")
	if err != nil {
		return err
	}
	f, err := os.OpenFile(fn, os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		l.Close()
		return err
	}
	openFdsVar.Add(s.root, 1)
	debug.Printf("diskpacked: opened for write %q", fn)

	s.size, err = f.Seek(0, os.SEEK_END)
	if err != nil {
		return err
	}

	s.writer = f
	s.writeLock = l
	return nil
}
Пример #6
0
// openCurrent makes sure the current data file is open as s.current.
func (s *storage) openCurrent() error {
	if s.current == nil {
		// First run; find the latest file data file and open it
		// and seek to the end.
		// If no data files exist, leave s.current as nil.
		for {
			_, err := os.Stat(s.filename(s.currentN))
			if os.IsNotExist(err) {
				break
			}
			if err != nil {
				return err
			}
			s.currentN++
		}
		if s.currentN > 0 {
			s.currentN--
			l, err := lock.Lock(s.filename(s.currentN) + ".lock")
			if err != nil {
				return err
			}
			f, err := os.OpenFile(s.filename(s.currentN), os.O_RDWR, 0666)
			if err != nil {
				l.Close()
				return err
			}
			o, err := f.Seek(0, os.SEEK_END)
			if err != nil {
				l.Close()
				return err
			}
			s.current, s.currentL, s.currentO = f, l, o
		}
	}

	// If s.current is open and it's too big,close it and advance currentN.
	if s.current != nil && s.currentO > s.maxFileSize {
		f, l := s.current, s.currentL
		s.current, s.currentL, s.currentO = nil, nil, 0
		s.currentN++
		if err := f.Close(); err != nil {
			l.Close()
			return err
		}
		if err := l.Close(); err != nil {
			return err
		}
	}

	// If we don't have the current file open, make one.
	if s.current == nil {
		l, err := lock.Lock(s.filename(s.currentN) + ".lock")
		if err != nil {
			return err
		}
		f, err := os.Create(s.filename(s.currentN))
		if err != nil {
			l.Close()
			return err
		}
		s.current, s.currentL, s.currentO = f, l, 0
	}
	return nil
}
Пример #7
0
// reindexOne scans the pack file read from r (named name, pack id packId),
// parsing each "[ref size]" header, and either verifies the index entries
// against it (default) or rewrites them (overwrite). It returns an error if
// the data cannot be parsed or, in verify mode, if the index disagrees.
func reindexOne(index sorted.KeyValue, overwrite, verbose bool, r io.ReadSeeker, name string, packId int64) error {
	// Hold the pack file's lock while scanning it.
	l, err := lock.Lock(name + ".lock")
	if err != nil {
		// Previously err was ignored and l.Close() deferred
		// unconditionally, panicking on a nil Closer if locking failed.
		return err
	}
	defer l.Close()

	var pos, size int64

	// errAt builds an error mentioning the current offset in the pack file.
	// prefix and suffix are passed as %s arguments rather than concatenated
	// into the format string, so a '%' inside them (e.g. from err.Error())
	// cannot corrupt the formatting.
	errAt := func(prefix, suffix string) error {
		if prefix != "" {
			prefix = prefix + " "
		}
		if suffix != "" {
			suffix = " " + suffix
		}
		return fmt.Errorf("%sat %d (0x%x) in %q:%s", prefix, pos, pos, name, suffix)
	}

	var batch sorted.BatchMutation
	if overwrite {
		batch = index.BeginBatch()
	}

	allOk := true
	br := bufio.NewReaderSize(r, 512)
	for {
		// Each blob is framed as "[<blobref> <size>]<data>".
		if b, err := br.ReadByte(); err != nil {
			if err == io.EOF {
				break
			}
			return errAt("error while reading", err.Error())
		} else if b != '[' {
			return errAt(fmt.Sprintf("found byte 0x%x", b), "but '[' should be here!")
		}
		chunk, err := br.ReadSlice(']')
		if err != nil {
			if err == io.EOF {
				break
			}
			return errAt("error reading blob header", err.Error())
		}
		m := len(chunk)
		chunk = chunk[:m-1] // drop the trailing ']'
		i := bytes.IndexByte(chunk, byte(' '))
		if i <= 0 {
			return errAt("", fmt.Sprintf("bad header format (no space in %q)", chunk))
		}
		if size, err = strconv.ParseInt(string(chunk[i+1:]), 10, 64); err != nil {
			return errAt(fmt.Sprintf("cannot parse size %q as int", chunk[i+1:]), err.Error())
		}
		ref, ok := blob.Parse(string(chunk[:i]))
		if !ok {
			return errAt("", fmt.Sprintf("cannot parse %q as blobref", chunk[:i]))
		}
		if verbose {
			log.Printf("found %s at %d", ref, pos)
		}

		// meta records where the blob's data starts (just past the header).
		meta := blobMeta{packId, pos + 1 + int64(m), size}.String()
		if overwrite && batch != nil {
			batch.Set(ref.String(), meta)
		} else {
			if old, err := index.Get(ref.String()); err != nil {
				allOk = false
				if err == sorted.ErrNotFound {
					log.Println(ref.String() + ": cannot find in index!")
				} else {
					log.Println(ref.String()+": error getting from index: ", err.Error())
				}
			} else if old != meta {
				allOk = false
				log.Printf("%s: index mismatch - index=%s data=%s", ref.String(), old, meta)
			}
		}

		pos += 1 + int64(m)
		// TODO(tgulacsi78): not just seek, but check the hashes of the files
		// maybe with a different command-line flag, only.
		if pos, err = r.Seek(pos+size, 0); err != nil {
			return errAt("", "cannot seek +"+strconv.FormatInt(size, 10)+" bytes")
		}
		// drain the buffer after the underlying reader Seeks
		io.CopyN(ioutil.Discard, br, int64(br.Buffered()))
	}

	if overwrite && batch != nil {
		log.Printf("overwriting %s from %s", index, name)
		if err = index.CommitBatch(batch); err != nil {
			return err
		}
	} else if !allOk {
		return fmt.Errorf("index does not match data in %q", name)
	}
	return nil
}