func ToNid(key uint64, cookie uint32) string { bytes := make([]byte, 12) util.Uint64toBytes(bytes[0:8], key) util.Uint32toBytes(bytes[8:12], cookie) nonzero_index := 0 for ; bytes[nonzero_index] == 0; nonzero_index++ { } return hex.EncodeToString(bytes[nonzero_index:]) }
func (n *FileId) String() string { bytes := make([]byte, 12) util.Uint64toBytes(bytes[0:8], n.Key) util.Uint32toBytes(bytes[8:12], n.Hashcode) nonzero_index := 0 for ; bytes[nonzero_index] == 0; nonzero_index++ { } return n.VolumeId.String() + "," + hex.EncodeToString(bytes[nonzero_index:]) }
func levelDbWrite(db *leveldb.DB, key uint64, offset uint32, size uint32) error { bytes := make([]byte, 16) util.Uint64toBytes(bytes[0:8], key) util.Uint32toBytes(bytes[8:12], offset) util.Uint32toBytes(bytes[12:16], size) if err := db.Put(bytes[0:8], bytes[8:16], nil); err != nil { return fmt.Errorf("failed to write leveldb: %v", err) } return nil }
func (m *LevelDbNeedleMap) Get(key uint64) (element *NeedleValue, ok bool) { bytes := make([]byte, 8) util.Uint64toBytes(bytes, key) data, err := m.db.Get(bytes, nil) if err != nil || len(data) != 8 { return nil, false } offset := util.BytesToUint32(data[0:4]) size := util.BytesToUint32(data[4:8]) return &NeedleValue{Key: Key(key), Offset: offset, Size: size}, true }
func (nm *baseNeedleMapper) appendToIndexFile(key uint64, offset uint32, size uint32) error { bytes := make([]byte, 16) util.Uint64toBytes(bytes[0:8], key) util.Uint32toBytes(bytes[8:12], offset) util.Uint32toBytes(bytes[12:16], size) nm.mutex.Lock() defer nm.mutex.Unlock() if _, err := nm.indexFile.Seek(0, 2); err != nil { return fmt.Errorf("cannot seek end of indexfile %s: %v", nm.indexFile.Name(), err) } _, err := nm.indexFile.Write(bytes) return err }
func boltDbDelete(db *bolt.DB, key uint64) error { bytes := make([]byte, 8) util.Uint64toBytes(bytes, key) return db.Update(func(tx *bolt.Tx) error { bucket, err := tx.CreateBucketIfNotExists(boltdbBucket) if err != nil { return err } err = bucket.Delete(bytes) if err != nil { return err } return nil }) }
func boltDbWrite(db *bolt.DB, key uint64, offset uint32, size uint32) error { bytes := make([]byte, 16) util.Uint64toBytes(bytes[0:8], key) util.Uint32toBytes(bytes[8:12], offset) util.Uint32toBytes(bytes[12:16], size) return db.Update(func(tx *bolt.Tx) error { bucket, err := tx.CreateBucketIfNotExists(boltdbBucket) if err != nil { return err } err = bucket.Put(bytes[0:8], bytes[8:16]) if err != nil { return err } return nil }) }
func (m *BoltDbNeedleMap) Get(key uint64) (element *NeedleValue, ok bool) { bytes := make([]byte, 8) var data []byte util.Uint64toBytes(bytes, key) err := m.db.View(func(tx *bolt.Tx) error { bucket := tx.Bucket(boltdbBucket) if bucket == nil { return fmt.Errorf("Bucket %q not found!", boltdbBucket) } data = bucket.Get(bytes) return nil }) if err != nil || len(data) != 8 { return nil, false } offset := util.BytesToUint32(data[0:4]) size := util.BytesToUint32(data[4:8]) return &NeedleValue{Key: Key(key), Offset: offset, Size: size}, true }
func levelDbDelete(db *leveldb.DB, key uint64) error { bytes := make([]byte, 8) util.Uint64toBytes(bytes, key) return db.Delete(bytes, nil) }
// Append serializes the needle to w in the on-disk format for the given
// version and returns the logical payload size written: n.Size for Version1,
// n.DataSize for Version2.
//
// If w also implements io.Seeker, the current write position is captured up
// front and restored by the deferred func whenever a later write fails, so a
// partially written needle does not corrupt the volume file.
func (n *Needle) Append(w io.Writer, version Version) (size uint32, err error) {
	if s, ok := w.(io.Seeker); ok {
		if end, e := s.Seek(0, 1); e == nil {
			// Roll the writer back to the pre-append offset on any error.
			defer func(s io.Seeker, off int64) {
				if err != nil {
					if _, e = s.Seek(off, 0); e != nil {
						glog.V(0).Infof("Failed to seek %s back to %d with error: %v", w, off, e)
					}
				}
			}(s, end)
		} else {
			err = fmt.Errorf("Cannot Read Current Volume Position: %v", e)
			return
		}
	}
	switch version {
	case Version1:
		// Version1 record: cookie(4) + id(8) + size(4) | data | checksum + padding.
		header := make([]byte, NeedleHeaderSize)
		util.Uint32toBytes(header[0:4], n.Cookie)
		util.Uint64toBytes(header[4:12], n.Id)
		n.Size = uint32(len(n.Data))
		size = n.Size
		util.Uint32toBytes(header[12:16], n.Size)
		if _, err = w.Write(header); err != nil {
			return
		}
		if _, err = w.Write(n.Data); err != nil {
			return
		}
		// Pad the full record out to a NeedlePaddingSize boundary.
		padding := NeedlePaddingSize - ((NeedleHeaderSize + n.Size + NeedleChecksumSize) % NeedlePaddingSize)
		// header is reused as scratch space for the checksum + padding bytes.
		util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value())
		_, err = w.Write(header[0 : NeedleChecksumSize+padding])
		return
	case Version2:
		header := make([]byte, NeedleHeaderSize)
		util.Uint32toBytes(header[0:4], n.Cookie)
		util.Uint64toBytes(header[4:12], n.Id)
		n.DataSize, n.NameSize, n.MimeSize = uint32(len(n.Data)), uint8(len(n.Name)), uint8(len(n.Mime))
		if n.DataSize > 0 {
			// n.Size counts the Version2 body: dataSize field(4) + data + flags(1),
			// plus each optional section that is present.
			n.Size = 4 + n.DataSize + 1
			if n.HasName() {
				n.Size = n.Size + 1 + uint32(n.NameSize)
			}
			if n.HasMime() {
				n.Size = n.Size + 1 + uint32(n.MimeSize)
			}
			if n.HasLastModifiedDate() {
				n.Size = n.Size + LastModifiedBytesLength
			}
			if n.HasTtl() {
				n.Size = n.Size + TtlBytesLength
			}
		} else {
			n.Size = 0
		}
		// NOTE(review): for Version2 the named result is set to DataSize, not
		// n.Size — the caller-visible size excludes the metadata sections.
		size = n.DataSize
		util.Uint32toBytes(header[12:16], n.Size)
		if _, err = w.Write(header); err != nil {
			return
		}
		if n.DataSize > 0 {
			// Body sections are written in the fixed order the reader expects:
			// dataSize, data, flags, [name], [mime], [lastModified], [ttl].
			// header[0:...] is reused as scratch space for each small field.
			util.Uint32toBytes(header[0:4], n.DataSize)
			if _, err = w.Write(header[0:4]); err != nil {
				return
			}
			if _, err = w.Write(n.Data); err != nil {
				return
			}
			util.Uint8toBytes(header[0:1], n.Flags)
			if _, err = w.Write(header[0:1]); err != nil {
				return
			}
			if n.HasName() {
				util.Uint8toBytes(header[0:1], n.NameSize)
				if _, err = w.Write(header[0:1]); err != nil {
					return
				}
				if _, err = w.Write(n.Name); err != nil {
					return
				}
			}
			if n.HasMime() {
				util.Uint8toBytes(header[0:1], n.MimeSize)
				if _, err = w.Write(header[0:1]); err != nil {
					return
				}
				if _, err = w.Write(n.Mime); err != nil {
					return
				}
			}
			if n.HasLastModifiedDate() {
				// Only the low LastModifiedBytesLength bytes of the 8-byte
				// encoding are stored on disk.
				util.Uint64toBytes(header[0:8], n.LastModified)
				if _, err = w.Write(header[8-LastModifiedBytesLength : 8]); err != nil {
					return
				}
			}
			if n.HasTtl() && n.Ttl != nil {
				n.Ttl.ToBytes(header[0:TtlBytesLength])
				if _, err = w.Write(header[0:TtlBytesLength]); err != nil {
					return
				}
			}
		}
		// Checksum plus padding to the next NeedlePaddingSize boundary.
		padding := NeedlePaddingSize - ((NeedleHeaderSize + n.Size + NeedleChecksumSize) % NeedlePaddingSize)
		util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value())
		_, err = w.Write(header[0 : NeedleChecksumSize+padding])
		return n.DataSize, err
	}
	return 0, fmt.Errorf("Unsupported Version! (%d)", version)
}
// makeupDiff replays writes that landed in the old .dat/.idx files after the
// last compaction snapshot (v.lastCompactIndexOffset / v.lastCompactRevision)
// onto the freshly compacted new .dat/.idx files, so no update or delete that
// raced with compaction is lost.
func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldIdxFileName string) (err error) {
	var indexSize int64
	// NOTE(review): the errors from both os.Open calls are ignored before the
	// deferred Closes; (*os.File)(nil).Close() returns ErrInvalid rather than
	// panicking, but a failed open only surfaces via later read errors —
	// confirm this is intended.
	oldIdxFile, err := os.Open(oldIdxFileName)
	defer oldIdxFile.Close()
	oldDatFile, err := os.Open(oldDatFileName)
	defer oldDatFile.Close()

	if indexSize, err = verifyIndexFileIntegrity(oldIdxFile); err != nil {
		return fmt.Errorf("verifyIndexFileIntegrity %s failed: %v", oldIdxFileName, err)
	}
	// Nothing was appended after the compaction snapshot — no diff to replay.
	if indexSize == 0 || uint64(indexSize) <= v.lastCompactIndexOffset {
		return nil
	}

	// The old dat file must still be the same revision the snapshot was taken
	// from, otherwise the recorded offsets are meaningless.
	oldDatCompactRevision, err := fetchCompactRevisionFromDatFile(oldDatFile)
	if err != nil {
		return
	}
	if oldDatCompactRevision != v.lastCompactRevision {
		return fmt.Errorf("current old dat file's compact revision %d is not the expected one %d", oldDatCompactRevision, v.lastCompactRevision)
	}

	type keyField struct {
		offset uint32
		size   uint32
	}
	incrementedHasUpdatedIndexEntry := make(map[uint64]keyField)

	// Walk the new index entries newest-first; the first occurrence of a key
	// wins, so only the latest state per key is kept.
	for idx_offset := indexSize - NeedleIndexSize; uint64(idx_offset) >= v.lastCompactIndexOffset; idx_offset -= NeedleIndexSize {
		var IdxEntry []byte
		if IdxEntry, err = readIndexEntryAtOffset(oldIdxFile, idx_offset); err != nil {
			return fmt.Errorf("readIndexEntry %s at offset %d failed: %v", oldIdxFileName, idx_offset, err)
		}
		key, offset, size := idxFileEntry(IdxEntry)
		if _, found := incrementedHasUpdatedIndexEntry[key]; !found {
			incrementedHasUpdatedIndexEntry[key] = keyField{
				offset: offset,
				size:   size,
			}
		}
	}

	if len(incrementedHasUpdatedIndexEntry) > 0 {
		var (
			dst, idx *os.File
		)
		if dst, err = os.OpenFile(newDatFileName, os.O_RDWR, 0644); err != nil {
			return
		}
		defer dst.Close()
		if idx, err = os.OpenFile(newIdxFileName, os.O_RDWR, 0644); err != nil {
			return
		}
		defer idx.Close()

		// The new dat file must be exactly one compaction ahead of the old one.
		var newDatCompactRevision uint16
		newDatCompactRevision, err = fetchCompactRevisionFromDatFile(dst)
		if err != nil {
			return
		}
		if oldDatCompactRevision+1 != newDatCompactRevision {
			return fmt.Errorf("oldDatFile %s 's compact revision is %d while newDatFile %s 's compact revision is %d", oldDatFileName, oldDatCompactRevision, newDatFileName, newDatCompactRevision)
		}

		idx_entry_bytes := make([]byte, 16)
		for key, incre_idx_entry := range incrementedHasUpdatedIndexEntry {
			// Serialize the replacement index entry: key(8)+offset(4)+size(4);
			// the offset slot may be overwritten below once the needle lands.
			util.Uint64toBytes(idx_entry_bytes[0:8], key)
			util.Uint32toBytes(idx_entry_bytes[8:12], incre_idx_entry.offset)
			util.Uint32toBytes(idx_entry_bytes[12:16], incre_idx_entry.size)

			var offset int64
			if offset, err = dst.Seek(0, 2); err != nil {
				glog.V(0).Infof("failed to seek the end of file: %v", err)
				return
			}
			//ensure file writing starting from aligned positions
			if offset%NeedlePaddingSize != 0 {
				offset = offset + (NeedlePaddingSize - offset%NeedlePaddingSize)
				// NOTE(review): the alignment seek targets v.dataFile rather
				// than dst, the file being appended to — verify this is not a
				// copy/paste slip.
				if offset, err = v.dataFile.Seek(offset, 0); err != nil {
					glog.V(0).Infof("failed to align in datafile %s: %v", v.dataFile.Name(), err)
					return
				}
			}

			//updated needle
			if incre_idx_entry.offset != 0 && incre_idx_entry.size != 0 {
				//even the needle cache in memory is hit, the need_bytes is correct
				var needle_bytes []byte
				needle_bytes, _, err = ReadNeedleBlob(oldDatFile, int64(incre_idx_entry.offset)*NeedlePaddingSize, incre_idx_entry.size)
				if err != nil {
					return
				}
				// NOTE(review): the Write error is ignored here; a short write
				// would leave dat and idx inconsistent — confirm acceptable.
				dst.Write(needle_bytes)
				// Re-point the index entry at the needle's new location
				// (offsets are stored in NeedlePaddingSize units).
				util.Uint32toBytes(idx_entry_bytes[8:12], uint32(offset/NeedlePaddingSize))
			} else { //deleted needle
				//fakeDelNeedle 's default Data field is nil
				fakeDelNeedle := new(Needle)
				fakeDelNeedle.Id = key
				fakeDelNeedle.Cookie = 0x12345678
				_, err = fakeDelNeedle.Append(dst, v.Version())
				if err != nil {
					return
				}
				// Offset 0 marks the key as deleted in the index.
				util.Uint32toBytes(idx_entry_bytes[8:12], uint32(0))
			}

			// Append the rewritten entry to the new index file.
			if _, err := idx.Seek(0, 2); err != nil {
				return fmt.Errorf("cannot seek end of indexfile %s: %v", newIdxFileName, err)
			}
			// NOTE(review): this Write's error is only checked implicitly by
			// being the loop's last assignment; a mid-loop failure is
			// overwritten on the next iteration — confirm intended.
			_, err = idx.Write(idx_entry_bytes)
		}
	}
	return nil
}