func ZlibCompress(src bytearray.ByteArray) (dst bytearray.ByteArray) { src.ReadSeek(0, bytearray.SEEK_SET) zw := zpool.GetWriter(&dst) CopyOrPanic(zw, &src) zw.Close() zpool.PutWriter(zw) return dst }
func ZlibUncompress(src bytearray.ByteArray) (dst bytearray.ByteArray) { src.ReadSeek(0, bytearray.SEEK_SET) zr, err := zlib.NewReader(&src) if err != nil { panic(err) } defer zr.Close() CopyOrPanic(&dst, zr) return dst }
// storeFile reads the file at path, splits it into content-defined blocks
// using a rolling checksum, stores each block through session.Client, and
// records the resulting block reference(s) on entry (either a single data
// block or a FileChainBlock listing all blocks).
func (session *BackupSession) storeFile(path string, entry *FileEntry) (err error) {
	defer func() { // Panic error handling
		if r := recover(); r != nil {
			// we need this because some obscure files on OSX does open but then generates "bad file descriptor" on read
			if e, ok := r.(*os.PathError); ok && e.Err == syscall.EBADF {
				err = e.Err // Demote EBADF to an ordinary error return
			} else {
				panic(r) // Any other error is not normal and should panic
			}
		}
	}()

	// Block IDs of every stored data block; passed as link references when
	// the chain block itself is stored below.
	var links []core.Byte128

	chain := FileChainBlock{}

	var file *os.File
	if file, err = os.Open(path); err != nil {
		return err
	}
	defer file.Close()

	// maxSum carries the rolling-checksum state across loop iterations so
	// each block's scan continues from the previous split point.
	var maxSum rollsum.Rollsum
	maxSum.Init()

	var fileData bytearray.ByteArray
	defer fileData.Release()

	// Walk the file until entry.FileSize bytes have been consumed; each
	// iteration emits exactly one stored block.
	for offset := int64(0); offset < int64(entry.FileSize); {
		Debug("storeFile(%s) offset %d", path, offset)

		session.PrintStoreProgress(PROGRESS_INTERVAL_SECS)

		// Cap the read so the buffer never holds more than MAX_BLOCK_SIZE
		// or more than what remains of the file.
		var left int64 = int64(entry.FileSize) - offset
		var maxBlockSize int = MAX_BLOCK_SIZE
		if left < int64(maxBlockSize) {
			maxBlockSize = int(left)
		}

		var blockData bytearray.ByteArray

		// Fill the fileData buffer (top up whatever was left after the
		// previous split).
		core.CopyNOrPanic(&fileData, file, maxBlockSize-fileData.Len())
		fileData.ReadSeek(0, os.SEEK_CUR) // TODO: figure out why this line is here because I do not remember

		// Default split: take the whole buffer. Only buffers larger than
		// two minimum blocks are candidates for a rolling-sum split.
		var splitPosition int = fileData.Len()
		if fileData.Len() > MIN_BLOCK_SIZE*2 { // Candidate for rolling sum split
			// Two shallow copies give two independent read cursors over
			// the same data: rollIn feeds bytes into the window, rollOut
			// drops bytes off its tail (window width = MIN_BLOCK_SIZE).
			rollIn, rollOut := fileData, fileData // Shallow copy the file data
			rollInBase, rollOutBase := 0, 0
			rollInPos, rollOutPos := 0, 0
			rollInSlice, _ := rollIn.ReadSlice()
			rollOutSlice, _ := rollOut.ReadSlice()
			partSum := maxSum
			var maxd = uint32(0)
			for rollInPos < fileData.Len() {
				// ByteArray exposes its data as consecutive slices; advance
				// to the next slice once the current one is exhausted.
				if rollInPos-rollInBase >= len(rollInSlice) { // Next slice please
					rollInBase, _ = rollIn.ReadSeek(len(rollInSlice), os.SEEK_CUR)
					rollInSlice, _ = rollIn.ReadSlice()
				}
				if rollOutPos-rollOutBase >= len(rollOutSlice) { // Next slice please
					rollOutBase, _ = rollOut.ReadSeek(len(rollOutSlice), os.SEEK_CUR)
					rollOutSlice, _ = rollOut.ReadSlice()
				}

				// Once the window is full, every new byte rolled in pushes
				// the oldest byte out.
				if rollInPos >= MIN_BLOCK_SIZE {
					partSum.Rollout(rollOutSlice[rollOutPos-rollOutBase])
					rollOutPos++
				}
				partSum.Rollin(rollInSlice[rollInPos-rollInBase])
				rollInPos++

				// Track the position with the highest digest seen so far;
				// that maximum becomes the split point for this block.
				if rollInPos >= MIN_BLOCK_SIZE {
					d := partSum.Digest()
					if d >= maxd {
						maxd = d
						splitPosition = rollInPos
						maxSum = partSum // Keep the sum so we can continue from here
					}
				}
			}
		}

		// Split and swap: blockData takes the bytes up to splitPosition,
		// fileData keeps the remainder for the next iteration.
		right := fileData.Split(splitPosition)
		blockData = fileData
		fileData = right

		offset += int64(blockData.Len())
		session.ReadData += int64(blockData.Len())

		// TODO: add encryption and custom compression here
		var datakey core.Byte128 // zero key until encryption is implemented

		id := session.Client.StoreData(core.BlockDataTypeZlib, blockData, nil)
		links = append(links, id)
		chain.ChainBlocks = append(chain.ChainBlocks, id)
		chain.DecryptKeys = append(chain.DecryptKeys, datakey)
	}

	// NOTE(review): a zero-length file never enters the loop above, leaving
	// chain.ChainBlocks empty, so the else branch below would index out of
	// range — presumably callers never route empty files here; confirm.
	if len(chain.ChainBlocks) > 1 {
		// Multi-block file: store the chain block itself, linking it to
		// every data block so the store keeps them alive.
		id := session.Client.StoreData(core.BlockDataTypeZlib, SerializeToByteArray(chain), links)
		entry.ContentType = ContentTypeFileChain
		entry.ContentBlockID = id
	} else {
		// Single-block file: reference the data block directly, no chain.
		entry.ContentType = ContentTypeFileData
		entry.ContentBlockID = chain.ChainBlocks[0]
		entry.DecryptKey = chain.DecryptKeys[0]
	}
	return nil
}