func Restore(blobpath string, c btncrypt.Cipher, bs blobstore.BlobStore, cb DecodeCallback) error {
	r, err := bs.OpenReader(blobpath)
	if err != nil {
		return err
	}
	cr, err := chunkstore.NewChunkReader(r, c)
	if err != nil {
		return err
	}
	logger.Debugf(mylog, "serialized blob size: %d", cr.Length())
	zr, err := zlib.NewReader(&io.LimitedReader{R: cr, N: int64(cr.Length())})
	if err != nil {
		return err
	}
	logger.Debugf(mylog, "statesnapshot.Restore: zlib init success!")
	dec := gob.NewDecoder(zr)

	es := []error{}
	if err := cb(dec); err != nil {
		es = append(es, fmt.Errorf("Failed to decode state: %v", err))
	}
	if err := zr.Close(); err != nil {
		es = append(es, fmt.Errorf("Failed to close zlib Reader: %v", err))
	}
	if err := r.Close(); err != nil {
		es = append(es, fmt.Errorf("Failed to close BlobHandle: %v", err))
	}
	return util.ToErrors(es)
}
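// Usage sketch (an illustration, not part of the original source), assuming
// DecodeCallback is func(*gob.Decoder) error as its use above suggests:
// restore a gob-encoded snapshot into a caller-owned value. `snapshotState`
// is a hypothetical type standing in for whatever the caller persists, and
// the blobpath is an arbitrary example value.
func restoreExample(c btncrypt.Cipher, bs blobstore.BlobStore) error {
	var st snapshotState // hypothetical state type
	return Restore("inodedb-snapshot", c, bs, func(dec *gob.Decoder) error {
		return dec.Decode(&st)
	})
}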
func (db *DB) applyTransactionInternal(tx DBTransaction, writeTxLogFlag bool) (TxID, error) {
	logger.Debugf(mylog, "applyTransactionInternal(%+v, writeTxLog: %t)", tx, writeTxLogFlag)

	if tx.TxID == AnyVersion {
		tx.TxID = db.state.version + 1
	} else if tx.TxID != db.state.version+1 {
		return 0, fmt.Errorf("Attempted to apply tx %d to dbver %d. Next accepted tx is %d", tx.TxID, db.state.version, db.state.version+1)
	}

	for _, op := range tx.Ops {
		if err := op.Apply(db.state); err != nil {
			if rerr := db.RestoreVersion(db.state.version); rerr != nil {
				logger.Panicf(mylog, "Following Error: %v. DB rollback failed!!!: %v", err, rerr)
			}
			return 0, err
		}
	}

	if writeTxLogFlag == writeTxLog {
		if err := db.txLogIO.AppendTransaction(tx); err != nil {
			if rerr := db.RestoreVersion(db.state.version); rerr != nil {
				logger.Panicf(mylog, "Failed to write txlog: %v. DB rollback failed!!!: %v", err, rerr)
			}
			return 0, fmt.Errorf("Failed to write txlog: %v", err)
		}
	}

	db.state.version = tx.TxID
	db.stats.LastTx = time.Now()
	return tx.TxID, nil
}
func (cbv *CachedBackendVersion) Query(blobpath string) (version.Version, error) {
	cbv.mu.Lock()
	defer cbv.mu.Unlock() // FIXME: unlock earlier?

	if ver, ok := cbv.cache[blobpath]; ok {
		logger.Debugf(mylog, "return cached ver for \"%s\" -> %d", blobpath, ver)
		return ver, nil
	}

	r, err := cbv.backendbs.OpenReader(blobpath)
	if err != nil {
		if err == ENOENT {
			cbv.cache[blobpath] = 0
			return 0, nil
		}
		return -1, fmt.Errorf("Failed to open backend blob for ver query: %v", err)
	}
	defer func() {
		if err := r.Close(); err != nil {
			logger.Criticalf(mylog, "Failed to close backend blob handle for querying version: %v", err)
		}
	}()

	ver, err := cbv.queryVersion(r)
	if err != nil {
		return -1, fmt.Errorf("Failed to query backend blob ver: %v", err)
	}

	cbv.cache[blobpath] = ver
	return ver, nil
}
func (txio *CachedDBTransactionLogIO) QueryTransactions(minID TxID) ([]DBTransaction, error) {
	if minID < txio.oldestTxID {
		logger.Debugf(clog, "Queried id range of \">= %d\" is not cached. Falling back to backend.", minID)
		return txio.be.QueryTransactions(minID)
	}
	return txio.QueryCachedTransactions(minID)
}
func (cfio *ChunkedFileIO) newFileChunk(newo int64) (inodedb.FileChunk, error) {
	bpath, err := blobstore.GenerateNewBlobPath(cfio.bs)
	if err != nil {
		return inodedb.FileChunk{}, fmt.Errorf("Failed to generate new blobpath: %v", err)
	}
	fc := inodedb.FileChunk{Offset: newo, Length: 0, BlobPath: bpath}
	logger.Debugf(mylog, "new chunk %+v", fc)
	return fc, nil
}
func (ch *ChunkIO) PRead(p []byte, offset int64) error {
	if err := ch.ensureHeader(); err != nil {
		return err
	}

	if offset < 0 || math.MaxInt32 < offset {
		return fmt.Errorf("Offset out of int32 range: %d", offset)
	}

	logger.Debugf(mylog, "ChunkIO: PRead off %d len %d. Chunk payload len: %d", offset, len(p), ch.PayloadLen())

	remo := int(offset)
	remp := p
	for len(remp) > 0 {
		i := remo / ContentFramePayloadLength
		f, err := ch.readContentFrame(i)
		if err != nil {
			return err
		}
		inframeOffset := remo - f.Offset
		if inframeOffset < 0 {
			panic("ASSERT: inframeOffset must be non-negative here")
		}
		// logger.Debugf(mylog, "ChunkIO: PRead: Decoded content frame. %+v", f)

		n := len(remp)
		valid := len(f.P) - inframeOffset // valid payload after offset
		logger.Debugf(mylog, "ChunkIO: PRead n: %d. valid: %d", n, valid)
		if n > valid {
			if f.IsLastFrame {
				return fmt.Errorf("Attempted to read beyond written size: %d. inframeOffset: %d, framePayloadLen: %d", remo, inframeOffset, len(f.P))
			}
			n = valid
		}

		copy(remp[:n], f.P[inframeOffset:])
		logger.Debugf(mylog, "ChunkIO: PRead: read %d bytes for off %d len %d", n, remo, len(remp))

		remo += n
		remp = remp[n:]
	}
	return nil
}
func (ch *ChunkIO) Sync() error {
	if ch.needsHeaderUpdate {
		if err := ch.header.WriteTo(&blobstore.OffsetWriter{ch.bh, 0}, ch.c); err != nil {
			return fmt.Errorf("Header write failed: %v", err)
		}
		logger.Debugf(mylog, "Wrote chunk header: %+v", ch.header)
		ch.needsHeaderUpdate = false
	}
	return nil
}
func (n FileNode) Open(ctx context.Context, req *bfuse.OpenRequest, resp *bfuse.OpenResponse) (bfs.Handle, error) {
	logger.Debugf(mylog, "Open flags: %s", req.Flags.String())

	fh, err := n.fs.OpenFile(n.id, Bazil2OtaruFlags(req.Flags))
	if err != nil {
		return nil, err
	}

	return FileHandle{fh}, nil
}
func TestWriterLogger(t *testing.T) {
	var b bytes.Buffer
	l := logger.WriterLogger{&b}

	logger.Debugf(l, "foobar")

	expre := regexp.MustCompile(`writerlogger_test.go:\d+: foobar\n`)
	if !expre.Match(b.Bytes()) {
		t.Errorf("Unexpected: %s", b.String())
	}
}
func (wc *FileWriteCache) PWrite(p []byte, offset int64) error {
	pcopy := make([]byte, len(p))
	copy(pcopy, p)

	newp := intn.Patch{Offset: offset, P: pcopy}
	logger.Debugf(wclog, "PWrite: %v", newp)
	// logger.Debugf(wclog, "PWrite: p=%v", pcopy)

	wc.ps = wc.ps.Merge(newp)
	return nil
}
func (d DirNode) Setattr(ctx context.Context, req *bfuse.SetattrRequest, resp *bfuse.SetattrResponse) error {
	logger.Debugf(mylog, "Setattr mode %o", req.Mode)

	if err := otaruSetattr(d.fs, d.id, req); err != nil {
		return err
	}

	if err := d.Attr(ctx, &resp.Attr); err != nil {
		return err
	}
	return nil
}
func (fh FileHandle) Write(ctx context.Context, req *bfuse.WriteRequest, resp *bfuse.WriteResponse) error {
	logger.Debugf(mylog, "Write offset %d size %d", req.Offset, len(req.Data))
	if fh.h == nil {
		return EBADF
	}

	if err := fh.h.PWrite(req.Data, req.Offset); err != nil {
		return err
	}
	resp.Size = len(req.Data)
	return nil
}
func (fh FileHandle) Read(ctx context.Context, req *bfuse.ReadRequest, resp *bfuse.ReadResponse) error {
	logger.Debugf(mylog, "Read offset %d size %d", req.Offset, req.Size)
	if fh.h == nil {
		return EBADF
	}

	resp.Data = resp.Data[:req.Size]
	n, err := fh.h.ReadAt(resp.Data, req.Offset)
	if err != nil {
		return err
	}
	resp.Data = resp.Data[:n]
	return nil
}
func (wc *FileWriteCache) Sync(bh blobstore.PWriter) error {
	for _, p := range wc.ps {
		if p.IsSentinel() {
			continue
		}

		logger.Debugf(wclog, "Sync: %v", p)
		// logger.Debugf(wclog, "Sync: p=%v", p.P)

		if err := bh.PWrite(p.P, p.Offset); err != nil {
			return err
		}
	}
	wc.ps = wc.ps.Reset()
	return nil
}
func RetryIfNeeded(f func() error, mylog logger.Logger) (err error) {
	const numRetries = 3
	for i := 0; i < numRetries; i++ {
		start := time.Now()
		err = f()
		if err == nil {
			return
		}
		if !IsShouldRetryError(err) {
			return
		}
		// Only log "Retrying" when another attempt will actually follow.
		if i < numRetries-1 {
			logger.Debugf(mylog, "A Google Cloud Datastore operation has failed after %s. Retrying %d / %d...", time.Since(start), i+1, numRetries)
		}
	}
	return
}
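// Usage sketch (illustrative, not part of the original source): wrap a flaky
// Cloud Datastore call so that transient failures, as classified by
// IsShouldRetryError, are attempted up to three times. `commitEntry` is a
// hypothetical helper; any func() error works.
func commitWithRetry(mylog logger.Logger) error {
	return RetryIfNeeded(func() error {
		return commitEntry() // hypothetical Datastore operation
	}, mylog)
}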
func (loc *INodeDBSSLocator) DeleteAll() ([]string, error) {
	start := time.Now()

	cli, err := loc.cfg.getClient(context.TODO())
	if err != nil {
		return nil, err
	}
	dstx, err := cli.NewTransaction(context.TODO(), datastore.Serializable)
	if err != nil {
		return nil, err
	}

	keys := make([]*datastore.Key, 0)
	blobpaths := make([]string, 0)
	q := datastore.NewQuery(kindINodeDBSS).Ancestor(loc.rootKey).Transaction(dstx)
	it := cli.Run(context.TODO(), q)
	for {
		var e sslocentry
		k, err := it.Next(&e)
		if err != nil {
			if err == datastore.Done {
				break
			}
			dstx.Rollback()
			return nil, err
		}
		keys = append(keys, k)
		blobpaths = append(blobpaths, e.BlobPath)
	}

	logger.Debugf(sslog, "keys to delete: %v", keys)
	if err := dstx.DeleteMulti(keys); err != nil {
		dstx.Rollback()
		return nil, err
	}

	if _, err := dstx.Commit(); err != nil {
		return nil, err
	}

	logger.Infof(sslog, "DeleteAll() deleted %d entries. Took %s", len(keys), time.Since(start))
	return blobpaths, nil
}
func TestHandleCritical(t *testing.T) {
	called := false
	h := logger.HandleCritical(func() { called = true })

	logger.Debugf(h, "debug")
	if called {
		t.Errorf("Shouldn't be triggered from debug msg")
	}

	logger.Criticalf(h, "critical")
	if !called {
		t.Errorf("Should be triggered from critical msg")
	}

	called = false
	logger.Criticalf(h, "critical2")
	if !called {
		t.Errorf("Should be triggered again from second critical msg")
	}
}
func (ch *ChunkIO) expandLengthBy(by int) error {
	if by < 0 {
		panic("Tried to expand by negative length")
	}

	if by == 0 {
		return nil
	}

	len64 := int64(ch.PayloadLen())
	if len64+int64(by) > MaxChunkPayloadLen {
		return fmt.Errorf("Payload length out of range. Current: %d += %d", len64, by)
	}

	ch.header.PayloadLen = uint32(ch.PayloadLen() + by)
	logger.Debugf(mylog, "ChunkIO expandLength +%d = %d", by, ch.header.PayloadLen)
	ch.needsHeaderUpdate = true
	return nil
}
func (n FileNode) Setattr(ctx context.Context, req *bfuse.SetattrRequest, resp *bfuse.SetattrResponse) error {
	if req.Valid.Size() {
		logger.Debugf(mylog, "Setattr size %d", req.Size)
		if req.Size > math.MaxInt64 {
			return fmt.Errorf("specified size too big: %d", req.Size)
		}
		if err := n.fs.TruncateFile(n.id, int64(req.Size)); err != nil {
			return err
		}
	}

	if err := otaruSetattr(n.fs, n.id, req); err != nil {
		return err
	}

	if err := n.Attr(ctx, &resp.Attr); err != nil {
		return err
	}
	return nil
}
func (ch *ChunkIO) writeContentFrame(i int, f *decryptedContentFrame) error {
	// the offset of the start of the frame in the blob
	blobOffset := ch.encryptedFrameOffset(i)

	wr := &blobstore.OffsetWriter{ch.bh, int64(blobOffset)}
	bew, err := btncrypt.NewWriteCloser(wr, ch.c, len(f.P))
	if err != nil {
		return fmt.Errorf("Failed to create BtnEncryptWriteCloser: %v", err)
	}
	defer func() {
		if err := bew.Close(); err != nil {
			logger.Criticalf(mylog, "Failed to Close BtnEncryptWriteCloser: %v", err)
		}
	}()

	if _, err := bew.Write(f.P); err != nil {
		return fmt.Errorf("Failed to encrypt frame: %v", err)
	}
	ch.header.PayloadVersion++
	ch.needsHeaderUpdate = true

	logger.Debugf(mylog, "ChunkIO: Wrote content frame idx: %d", i)
	return nil
}
func (db *DB) RestoreVersion(version TxID) error {
	logger.Infof(mylog, "RestoreVersion(%d) start.", version)

	state, err := db.snapshotIO.RestoreSnapshot()
	if err != nil {
		return fmt.Errorf("Failed to restore snapshot: %v", err)
	}

	oldState := db.state
	db.state = state

	ssver := state.version
	logger.Infof(mylog, "Restored snapshot of ver %d.", ssver)

	if state.version > version {
		return fmt.Errorf("Can't rollback to version %d, which is older than snapshot version %d", version, state.version)
	}

	logger.Infof(mylog, "RestoreVersion(%d): restored ver: %d", version, ssver)

	txlog, err := db.txLogIO.QueryTransactions(ssver + 1)
	if txlog == nil || err != nil {
		db.state = oldState
		return fmt.Errorf("Failed to query txlog: %v", err)
	}

	for _, tx := range txlog {
		logger.Debugf(mylog, "RestoreVersion(%d): apply tx ver %d", version, tx.TxID)
		if _, err := db.applyTransactionInternal(tx, skipTxLog); err != nil {
			db.state = oldState
			return fmt.Errorf("Failed to replay tx: %v", err)
		}
	}

	logger.Infof(mylog, "Fast-forwarded txlog from ver %d to %d.", ssver, db.state.version)
	return nil
}
func (ch *ChunkIO) readContentFrame(i int) (*decryptedContentFrame, error) {
	// the frame carries a part of the content at `offset`
	offset := i * ContentFramePayloadLength

	// payload length of the encrypted frame
	framePayloadLen := ContentFramePayloadLength
	isLastFrame := false
	distToLast := ch.PayloadLen() - offset
	if distToLast <= ContentFramePayloadLength {
		framePayloadLen = distToLast
		isLastFrame = true
	}

	// the offset of the start of the frame in the blob
	blobOffset := ch.encryptedFrameOffset(i)

	rd := &blobstore.OffsetReader{ch.bh, int64(blobOffset)}
	bdr, err := btncrypt.NewReader(rd, ch.c, framePayloadLen)
	if err != nil {
		return nil, fmt.Errorf("Failed to create BtnDecryptReader: %v", err)
	}

	p := make([]byte, framePayloadLen, ContentFramePayloadLength)
	if _, err := io.ReadFull(bdr, p); err != nil {
		return nil, fmt.Errorf("Failed to decrypt frame idx: %d, err: %v", i, err)
	}
	if !bdr.HasReadAll() {
		panic("Incomplete frame read")
	}

	logger.Debugf(mylog, "ChunkIO: Read content frame idx: %d", i)
	return &decryptedContentFrame{
		P:           p,
		Offset:      offset,
		IsLastFrame: isLastFrame,
	}, nil
}
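// Illustrative sketch (not part of the original source): a payload offset
// maps to a frame index and an in-frame offset via the fixed
// ContentFramePayloadLength, mirroring the arithmetic in readContentFrame
// and PRead above (f.Offset == frameIdx * ContentFramePayloadLength).
func frameIndexAndOffset(payloadOffset int) (frameIdx, inframeOffset int) {
	frameIdx = payloadOffset / ContentFramePayloadLength
	inframeOffset = payloadOffset % ContentFramePayloadLength
	return frameIdx, inframeOffset
}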
func (wc *FileWriteCache) ReadAtThrough(p []byte, offset int64, r ReadAter) (int, error) {
	nr := 0
	remo := offset
	remp := p

	for _, patch := range wc.ps {
		if len(remp) == 0 {
			return nr, nil
		}
		if patch.IsSentinel() {
			break
		}

		if remo > patch.Right() {
			continue
		}

		if remo < patch.Left() {
			fallbackLen64 := util.Int64Min(int64(len(remp)), patch.Left()-remo)
			if fallbackLen64 > math.MaxInt32 {
				panic("Logic error: fallbackLen should always be in int32 range")
			}
			fallbackLen := int(fallbackLen64)

			n, err := r.ReadAt(remp[:fallbackLen], remo)
			logger.Debugf(wclog, "BeforePatch: ReadAt issued offset %d, len %d bytes, read %d bytes", remo, fallbackLen, n)
			if err != nil {
				return nr + n, err
			}
			if n < fallbackLen {
				zerofill(remp[n:fallbackLen])
			}

			nr += fallbackLen
			remp = remp[fallbackLen:]
			remo += int64(fallbackLen)
		}

		if len(remp) == 0 {
			return nr, nil
		}

		applyOffset64 := remo - patch.Offset
		if applyOffset64 > math.MaxInt32 {
			panic("Logic error: applyOffset should always be in int32 range")
		}
		applyOffset := int(applyOffset64)
		applyLen := util.IntMin(len(patch.P)-applyOffset, len(remp))
		copy(remp[:applyLen], patch.P[applyOffset:])

		nr += applyLen
		remp = remp[applyLen:]
		remo += int64(applyLen)
	}

	n, err := r.ReadAt(remp, remo)
	logger.Debugf(wclog, "Last: ReadAt read %d bytes", n)
	if err != nil {
		return nr, err
	}
	nr += n

	return nr, nil
}
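// Test helper sketch (illustrative, not part of the original source): a
// minimal in-memory ReadAter for exercising ReadAtThrough without a real
// blob handle. Reads starting past the end of the backing slice return
// io.EOF, which ReadAtThrough propagates to its caller.
type sliceReadAter []byte

func (s sliceReadAter) ReadAt(p []byte, off int64) (int, error) {
	if off >= int64(len(s)) {
		return 0, io.EOF
	}
	n := copy(p, s[off:])
	if n < len(p) {
		return n, io.EOF // short read at the end of the backing slice
	}
	return n, nil
}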
func main() {
	log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)
	logger.Registry().AddOutput(logger.WriterLogger{os.Stderr})
	flag.Usage = Usage
	flag.Parse()

	cfg, err := facade.NewConfig(*flagConfigDir)
	if err != nil {
		logger.Criticalf(mylog, "%v", err)
		Usage()
		os.Exit(2)
	}
	if flag.NArg() != 1 {
		Usage()
		os.Exit(2)
	}
	mountpoint := flag.Arg(0)

	if err := facade.SetupFluentLogger(cfg); err != nil {
		logger.Criticalf(mylog, "Failed to setup fluentd logger: %v", err)
		os.Exit(1)
	}

	o, err := facade.NewOtaru(cfg, &facade.OneshotConfig{Mkfs: *flagMkfs})
	if err != nil {
		logger.Criticalf(mylog, "NewOtaru failed: %v", err)
		os.Exit(1)
	}

	var muClose sync.Mutex
	closeOtaruAndExit := func(exitCode int) {
		muClose.Lock()
		defer muClose.Unlock()

		if err := bfuse.Unmount(mountpoint); err != nil {
			logger.Warningf(mylog, "umount err: %v", err)
		}
		if o != nil {
			if err := o.Close(); err != nil {
				logger.Warningf(mylog, "Otaru.Close() returned errs: %v", err)
			}
			o = nil
		}
		os.Exit(exitCode)
	}
	defer closeOtaruAndExit(0)

	sigC := make(chan os.Signal, 1)
	signal.Notify(sigC, os.Interrupt)
	signal.Notify(sigC, syscall.SIGTERM)
	go func() {
		for s := range sigC {
			logger.Warningf(mylog, "Received signal: %v", s)
			closeOtaruAndExit(1)
		}
	}()
	logger.Registry().AddOutput(logger.HandleCritical(func() {
		logger.Warningf(mylog, "Starting shutdown due to critical event.")
		closeOtaruAndExit(1)
	}))

	bfuseLogger := logger.Registry().Category("bfuse")
	bfuse.Debug = func(msg interface{}) { logger.Debugf(bfuseLogger, "%v", msg) }

	if err := fuse.ServeFUSE(cfg.BucketName, mountpoint, o.FS, nil); err != nil {
		logger.Warningf(mylog, "ServeFUSE failed: %v", err)
		closeOtaruAndExit(1)
	}
	logger.Infof(mylog, "ServeFUSE end!")
}
func (cfio *ChunkedFileIO) PWrite(p []byte, offset int64) error {
	logger.Debugf(mylog, "PWrite: offset=%d, len=%d", offset, len(p))
	// logger.Debugf(mylog, "PWrite: p=%v", p)
	remo := offset
	remp := p
	if len(remp) == 0 {
		return nil
	}

	cs, err := cfio.caio.Read()
	if err != nil {
		return fmt.Errorf("Failed to read cs array: %v", err)
	}

	writeToChunk := func(c *inodedb.FileChunk, isNewChunk bool, maxChunkLen int64) error {
		if !fl.IsReadWriteAllowed(cfio.bs.Flags()) {
			return EPERM
		}

		flags := fl.O_RDWR
		if isNewChunk {
			flags |= fl.O_CREATE | fl.O_EXCL
		}
		bh, err := cfio.bs.Open(c.BlobPath, flags)
		if err != nil {
			return fmt.Errorf("Failed to open path \"%s\" for writing (isNewChunk: %t): %v", c.BlobPath, isNewChunk, err)
		}
		defer func() {
			if err := bh.Close(); err != nil {
				logger.Criticalf(mylog, "blobhandle Close failed: %v", err)
			}
		}()

		cio := cfio.newChunkIO(bh, cfio.c, c.Offset)
		defer func() {
			if err := cio.Close(); err != nil {
				logger.Criticalf(mylog, "cio Close failed: %v", err)
			}
		}()

		coff := remo - c.Offset
		n := util.IntMin(len(remp), int(maxChunkLen-coff))
		if n < 0 {
			return nil
		}

		if err := cio.PWrite(remp[:n], coff); err != nil {
			return err
		}

		oldLength := c.Length
		c.Length = int64(cio.Size())
		if oldLength != c.Length {
			if err := cfio.caio.Write(cs); err != nil {
				return fmt.Errorf("Failed to write updated cs array: %v", err)
			}
		}

		remo += int64(n)
		remp = remp[n:]
		return nil
	}

	for i := 0; i < len(cs); i++ {
		c := &cs[i]
		if c.Left() > remo {
			// Insert a new chunk at index i.
			// Try best to align offset at ChunkSplitSize.
			newo := remo / ChunkSplitSize * ChunkSplitSize
			maxlen := int64(ChunkSplitSize)
			if i > 0 {
				prev := cs[i-1]
				pright := prev.Right()
				if newo < pright {
					maxlen -= pright - newo
					newo = pright
				}
			}
			if i < len(cs)-1 {
				next := cs[i+1]
				if newo+maxlen > next.Left() {
					maxlen = next.Left() - newo
				}
			}
			newc, err := cfio.newFileChunk(newo)
			if err != nil {
				return err
			}
			cs = append(cs, inodedb.FileChunk{})
			copy(cs[i+1:], cs[i:])
			cs[i] = newc
			if err := cfio.caio.Write(cs); err != nil {
				return fmt.Errorf("Failed to write updated cs array: %v", err)
			}

			// Write through the slice element so the Length update inside
			// writeToChunk is visible in cs when it is persisted.
			if err := writeToChunk(&cs[i], NewChunk, maxlen); err != nil {
				return err
			}
			if len(remp) == 0 {
				break
			}
			continue
		}

		// Write to the existing chunk.
		maxlen := int64(ChunkSplitSize)
		if i < len(cs)-1 {
			next := cs[i+1]
			if c.Left()+maxlen > next.Left() {
				maxlen = next.Left() - c.Left()
			}
		}
		if err := writeToChunk(c, ExistingChunk, maxlen); err != nil {
			return err
		}
		if len(remp) == 0 {
			break
		}
	}

	for len(remp) > 0 {
		// Append a new chunk at the end of the chunk array.
		newo := remo / ChunkSplitSize * ChunkSplitSize
		maxlen := int64(ChunkSplitSize)

		if len(cs) > 0 {
			last := cs[len(cs)-1]
			lastRight := last.Right()
			if newo < lastRight {
				maxlen -= lastRight - newo
				newo = lastRight
			}
		}

		newc, err := cfio.newFileChunk(newo)
		if err != nil {
			return err
		}
		if err := writeToChunk(&newc, NewChunk, maxlen); err != nil {
			return err
		}

		cs = append(cs, newc)
		if err := cfio.caio.Write(cs); err != nil {
			return fmt.Errorf("Failed to write updated cs array: %v", err)
		}
	}

	return nil
}
func (ch *ChunkIO) PWrite(p []byte, offset int64) error {
	logger.Debugf(mylog, "PWrite: offset %d, len %d", offset, len(p))
	// logger.Debugf(mylog, "PWrite: p=%v", p)
	if err := ch.ensureHeader(); err != nil {
		return err
	}

	if len(p) == 0 {
		return nil
	}
	if offset < 0 || math.MaxInt32 < offset {
		return fmt.Errorf("Offset out of range: %d", offset)
	}

	remo := int(offset)
	remp := p
	if remo > ch.PayloadLen() {
		// if expanding, zero fill content frames up to write offset

		zfoff := ch.PayloadLen()
		zflen := remo - ch.PayloadLen()

		for zflen > 0 {
			logger.Debugf(mylog, "PWrite zfoff: %d, zflen: %d", zfoff, zflen)
			i := zfoff / ContentFramePayloadLength
			fOffset := i * ContentFramePayloadLength

			var f *decryptedContentFrame

			inframeOffset := zfoff - fOffset
			if zfoff == ch.PayloadLen() && inframeOffset == 0 {
				logger.Debugf(mylog, "PWrite: write new zero fill frame")

				// FIXME: maybe skip writing pure 0 frame.
				//        Old sambad writes a byte at the end of the file instead of ftruncate, which is a nightmare in the current impl.

				n := util.IntMin(zflen, ContentFramePayloadLength)
				f = &decryptedContentFrame{
					P:           ZeroContent[:n],
					Offset:      fOffset,
					IsLastFrame: false,
				}
				zfoff += n
				zflen -= n
				if err := ch.expandLengthBy(n); err != nil {
					return err
				}
				logger.Debugf(mylog, " len: %d", n)
			} else {
				n := util.IntMin(zflen, ContentFramePayloadLength-inframeOffset)
				logger.Debugf(mylog, "PWrite: zero fill last of existing content frame. len: %d f.P[%d:%d] = 0", n, inframeOffset, inframeOffset+n)

				// read the frame
				var err error
				f, err = ch.readContentFrame(i)
				if err != nil {
					return err
				}
				if fOffset != f.Offset {
					panic("fOffset != f.Offset")
				}

				// expand & zero fill
				f.P = f.P[:inframeOffset+n]
				for j := 0; j < n; j++ {
					f.P[inframeOffset+j] = 0
				}

				zfoff += n
				zflen -= n
				if err := ch.expandLengthBy(n); err != nil {
					return err
				}
			}

			// writeback the frame
			if err := ch.writeContentFrame(i, f); err != nil {
				return fmt.Errorf("failed to write back the encrypted frame: %v", err)
			}
		}
	}

	for len(remp) > 0 {
		i := remo / ContentFramePayloadLength
		fOffset := i * ContentFramePayloadLength

		var f *decryptedContentFrame
		if remo == ch.PayloadLen() && fOffset == remo {
			logger.Debugf(mylog, "PWrite: Preparing new frame to append")
			f = &decryptedContentFrame{
				P:           make([]byte, 0, ContentFramePayloadLength),
				Offset:      fOffset,
				IsLastFrame: true,
			}
		} else {
			logger.Debugf(mylog, "PWrite: Read existing frame %d to append/update", i)
			var err error
			f, err = ch.readContentFrame(i)
			if err != nil {
				return err
			}
			if fOffset != f.Offset {
				panic("fOffset != f.Offset")
			}
		}

		// modify the payload
		inframeOffset := remo - f.Offset
		if inframeOffset < 0 {
			panic("ASSERT: inframeOffset must be non-negative here")
		}

		n := len(remp)
		valid := len(f.P) - inframeOffset // valid payload after offset
		if len(remp) > valid && f.IsLastFrame {
			// expand the last frame as needed
			newSize := inframeOffset + n
			if newSize > ContentFramePayloadLength {
				f.IsLastFrame = false
				newSize = ContentFramePayloadLength
			}

			logger.Debugf(mylog, "PWrite: Expanding the last frame from %d to %d", len(f.P), newSize)
			expandLen := newSize - len(f.P)
			if err := ch.expandLengthBy(expandLen); err != nil {
				return err
			}

			f.P = f.P[:newSize]
			valid = newSize - inframeOffset
		}
		if valid == 0 {
			panic("Inf loop")
		}

		n = util.IntMin(n, valid)
		copy(f.P[inframeOffset:inframeOffset+n], remp)

		// writeback the updated encrypted frame
		if err := ch.writeContentFrame(i, f); err != nil {
			return fmt.Errorf("failed to write back the encrypted frame: %v", err)
		}
		logger.Debugf(mylog, "PWrite: wrote %d bytes for off %d len %d", n, offset, len(remp))

		remo += n
		remp = remp[n:]
	}

	return nil
}