コード例 #1
0
ファイル: btncrypt.go プロジェクト: postfix/otaru
// Read implements io.Reader. It copies up to len(p) bytes of decrypted
// payload into p, decrypting further frames from the source as needed.
// Once the full lenTotal bytes have been consumed it returns io.EOF.
func (bdr *Reader) Read(p []byte) (int, error) {
	want := util.IntMin(len(p), bdr.lenTotal-bdr.lenRead)
	if want == 0 {
		return 0, io.EOF
	}
	dst := p[:want]

	written := 0
	for len(dst) > 0 {
		// Refill the plaintext buffer once the previous frame is consumed.
		if len(bdr.unread) == 0 {
			if err := bdr.decryptNextFrame(); err != nil {
				return written, err
			}
			if len(bdr.unread) == 0 {
				panic("decryptNextFrame should have decrypted something and placed it on the buf")
			}
		}

		nc := util.IntMin(len(dst), len(bdr.unread))
		copy(dst[:nc], bdr.unread[:nc])
		bdr.unread = bdr.unread[nc:]
		dst = dst[nc:]
		written += nc
		bdr.lenRead += nc
	}

	return written, nil
}
コード例 #2
0
ファイル: btncrypt.go プロジェクト: postfix/otaru
// Write implements io.Writer. It buffers p into the frame encryptor,
// flushing a full encrypted frame downstream whenever the encryptor's
// capacity is exhausted.
func (bew *WriteCloser) Write(p []byte) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}

	left := p
	for len(left) > 0 {
		framePayloadLen := util.IntMin(bew.frameEncryptor.CapacityLeft(), len(left))
		framePayload := left[:framePayloadLen]
		if _, err := bew.frameEncryptor.Write(framePayload); err != nil {
			// frameEncryptor.Write buffers in memory and is not expected to
			// fail; a failure here is a programming error.
			panic(err)
		}
		left = left[framePayloadLen:]
		bew.lenWritten += framePayloadLen

		if bew.frameEncryptor.CapacityLeft() == 0 {
			if err := bew.flushFrame(); err != nil {
				// BUGFIX: the io.Writer contract requires reporting how many
				// bytes were consumed before the error, not 0.
				return len(p) - len(left), err
			}
		}
		if bew.frameEncryptor.CapacityLeft() == 0 {
			panic("flushFrame should have brought back capacity")
		}
	}

	return len(p), nil
}
コード例 #3
0
ファイル: btncrypt.go プロジェクト: postfix/otaru
// decryptNextFrame reads the next encrypted frame from bdr.src, decrypts it,
// and exposes the plaintext through bdr.unread.
// It must only be called while bdr.lenRead < bdr.lenTotal; otherwise
// frameLen would be 0 and an empty frame would be processed.
func (bdr *Reader) decryptNextFrame() error {
	// The next frame carries the remaining payload, capped at the
	// per-frame maximum.
	frameLen := util.IntMin(bdr.lenTotal-bdr.lenRead, BtnFrameMaxPayload)
	encryptedFrameLen := bdr.c.EncryptedFrameSize(frameLen)
	// fmt.Printf("frameLen: %d, encryptedFrameLen: %d\n", frameLen, encryptedFrameLen)

	// Reuse bdr.encrypted as a scratch buffer; ReadFull errors out unless
	// the entire frame could be read.
	bdr.encrypted = bdr.encrypted[:encryptedFrameLen]
	if _, err := io.ReadFull(bdr.src, bdr.encrypted); err != nil {
		return err
	}

	// Frame layout: nonce followed by the GCM ciphertext (which carries the
	// auth tag). Both slices alias bdr.encrypted.
	nonceSize := bdr.c.gcm.NonceSize()
	nonce := bdr.encrypted[:nonceSize]
	ciphertext := bdr.encrypted[nonceSize:]

	var err error
	// Truncate to zero length while keeping capacity: gcm.Open appends the
	// plaintext to the dst slice given as its first argument.
	bdr.decrypted = bdr.decrypted[:0]
	if bdr.decrypted, err = bdr.c.gcm.Open(bdr.decrypted, nonce, ciphertext, nil); err != nil {
		return err
	}
	bdr.unread = bdr.decrypted

	return nil
}
コード例 #4
0
ファイル: chunkstore.go プロジェクト: hajimehoshi/otaru
// PWrite writes p into the chunk payload at the given offset. When offset
// lies beyond the current payload length, the gap is first zero-filled
// frame by frame before the actual data is written.
// Returns an error if offset is negative or exceeds math.MaxInt32, or if
// reading, expanding, or writing back any content frame fails.
func (ch *ChunkIO) PWrite(offset int64, p []byte) error {
	log.Printf("PWrite: offset %d, len %d", offset, len(p))
	// log.Printf("PWrite: p=%v", p)

	if err := ch.ensureHeader(); err != nil {
		return err
	}

	if len(p) == 0 {
		return nil
	}
	if offset < 0 || math.MaxInt32 < offset {
		return fmt.Errorf("Offset out of range: %d", offset)
	}

	// remo/remp track the remaining write offset and the remaining payload.
	remo := int(offset)
	remp := p

	if remo > ch.PayloadLen() {
		// if expanding, zero fill content frames up to write offset

		zfoff := ch.PayloadLen()
		zflen := remo - ch.PayloadLen()

		for zflen > 0 {
			log.Printf("PWrite zfoff: %d, zflen: %d", zfoff, zflen)
			i := zfoff / ContentFramePayloadLength
			fOffset := i * ContentFramePayloadLength

			var f *decryptedContentFrame

			inframeOffset := zfoff - fOffset
			if zfoff == ch.PayloadLen() && inframeOffset == 0 {
				// The zero fill starts exactly on a frame boundary at the end
				// of the payload: emit a brand-new all-zero frame.
				log.Printf("PWrite: write new zero fill frame")

				// FIXME: maybe skip writing pure 0 frame.
				//        Old sambad writes a byte of the end of the file instead of ftruncate, which is a nightmare in the current impl.

				n := util.IntMin(zflen, ContentFramePayloadLength)

				f = &decryptedContentFrame{
					P:           ZeroContent[:n],
					Offset:      fOffset,
					IsLastFrame: false,
				}

				zfoff += n
				zflen -= n
				// BUGFIX: this error was silently dropped, unlike the
				// expandLengthBy call in the append path below.
				if err := ch.expandLengthBy(n); err != nil {
					return err
				}
				log.Printf(" len: %d", n)
			} else {
				// The zero fill starts inside an existing frame: read it,
				// extend its payload, and zero the new tail.
				n := util.IntMin(zflen, ContentFramePayloadLength-inframeOffset)
				log.Printf("PWrite: zero fill last of existing content frame. len: %d f.P[%d:%d] = 0", n, inframeOffset, inframeOffset+n)

				// read the frame
				var err error
				f, err = ch.readContentFrame(i)
				if err != nil {
					return err
				}
				if fOffset != f.Offset {
					panic("fOffset != f.Offset")
				}

				// expand & zero fill
				f.P = f.P[:inframeOffset+n]
				j := 0
				for j < n {
					f.P[inframeOffset+j] = 0
					j++
				}

				zfoff += n
				zflen -= n
				// BUGFIX: this error was silently dropped, unlike the
				// expandLengthBy call in the append path below.
				if err := ch.expandLengthBy(n); err != nil {
					return err
				}
			}

			// writeback the frame
			if err := ch.writeContentFrame(i, f); err != nil {
				return fmt.Errorf("failed to write back the encrypted frame: %v", err)
			}
		}
	}

	// Main write loop: one content frame per iteration.
	for len(remp) > 0 {
		i := remo / ContentFramePayloadLength
		fOffset := i * ContentFramePayloadLength

		var f *decryptedContentFrame
		if remo == ch.PayloadLen() && fOffset == remo {
			// Appending on a frame boundary: start a fresh frame.
			log.Printf("PWrite: Preparing new frame to append")
			f = &decryptedContentFrame{
				P:           make([]byte, 0, ContentFramePayloadLength),
				Offset:      fOffset,
				IsLastFrame: true,
			}
		} else {
			log.Printf("PWrite: Read existing frame %d to append/update", i)
			var err error
			f, err = ch.readContentFrame(i)
			if err != nil {
				return err
			}
			if fOffset != f.Offset {
				panic("fOffset != f.Offset")
			}
		}

		// modify the payload
		inframeOffset := remo - f.Offset
		if inframeOffset < 0 {
			panic("ASSERT: inframeOffset must be non-negative here")
		}

		n := len(remp)
		valid := len(f.P) - inframeOffset // valid payload after offset
		if len(remp) > valid && f.IsLastFrame {
			// expand the last frame as needed
			newSize := inframeOffset + n
			if newSize > ContentFramePayloadLength {
				// Frame is now full; it can no longer be the last frame.
				f.IsLastFrame = false
				newSize = ContentFramePayloadLength
			}

			log.Printf("PWrite: Expanding the last frame from %d to %d", len(f.P), newSize)

			expandLen := newSize - len(f.P)
			if err := ch.expandLengthBy(expandLen); err != nil {
				return err
			}

			f.P = f.P[:newSize]
			valid = newSize - inframeOffset
		}
		if valid == 0 {
			panic("Inf loop")
		}
		n = util.IntMin(n, valid)

		copy(f.P[inframeOffset:inframeOffset+n], remp)

		// writeback the updated encrypted frame
		if err := ch.writeContentFrame(i, f); err != nil {
			return fmt.Errorf("failed to write back the encrypted frame: %v", err)
		}
		log.Printf("PWrite: wrote %d bytes for off %d len %d", n, offset, len(remp))

		remo += n
		remp = remp[n:]
	}

	return nil
}
コード例 #5
0
ファイル: chunkedfile.go プロジェクト: postfix/otaru
// PWrite writes p at the given byte offset, spanning as many chunks as
// needed: it updates existing chunks that overlap the write range, inserts
// new chunks into gaps, and appends new chunks past the current end.
// The chunk array (cs) is persisted via cfio.caio whenever it changes.
func (cfio *ChunkedFileIO) PWrite(p []byte, offset int64) error {
	logger.Debugf(mylog, "PWrite: offset=%d, len=%d", offset, len(p))
	// logger.Debugf(mylog, "PWrite: p=%v", p)
	remo := offset
	remp := p
	if len(remp) == 0 {
		return nil
	}

	cs, err := cfio.caio.Read()
	if err != nil {
		return fmt.Errorf("Failed to read cs array: %v", err)
	}

	// writeToChunk writes the next prefix of remp into chunk c (up to
	// maxChunkLen bytes from the chunk start), updates c.Length, persists
	// cs when the length changed, and advances remo/remp.
	// NOTE: c must point INTO cs for the Length update to be persisted.
	writeToChunk := func(c *inodedb.FileChunk, isNewChunk bool, maxChunkLen int64) error {
		if !fl.IsReadWriteAllowed(cfio.bs.Flags()) {
			return EPERM
		}

		flags := fl.O_RDWR
		if isNewChunk {
			flags |= fl.O_CREATE | fl.O_EXCL
		}
		bh, err := cfio.bs.Open(c.BlobPath, flags)
		if err != nil {
			return fmt.Errorf("Failed to open path \"%s\" for writing (isNewChunk: %t): %v", c.BlobPath, isNewChunk, err)
		}
		defer func() {
			if err := bh.Close(); err != nil {
				logger.Criticalf(mylog, "blobhandle Close failed: %v", err)
			}
		}()

		cio := cfio.newChunkIO(bh, cfio.c, c.Offset)
		defer func() {
			if err := cio.Close(); err != nil {
				logger.Criticalf(mylog, "cio Close failed: %v", err)
			}
		}()

		coff := remo - c.Offset
		n := util.IntMin(len(remp), int(maxChunkLen-coff))
		if n < 0 {
			return nil
		}
		if err := cio.PWrite(remp[:n], coff); err != nil {
			return err
		}
		oldLength := c.Length
		c.Length = int64(cio.Size())
		if oldLength != c.Length {
			if err := cfio.caio.Write(cs); err != nil {
				return fmt.Errorf("Failed to write updated cs array: %v", err)
			}
		}

		remo += int64(n)
		remp = remp[n:]
		return nil
	}

	for i := 0; i < len(cs); i++ {
		c := &cs[i]
		if c.Left() > remo {
			// Insert a new chunk @ i

			// try best to align offset at ChunkSplitSize
			newo := remo / ChunkSplitSize * ChunkSplitSize
			maxlen := int64(ChunkSplitSize)
			if i > 0 {
				prev := cs[i-1]
				pright := prev.Right()
				if newo < pright {
					maxlen -= pright - newo
					newo = pright
				}
			}
			if i < len(cs)-1 {
				next := cs[i+1]
				if newo+maxlen > next.Left() {
					maxlen = next.Left() - newo
				}
			}

			newc, err := cfio.newFileChunk(newo)
			if err != nil {
				return err
			}
			cs = append(cs, inodedb.FileChunk{})
			copy(cs[i+1:], cs[i:])
			cs[i] = newc
			if err := cfio.caio.Write(cs); err != nil {
				return fmt.Errorf("Failed to write updated cs array: %v", err)
			}

			// BUGFIX: was writeToChunk(&newc, ...). newc is a copy of cs[i]
			// taken before the write; updating newc.Length would leave the
			// persisted cs[i].Length stale. Pass a pointer into cs instead.
			if err := writeToChunk(&cs[i], NewChunk, maxlen); err != nil {
				return err
			}
			if len(remp) == 0 {
				break
			}

			continue
		}

		// Write to the chunk
		maxlen := int64(ChunkSplitSize)
		if i < len(cs)-1 {
			next := cs[i+1]
			if c.Left()+maxlen > next.Left() {
				maxlen = next.Left() - c.Left()
			}
		}
		if err := writeToChunk(c, ExistingChunk, maxlen); err != nil {
			return err
		}
		if len(remp) == 0 {
			break
		}
	}

	for len(remp) > 0 {
		// Append a new chunk at the end
		newo := remo / ChunkSplitSize * ChunkSplitSize
		maxlen := int64(ChunkSplitSize)

		if len(cs) > 0 {
			last := cs[len(cs)-1]
			lastRight := last.Right()
			if newo < lastRight {
				maxlen -= lastRight - newo
				newo = lastRight
			}
		}

		newc, err := cfio.newFileChunk(newo)
		if err != nil {
			return err
		}
		// Here newc is appended to cs AFTER the write, so its updated Length
		// is persisted by the caio.Write below.
		if err := writeToChunk(&newc, NewChunk, maxlen); err != nil {
			return err
		}

		cs = append(cs, newc)
		if err := cfio.caio.Write(cs); err != nil {
			return fmt.Errorf("Failed to write updated cs array: %v", err)
		}
	}

	return nil
}
コード例 #6
0
ファイル: filewritecache.go プロジェクト: postfix/otaru
// ReadAtThrough reads len(p) bytes at offset, serving bytes covered by
// cached write patches (wc.ps) from the cache and falling back to r for
// the uncovered ranges. Gaps that r cannot fill are zero-filled.
// Returns the number of bytes placed into p.
// NOTE(review): wc.ps appears to be sorted by offset with a trailing
// sentinel — confirm against the patch-list maintenance code.
func (wc *FileWriteCache) ReadAtThrough(p []byte, offset int64, r ReadAter) (int, error) {
	nr := 0
	remo := offset
	remp := p

	for _, patch := range wc.ps {
		if len(remp) == 0 {
			return nr, nil
		}

		if patch.IsSentinel() {
			break
		}

		// Patch entirely before the read cursor: irrelevant.
		if remo > patch.Right() {
			continue
		}

		if remo < patch.Left() {
			// Gap before this patch: read it through from r.
			fallbackLen64 := util.Int64Min(int64(len(remp)), patch.Left()-remo)
			if fallbackLen64 > math.MaxInt32 {
				panic("Logic error: fallbackLen should always be in int32 range")
			}
			fallbackLen := int(fallbackLen64)

			n, err := r.ReadAt(remp[:fallbackLen], remo)
			logger.Debugf(wclog, "BeforePatch: ReadAt issued offset %d, len %d bytes, read %d bytes", remo, fallbackLen, n)
			if err != nil {
				return nr + n, err
			}
			if n < fallbackLen {
				// Short read: treat the remainder as a hole and zero fill.
				zerofill(remp[n:fallbackLen])
			}

			nr += fallbackLen
			remp = remp[fallbackLen:]
			remo += int64(fallbackLen)
		}

		if len(remp) == 0 {
			return nr, nil
		}

		// Copy the overlapping part of the patch into the output.
		applyOffset64 := remo - patch.Offset
		if applyOffset64 > math.MaxInt32 {
			panic("Logic error: applyOffset should always be in int32 range")
		}
		applyOffset := int(applyOffset64)
		applyLen := util.IntMin(len(patch.P)-applyOffset, len(remp))
		copy(remp[:applyLen], patch.P[applyOffset:])

		nr += applyLen
		remp = remp[applyLen:]
		remo += int64(applyLen)
	}

	// Everything requested was served from patches; avoid a zero-length
	// ReadAt, which some io.ReaderAt implementations answer with io.EOF.
	if len(remp) == 0 {
		return nr, nil
	}

	n, err := r.ReadAt(remp, remo)
	logger.Debugf(wclog, "Last: ReadAt read %d bytes", n)
	// BUGFIX: count the bytes read even on error, consistent with the
	// mid-loop fallback path above and the io.ReaderAt contract.
	nr += n
	if err != nil {
		return nr, err
	}

	return nr, nil
}