func newCompStream(conn net.Conn) *compStream {
	c := new(compStream)
	c.conn = conn
	c.w = snappy.NewBufferedWriter(conn)
	c.r = snappy.NewReader(conn)
	return c
}
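The compStream type itself is not shown above. A minimal sketch of the delegating methods such a wrapper usually carries (an assumption, not part of the original example); flushing after each Write pushes a complete snappy frame onto the wire so the peer is not left waiting on buffered data:

// Assumed layout: compStream is {conn net.Conn; w *snappy.Writer; r *snappy.Reader}.
func (c *compStream) Read(p []byte) (n int, err error) {
	return c.r.Read(p)
}

func (c *compStream) Write(p []byte) (n int, err error) {
	n, err = c.w.Write(p)
	if err != nil {
		return n, err
	}
	// Flush so each logical write is framed and sent immediately.
	return n, c.w.Flush()
}

func (c *compStream) Close() error {
	return c.conn.Close()
}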
// Contents of the returned io.Reader are snappy-compressed.
func buildWriteValueRequest(serializedChunks io.Reader, hints map[hash.Hash]struct{}) io.Reader {
	body := &bytes.Buffer{}
	gw := snappy.NewBufferedWriter(body)
	serializeHints(gw, hints)
	d.Chk.NoError(gw.Close())
	return io.MultiReader(body, serializedChunks)
}
func serializeChunks(chnx []chunks.Chunk, assert *assert.Assertions) io.Reader {
	body := &bytes.Buffer{}
	gw := snappy.NewBufferedWriter(body)
	sz := chunks.NewSerializer(gw)
	assert.NoError(sz.PutMany(chnx))
	assert.NoError(sz.Close())
	assert.NoError(gw.Close())
	return body
}
func serializeChunks(chnx []chunks.Chunk, assert *assert.Assertions) io.Reader {
	body := &bytes.Buffer{}
	sw := snappy.NewBufferedWriter(body)
	for _, chunk := range chnx {
		chunks.Serialize(chunk, sw)
	}
	assert.NoError(sw.Close())
	return body
}
func respWriter(req *http.Request, w http.ResponseWriter) (writer io.WriteCloser) {
	writer = wc{w.(io.Writer)}
	if strings.Contains(req.Header.Get("Accept-Encoding"), "gzip") {
		w.Header().Add("Content-Encoding", "gzip")
		gw := gzip.NewWriter(w)
		writer = gw
	} else if strings.Contains(req.Header.Get("Accept-Encoding"), "x-snappy-framed") {
		w.Header().Add("Content-Encoding", "x-snappy-framed")
		sw := snappy.NewBufferedWriter(w)
		writer = sw
	}
	return
}
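The example above negotiates the response encoding from the request's Accept-Encoding header. A hedged sketch of the client side, choosing a decompressor from the Content-Encoding the server picked (respReader and the identity fallback are assumptions, not part of the original code):

// Assumed imports: "compress/gzip", "io", "net/http", and the snappy package.
func respReader(resp *http.Response) (io.Reader, error) {
	switch resp.Header.Get("Content-Encoding") {
	case "gzip":
		// gzip.NewReader validates the stream header up front, hence the error.
		return gzip.NewReader(resp.Body)
	case "x-snappy-framed":
		// Framed snappy needs no up-front handshake; errors surface on Read.
		return snappy.NewReader(resp.Body), nil
	}
	// Identity encoding: pass the body through unchanged.
	return resp.Body, nil
}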
// Contents of the returned io.Reader are snappy-compressed.
func buildWriteValueRequest(chunkChan chan *chunks.Chunk, hints map[hash.Hash]struct{}) io.Reader {
	body, pw := io.Pipe()
	go func() {
		gw := snappy.NewBufferedWriter(pw)
		serializeHints(gw, hints)
		for c := range chunkChan {
			chunks.Serialize(*c, gw)
		}
		d.Chk.NoError(gw.Close())
		d.Chk.NoError(pw.Close())
	}()
	return body
}
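On the receiving end, the request body can be wrapped in a snappy.Reader before deserialization. A sketch assuming a deserializeHints helper that mirrors the serializeHints call above (the helper's name and signature are hypothetical, not shown in the source):

// Hypothetical server-side counterpart to buildWriteValueRequest.
func readWriteValueRequest(body io.Reader) (map[hash.Hash]struct{}, io.Reader) {
	// Decompress the framed stream produced by snappy.NewBufferedWriter.
	sr := snappy.NewReader(body)
	hints := deserializeHints(sr) // assumed inverse of serializeHints
	// The remaining stream is positioned at the serialized chunks.
	return hints, sr
}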
// Insert can be called from any goroutine to store c in the cache. If c is
// successfully added to the cache, Insert returns true. If c was already in
// the cache, Insert returns false.
func (p *orderedChunkCache) Insert(c chunks.Chunk, refHeight uint64) bool {
	hash := c.Hash()
	dbKey, present := func() (dbKey []byte, present bool) {
		p.mu.Lock()
		defer p.mu.Unlock()
		if _, present = p.chunkIndex[hash]; !present {
			dbKey = toDbKey(refHeight, c.Hash())
			p.chunkIndex[hash] = dbKey
		}
		return
	}()
	if !present {
		buf := &bytes.Buffer{}
		gw := snappy.NewBufferedWriter(buf)
		chunks.Serialize(c, gw)
		gw.Close()
		d.Chk.NoError(p.orderedChunks.Put(dbKey, buf.Bytes(), nil))
		return true
	}
	return false
}
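Values land in the database as framed snappy streams, so any read path must decompress before deserializing. A hypothetical read helper for the cache above (not part of the original example; it assumes orderedChunks is a goleveldb *leveldb.DB):

// Sketch of the matching read path: fetch the stored bytes and wrap them
// in a snappy.Reader so callers see the decompressed serialized chunk.
func (p *orderedChunkCache) getCompressed(dbKey []byte) (io.Reader, error) {
	data, err := p.orderedChunks.Get(dbKey, nil)
	if err != nil {
		return nil, err
	}
	return snappy.NewReader(bytes.NewReader(data)), nil
}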
func FuzzFraming(data []byte) int {
	r := snappy.NewReader(bytes.NewReader(data))
	buf := make([]byte, 0, 1023)
	dec := make([]byte, 0, 1024)
	for i := 0; ; i++ {
		x := i
		if x > cap(buf) {
			x = cap(buf)
		}
		n, err := r.Read(buf[:x])
		if n != 0 {
			dec = append(dec, buf[:n]...)
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			return 0
		}
	}
	r.Reset(bytes.NewReader(data))
	dec1, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	if bytes.Compare(dec, dec1) != 0 {
		fmt.Printf("dec0: %q\n", dec)
		fmt.Printf("dec1: %q\n", dec1)
		panic("not equal")
	}
	bufw := new(bytes.Buffer)
	w := snappy.NewBufferedWriter(bufw)
	for i := 0; len(dec1) > 0; i++ {
		x := i
		if x > len(dec1) {
			x = len(dec1)
		}
		n, err := w.Write(dec1[:x])
		if n != x {
			panic("short write")
		}
		if err != nil {
			panic(err)
		}
		dec1 = dec1[x:]
		if (i % 2) != 0 {
			w.Flush()
		}
	}
	w.Close()
	dec1 = append([]byte{}, dec...)
	bufw2 := new(bytes.Buffer)
	w2 := snappy.NewWriter(bufw2)
	for i := 2; len(dec1) > 0; i++ {
		x := i
		if x > len(dec1) {
			x = len(dec1)
		}
		n, err := w2.Write(dec1[:x])
		if n != x {
			panic("short write")
		}
		if err != nil {
			panic(err)
		}
		dec1 = dec1[x:]
		if (i % 2) != 0 {
			w2.Flush()
		}
	}
	w2.Close()
	r2 := snappy.NewReader(bufw)
	dec2, err := ioutil.ReadAll(r2)
	if err != nil {
		panic(err)
	}
	if bytes.Compare(dec, dec2) != 0 {
		panic("not equal")
	}
	r3 := snappy.NewReader(bufw2)
	dec3, err := ioutil.ReadAll(r3)
	if err != nil {
		panic(err)
	}
	if bytes.Compare(dec, dec3) != 0 {
		panic("not equal")
	}
	return 1
}
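The harness above exercises the framed format with varying read and write sizes and interleaved flushes. For reference, the same round-trip property can be stated much more compactly against snappy's block format using snappy.Encode and snappy.Decode; a minimal sketch:

// Round-trip check for the block format (no framing, no streaming).
func roundTrip(data []byte) bool {
	enc := snappy.Encode(nil, data)
	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		return false
	}
	return bytes.Equal(dec, data)
}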
func (subvolume Subvolume) RunSnapshot() (err error) {
	// TODO: Move Lock to after snapshot, to allow snapshots while a previous
	// long send is still running. Need to implement some guarantee that two
	// instances don't try to create the same snapshot at the same time and
	// then delete it, because one will fail.
	lock, err := NewDirLock(subvolume.SnapshotsLoc.Directory)
	if err != nil {
		return
	}
	defer lock.Unlock()
	timestamp := getCurrentTimestamp()
	snapshot := Snapshot{subvolume.SnapshotsLoc, timestamp}
	err = os.MkdirAll(path.Dir(snapshot.Path()), dirMode)
	if err != nil {
		return
	}
	btrfsCmd := exec.Command(btrfsBin, "subvolume", "snapshot", "-r", subvolume.Directory, snapshot.Path())
	if verbosity > 1 {
		printCommand(btrfsCmd)
		btrfsCmd.Stdout = os.Stderr
		btrfsCmd.Stderr = os.Stderr
	}
	err = btrfsCmd.Run()
	if err != nil {
		if verbosity > 0 {
			log.Println("Snapshot failed")
		}
		if _, errTmp := os.Stat(snapshot.Path()); !os.IsNotExist(errTmp) {
			errTmp = snapshot.DeleteSnapshot()
			if errTmp != nil {
				if verbosity > 0 {
					log.Println("Failed to delete failed snapshot")
				}
			}
		}
		return
	}
	if *pinnedFlag {
		subvolume.SnapshotsLoc.PinTimestamp(timestamp)
	}
	if *archiveFlag {
		archiveDir := path.Join(subvolume.SnapshotsLoc.Directory, "archive")
		err = os.MkdirAll(archiveDir, dirMode)
		if err != nil {
			return
		}
		btrfsCmd = exec.Command(btrfsBin, "send", snapshot.Path())
		extension := ""
		if !*noCompressionFlag {
			extension = ".snpy"
		}
		archiveFile := path.Join(archiveDir, string(timestamp)+".snap"+extension)
		f, err := os.Create(archiveFile)
		if err != nil {
			return err
		}
		defer f.Close()
		if *noCompressionFlag {
			bf := bufio.NewWriter(f)
			defer bf.Flush()
			btrfsCmd.Stdout = bf
		} else {
			bf := snappy.NewBufferedWriter(f)
			defer bf.Close()
			btrfsCmd.Stdout = bf
		}
		if verbosity > 1 {
			printCommand(btrfsCmd)
			btrfsCmd.Stderr = os.Stderr
		}
		err = btrfsCmd.Run()
		if err != nil {
			// Not sure what to do if error
			_ = f.Close()
			os.Remove(archiveFile)
			return err
		}
	}
	timestamps, err := subvolume.SnapshotsLoc.ReadTimestampsDir()
	if err != nil {
		return
	}
	for _, remote := range subvolume.Remotes {
		err = remote.sendSnapshotUsingParent(snapshot, timestamps)
		if err != nil {
			log.Println("Error sending snapshot")
			log.Println(err.Error())
			err = nil
			continue
		}
	}
	_, err = subvolume.SnapshotsLoc.CleanUp(timestamp, timestamps)
	if err != nil {
		return
	}
	return
}
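When compression is enabled, the ".snap.snpy" archive written above is a framed snappy stream of the btrfs send output. A hypothetical restore helper, not part of the original code, that decompresses the archive and feeds it to "btrfs receive" (restoreArchive and targetDir are assumptions; btrfsBin is the same binary path used above):

// Sketch of the inverse of the archive branch: stream the decompressed
// send data into btrfs receive to recreate the snapshot under targetDir.
func restoreArchive(archiveFile, targetDir string) error {
	f, err := os.Open(archiveFile)
	if err != nil {
		return err
	}
	defer f.Close()
	cmd := exec.Command(btrfsBin, "receive", targetDir)
	cmd.Stdin = snappy.NewReader(f) // decompress the framed stream on the fly
	cmd.Stderr = os.Stderr
	return cmd.Run()
}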
func (remote RemoteSnapshotsLoc) SendSnapshot(snapshot Snapshot, parent Timestamp) (err error) {
	var sendCmd *exec.Cmd
	if parent == "" {
		if verbosity > 1 {
			log.Println("Performing full send/receive")
		}
		sendCmd = exec.Command(btrfsBin, "send", snapshot.Path())
	} else {
		if verbosity > 1 {
			log.Println("Performing incremental send/receive")
		}
		parentPath := path.Join(path.Dir(snapshot.Path()), string(parent))
		sendCmd = exec.Command(btrfsBin, "send", "-p", parentPath, snapshot.Path())
	}
	if verbosity > 1 {
		sendCmd.Stderr = os.Stderr
	}
	sendRd, sendWr := io.Pipe()
	defer sendRd.Close()
	var recvRunner CmdRunner
	var compOut *snappy.Writer
	closeComp := false
	if remote.Host == "" {
		sendCmd.Stdout = sendWr
		recvRunner = remote.SnapshotsLoc.ReceiveAndCleanUp(sendRd, snapshot.timestamp)
	} else {
		if *noCompressionFlag {
			sendCmd.Stdout = sendWr
			recvRunner = remote.RemoteReceive(sendRd, snapshot.timestamp)
		} else {
			compOut = snappy.NewBufferedWriter(sendWr)
			closeComp = true
			sendCmd.Stdout = compOut
			recvRunner = remote.RemoteReceive(sendRd, snapshot.timestamp)
		}
	}
	sendRunner := RunCommand(sendCmd)
	err = <-recvRunner.Started
	if err != nil {
		log.Println("Error starting btrfs receive")
		return
	}
	if verbosity > 1 {
		printCommand(sendCmd)
	}
	err = <-sendRunner.Started
	if err != nil {
		log.Println("Error starting btrfs send")
		return
	}
	select {
	case err = <-sendRunner.Done:
		if err != nil {
			log.Println("Error running btrfs send")
		}
		if closeComp {
			compOut.Close()
		}
		sendWr.Close()
		<-recvRunner.Done
		return
	case err = <-recvRunner.Done:
		if err != nil {
			log.Println("Error running btrfs receive")
		}
		sendRunner.Signal <- os.Kill
		<-sendRunner.Done
		return
	}
}