func (t *Tsdb) sendData() {
	counter := 0
	bytesSent := 0
	last := time.Now()
	ticker := time.NewTicker(time.Second * 10)
	for {
		select {
		case <-ticker.C:
			if counter > 0 {
				log.Info("published %d payloads (%d bytes) in the last %f seconds", counter, bytesSent, time.Since(last).Seconds())
				counter = 0
				bytesSent = 0
				last = time.Now()
			}
		case data := <-t.dataChan:
			u := t.Url.String() + data.Path
			body := new(bytes.Buffer)
			snappyBody := snappy.NewWriter(body)
			snappyBody.Write(data.Body)
			snappyBody.Close()
			req, err := http.NewRequest("POST", u, body)
			if err != nil {
				log.Error(3, "failed to create request payload. ", err)
				break
			}
			req.Header.Set("Content-Type", "rt-metric-binary-snappy")
			req.Header.Set("Authorization", "Bearer "+t.ApiKey)
			var reqBytesSent int
			sent := false
			for !sent {
				reqBytesSent = body.Len()
				if err := send(req); err != nil {
					log.Error(3, err.Error())
					time.Sleep(time.Second)
					// Re-encode the payload: the failed attempt consumed the request body.
					body.Reset()
					snappyBody := snappy.NewWriter(body)
					snappyBody.Write(data.Body)
					snappyBody.Close()
				} else {
					sent = true
					log.Debug("sent %d bytes", reqBytesSent)
				}
			}
			bytesSent += reqBytesSent
			counter++
		}
	}
}
func (c *snappyCompressor) Do(w io.Writer, p []byte) error {
	sw := snappy.NewWriter(w)
	if _, err := sw.Write(p); err != nil {
		return err
	}
	return sw.Close()
}
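// A minimal round-trip sketch for the compressor above (hypothetical helper, not
// part of the original source; assumes a zero-value snappyCompressor is usable and
// that bytes, fmt, io/ioutil, log and github.com/golang/snappy are imported):
func snappyCompressorRoundTrip() {
	var buf bytes.Buffer
	c := &snappyCompressor{}
	if err := c.Do(&buf, []byte("hello, snappy")); err != nil {
		log.Fatal(err)
	}
	// The framed output written by snappy.NewWriter is decoded by the
	// matching framed reader, snappy.NewReader.
	out, err := ioutil.ReadAll(snappy.NewReader(&buf))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", out) // hello, snappy
}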
// NewSnappyResponseWriter returns a `http.ResponseWriter` wrapper which can encode
// the output with Snappy if requested by the client.
// If snappy isn't enabled, it will act like a regular `http.ResponseWriter`.
// `Close` must be called so the `*snappy.Writer` instance can be put back in the `sync.Pool`.
func NewSnappyResponseWriter(rw http.ResponseWriter, r *http.Request) *snappyResponseWriter {
	var s *snappy.Writer
	// Set the necessary `Vary` header.
	rw.Header().Set("Vary", "Accept-Encoding")
	// Disable caching of responses.
	rw.Header().Set("Cache-control", "no-cache")
	var writer io.Writer
	switch r.Header.Get("Accept-Encoding") {
	case "snappy":
		rw.Header().Set("Content-Encoding", "snappy")
		// Try to get a snappy.Writer from the pool.
		if is := snappyWriterPool.Get(); is != nil {
			s = is.(*snappy.Writer)
			s.Reset(rw)
		} else {
			// Create a new one if the pool is empty.
			s = snappy.NewWriter(rw)
		}
		writer = s
	default:
		// No `Accept-Encoding` header (or an unsupported value):
		// default to plain text.
		writer = rw
	}
	return &snappyResponseWriter{
		snappyWriter: s,
		rw:           rw,
		w:            writer,
	}
}
func snappy_compress(chunk []byte, n int) []byte {
	var buf bytes.Buffer
	writer := snappy.NewWriter(&buf)
	writer.Write(chunk[:n]) // write errors are ignored here
	return buf.Bytes()
}
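// Note: with github.com/golang/snappy, the writer returned by snappy.NewWriter
// compresses and flushes on every Write call, which is why buf.Bytes() above is
// complete even without a Close. snappy.NewBufferedWriter batches small writes
// into larger frames and MUST be flushed or closed before the output is read.
// A minimal sketch of the buffered variant (hypothetical helper):
func snappyCompressBuffered(chunk []byte) ([]byte, error) {
	var buf bytes.Buffer
	w := snappy.NewBufferedWriter(&buf)
	if _, err := w.Write(chunk); err != nil {
		return nil, err
	}
	// Close flushes any buffered frame; skipping it truncates the output.
	if err := w.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}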
func (f *File) encode(o interface{}) (bs []byte, err error) {
	buf := new(bytes.Buffer)
	if f.compressMethod == _COMPRESS_SNAPPY {
		w := snappy.NewWriter(buf)
		if f.codec == _CODEC_GOB {
			err = gob.NewEncoder(w).Encode(o)
		} else if f.codec == _CODEC_MSGPACK {
			err = msgpack.NewEncoder(w).Encode(o)
		}
		if err != nil {
			return nil, err
		}
		err = w.Close()
		if err != nil {
			return nil, err
		}
	} else {
		if f.codec == _CODEC_GOB {
			err = gob.NewEncoder(buf).Encode(o)
		} else if f.codec == _CODEC_MSGPACK {
			err = msgpack.NewEncoder(buf).Encode(o)
		}
		if err != nil {
			return nil, err
		}
	}
	return buf.Bytes(), nil
}
func actionDump(c *cli.Context) {
	verbose := c.Bool(dumpFlagVerbose.Name)
	compress := c.Bool(dumpFlagCompress.Name)
	cd := c.String(dumpFlagCrashDir.Name)
	pid := c.Int(dumpFlagPid.Name)
	when := time.Unix(int64(c.Int(dumpFlagTime.Name)), 0)
	sig := c.Int(dumpFlagSig.Name)
	exe := c.String(dumpFlagExe.Name)

	// Make sure that we are using a sensible umask.
	oldMask := syscall.Umask(0000)
	defer syscall.Umask(oldMask)

	cd = filepath.Join(cd, exe, when.Format(time.RFC3339), fmt.Sprint(pid))
	if err := os.MkdirAll(cd, 0755); err != nil {
		fmt.Fprintf(dumpOutputWriter, "Failed to create crash directory %s [%s]\n", cd, err)
	}

	ci := csi.CrashInspector{}
	if cr, err := ci.Inspect(pid, syscall.Signal(sig)); err != nil {
		fmt.Fprintf(dumpOutputWriter, "Failed to gather crash metadata [%s]\n", err)
	} else {
		if b, err := yaml.Marshal(cr); err != nil {
			fmt.Fprintf(dumpOutputWriter, "Failed to write crash metadata [%s]\n", err)
		} else {
			ry := filepath.Join(cd, "report.yaml")
			if f, err := os.Create(ry); err != nil {
				fmt.Fprintf(dumpOutputWriter, "Failed to dump crash report to %s [%s]\n", ry, err)
			} else {
				defer f.Close()
				fmt.Fprintf(f, "%s", b)
			}
		}
	}

	// And we finally dump the actual core file.
	df := filepath.Join(cd, "core")
	if f, err := os.Create(df); err != nil {
		fmt.Fprintf(dumpOutputWriter, "Failed to dump core to %s [%s]", df, err)
	} else {
		defer f.Close()
		// TODO(tvoss): Investigate syscall.Sendfile and figure out a way
		// to avoid copying data to userspace.
		var dest io.Writer = f
		if compress {
			dest = snappy.NewWriter(f)
		}
		start := time.Now()
		n, _ := io.Copy(dest, os.Stdin)
		elapsed := time.Since(start)
		if verbose {
			fmt.Fprintf(dumpOutputWriter, "Wrote %d bytes of core dump to %s in %f seconds", n, df, elapsed.Seconds())
		}
	}
}
func snap(arr []byte) ([]byte, error) {
	w := bytes.NewBuffer(nil)
	sw := snappy.NewWriter(w)
	if _, err := sw.Write(arr); err != nil {
		return nil, err
	}
	return w.Bytes(), nil
}
func do(isDecompress bool, filename, suffix string, isToStdout bool) (percentage, speed float64, err error) {
	var (
		input   io.Reader
		output  io.Writer
		outName = "-"
	)
	if filename == "-" {
		input = os.Stdin
		output = os.Stdout
	} else {
		fi, err := os.Open(filename)
		if err != nil {
			return 0, 0, err
		}
		input = fi
		defer fi.Close()
		if isToStdout {
			output = os.Stdout
		} else {
			if isDecompress {
				if !strings.HasSuffix(filename, suffix) {
					err = fmt.Errorf("file %s does not have suffix %s", filename, suffix)
					return 0, 0, err
				}
				outName = filename[:len(filename)-len(suffix)]
			} else {
				outName = filename + suffix
			}
			fo, err := os.Create(outName)
			if err != nil {
				return 0, 0, err
			}
			output = fo
			defer fo.Close()
		}
	}

	start := time.Now()
	rwc := NewRWCounter(input, output)
	if isDecompress {
		_, err = io.Copy(rwc, snappy.NewReader(rwc))
	} else {
		_, err = io.Copy(snappy.NewWriter(rwc), rwc)
	}
	useTime := time.Since(start).Seconds()

	if isDecompress {
		percentage = 1 - float64(rwc.CountR())/float64(rwc.CountW())
		speed = float64(rwc.CountW()) / 1024.0 / 1024.0 / useTime
	} else {
		percentage = 1 - float64(rwc.CountW())/float64(rwc.CountR())
		speed = float64(rwc.CountR()) / 1024.0 / 1024.0 / useTime
	}
	return
}
// Store sends a batch of samples to the HTTP endpoint.
func (c *Client) Store(samples model.Samples) error {
	req := &WriteRequest{
		Timeseries: make([]*TimeSeries, 0, len(samples)),
	}
	for _, s := range samples {
		ts := &TimeSeries{
			Labels: make([]*LabelPair, 0, len(s.Metric)),
		}
		for k, v := range s.Metric {
			ts.Labels = append(ts.Labels, &LabelPair{
				Name:  string(k),
				Value: string(v),
			})
		}
		ts.Samples = []*Sample{
			{
				Value:       float64(s.Value),
				TimestampMs: int64(s.Timestamp),
			},
		}
		req.Timeseries = append(req.Timeseries, ts)
	}

	data, err := proto.Marshal(req)
	if err != nil {
		return err
	}

	buf := bytes.Buffer{}
	if _, err := snappy.NewWriter(&buf).Write(data); err != nil {
		return err
	}

	httpReq, err := http.NewRequest("POST", c.url.String(), &buf)
	if err != nil {
		return err
	}
	httpReq.Header.Add("Content-Encoding", "snappy")

	ctx, _ := context.WithTimeout(context.Background(), c.timeout)
	httpResp, err := ctxhttp.Do(ctx, c.client, httpReq)
	if err != nil {
		return err
	}
	defer httpResp.Body.Close()

	if httpResp.StatusCode/100 != 2 {
		return fmt.Errorf("server returned HTTP status %s", httpResp.Status)
	}
	return nil
}
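// The snappy package exposes two incompatible wire formats: the framed stream
// format used above (snappy.NewWriter / snappy.NewReader) and the raw block
// format (snappy.Encode / snappy.Decode). The receiving end must decode with
// the matching format. A minimal block-format sketch of the compression step
// above (illustrative only, not the original client's code):
func compressBlock(data []byte) []byte {
	// Encode appends to its first argument; passing nil allocates a fresh buffer.
	return snappy.Encode(nil, data)
}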
func (s *Store) flushBuffer() (err error) {
	if s.currentWriter == nil {
		return fmt.Errorf("Flush without a current buffer")
	}
	log.Printf("Flushing updates for %s to disk\n", *s.currentFilename)
	sw := snappy.NewWriter(s.currentWriter)
	_, err = s.buf.WriteTo(sw)
	if err != nil {
		return
	}
	s.buf.Reset()
	return
}
func (c *Conn) upgradeSnappy() error {
	conn := net.Conn(c.conn)
	if c.tlsConn != nil {
		conn = c.tlsConn
	}
	c.r = snappy.NewReader(conn)
	c.w = snappy.NewWriter(conn)
	frameType, data, err := ReadUnpackedResponse(c)
	if err != nil {
		return err
	}
	if frameType != FrameTypeResponse || !bytes.Equal(data, []byte("OK")) {
		return errors.New("invalid response from Snappy upgrade")
	}
	return nil
}
func (m *mergeSorter) dumpShard() (err error) {
	defer func() {
		m.buffer = make([]interface{}, 0, m.opts.MaxInMemory)
	}()

	// Create a new shard file.
	shardPath := filepath.Join(m.workDir, fmt.Sprintf("shard.%.6d", len(m.shards)))
	file, err := os.OpenFile(shardPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, shardFileMode)
	if err != nil {
		return fmt.Errorf("error creating shard: %v", err)
	}
	defer func() { replaceErrIfNil(&err, "error closing shard: %v", file.Close()) }()

	w := io.Writer(file)
	if m.opts.CompressShards {
		w = snappy.NewWriter(w)
	}

	// Buffer writing to the shard. Deferred calls run LIFO, so buf.Flush
	// happens before file.Close.
	buf := bufio.NewWriterSize(w, m.opts.IOBufferSize)
	defer func() { replaceErrIfNil(&err, "error flushing shard: %v", buf.Flush()) }()

	// Sort the in-memory buffer of elements.
	sortutil.Sort(m.opts.Lesser, m.buffer)

	// Write each element of the in-memory buffer to the shard file, in sorted order.
	wr := delimited.NewWriter(buf)
	for len(m.buffer) > 0 {
		rec, err := m.opts.Marshaler.Marshal(m.buffer[0])
		if err != nil {
			return fmt.Errorf("marshaling error: %v", err)
		}
		if _, err := wr.Write(rec); err != nil {
			return fmt.Errorf("writing error: %v", err)
		}
		m.buffer = m.buffer[1:]
	}

	m.shards = append(m.shards, shardPath)
	return nil
}
// Compress a file to a snappy archive.
func snap(src *os.File) (dst *os.File, err error) {
	// Remember to re-open the destination file after compression.
	defer func() {
		dst, err = os.Open(dst.Name())
	}()

	// Get file info.
	srcInfo, err := src.Stat()
	if err != nil {
		return
	}
	srcName := src.Name()

	// Make sure existing files are not overwritten.
	dstName := concat(srcName, ".sz")
	getUnusedFilename(&dstName)

	print(concat(srcName, " > ", dstName))

	// Create the destination file.
	dst, err = create(dstName, srcInfo.Mode())
	if err != nil {
		return
	}

	// Set up a *passthru writer in order to print progress.
	pt := &passthru{
		Writer:    dst,
		nExpected: uint64(srcInfo.Size()),
	}
	defer pt.Reset()

	// Wrap a *snappy.Writer around the *passthru writer.
	sz := snappy.NewWriter(pt)
	defer sz.Reset(nil)

	// Write the source file's contents to the new snappy file.
	_, err = snapCopy(sz, src)
	print()
	if err != nil {
		return
	}
	return
}
func (e *defaultPageEncoder) compress(p []byte) ([]byte, error) {
	var compressed bytes.Buffer // TODO: get from a buffer pool
	switch e.compression {
	case "gzip":
		w := gzip.NewWriter(&compressed)
		if _, err := w.Write(p); err != nil {
			return nil, err
		}
		// gzip buffers internally: Close must be called to flush the remaining
		// data and the footer, otherwise the output is truncated.
		if err := w.Close(); err != nil {
			return nil, err
		}
	case "snappy":
		wc := snappy.NewWriter(&compressed)
		if _, err := wc.Write(p); err != nil {
			return nil, err
		}
		if err := wc.Close(); err != nil {
			return nil, err
		}
	case "":
		return p, nil
	default:
		log.Println("defaultPageEncoder: warning: unknown compression codec")
	}
	return compressed.Bytes(), nil
}
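// A minimal sketch of the matching decompression paths (hypothetical helper
// mirroring the two codecs above; assumes bytes, compress/gzip, io/ioutil and
// github.com/golang/snappy are imported):
func decompressPage(compression string, p []byte) ([]byte, error) {
	switch compression {
	case "gzip":
		r, err := gzip.NewReader(bytes.NewReader(p))
		if err != nil {
			return nil, err
		}
		defer r.Close()
		return ioutil.ReadAll(r)
	case "snappy":
		// snappy.NewReader decodes the framed format produced by snappy.NewWriter.
		return ioutil.ReadAll(snappy.NewReader(bytes.NewReader(p)))
	default:
		return p, nil
	}
}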
func FuzzFraming(data []byte) int {
	r := snappy.NewReader(bytes.NewReader(data))
	buf := make([]byte, 0, 1023)
	dec := make([]byte, 0, 1024)
	for i := 0; ; i++ {
		x := i
		if x > cap(buf) {
			x = cap(buf)
		}
		n, err := r.Read(buf[:x])
		if n != 0 {
			dec = append(dec, buf[:n]...)
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			return 0
		}
	}

	r.Reset(bytes.NewReader(data))
	dec1, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	if !bytes.Equal(dec, dec1) {
		fmt.Printf("dec0: %q\n", dec)
		fmt.Printf("dec1: %q\n", dec1)
		panic("not equal")
	}

	bufw := new(bytes.Buffer)
	w := snappy.NewBufferedWriter(bufw)
	for i := 0; len(dec1) > 0; i++ {
		x := i
		if x > len(dec1) {
			x = len(dec1)
		}
		n, err := w.Write(dec1[:x])
		if n != x {
			panic("short write")
		}
		if err != nil {
			panic(err)
		}
		dec1 = dec1[x:]
		if (i % 2) != 0 {
			w.Flush()
		}
	}
	w.Close()

	dec1 = append([]byte{}, dec...)
	bufw2 := new(bytes.Buffer)
	w2 := snappy.NewWriter(bufw2)
	for i := 2; len(dec1) > 0; i++ {
		x := i
		if x > len(dec1) {
			x = len(dec1)
		}
		n, err := w2.Write(dec1[:x])
		if n != x {
			panic("short write")
		}
		if err != nil {
			panic(err)
		}
		dec1 = dec1[x:]
		if (i % 2) != 0 {
			w2.Flush()
		}
	}
	w2.Close()

	r2 := snappy.NewReader(bufw)
	dec2, err := ioutil.ReadAll(r2)
	if err != nil {
		panic(err)
	}
	if !bytes.Equal(dec, dec2) {
		panic("not equal")
	}

	r3 := snappy.NewReader(bufw2)
	dec3, err := ioutil.ReadAll(r3)
	if err != nil {
		panic(err)
	}
	if !bytes.Equal(dec, dec3) {
		panic("not equal")
	}
	return 1
}
func progd_forword(ar cmdoptS) {
	// Create the metadata database (bolt).
	dbi, err := bolt.Open(ar.out_dir+"/md", 0600, nil)
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}
	tx, err := dbi.Begin(true)
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}
	defer tx.Rollback()
	db, err := tx.CreateBucket([]byte("Ketv1"))
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}

	// Generate a crypto nonce and store it.
	nonce, _ := GenerateRandomBytes(24)
	err = db.Put([]byte("nonce"), nonce)
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}

	// Derive the encryption and MAC keys from the nonce and secret key.
	keyhasher := sha3.NewShake256()
	keyhasher.Write(nonce)
	keyhasher.Write([]byte(ar.secret_key))
	xchachakey := make([]byte, 32)
	keyhasher.Read(xchachakey)
	poly1305key := make([]byte, 32)
	keyhasher.Read(poly1305key)

	// Initialize the output stream: tar -> snappy -> XChaCha20 -> hash/split
	// into size-limited files. Compression must come before encryption, since
	// well-encrypted data looks random and does not compress.
	var LimitedSizeWriteToFilei LimitedSizeWriteToFile
	LimitedSizeWriteToFilei.InitNow()
	LimitedSizeWriteToFilei.TargetPatten = ar.out_dir + "/df%X"
	if !ar.div_unitk {
		LimitedSizeWriteToFilei.BytesPerFile = int64(ar.div_at) * const_Mbyte
	} else {
		LimitedSizeWriteToFilei.BytesPerFile = int64(ar.div_at) * const_Kbyte
	}
	cryptos, err := chacha20.NewXChaCha(xchachakey, nonce)
	HashWriter := sha3.NewShake256()
	CyDWriter := io.MultiWriter(LimitedSizeWriteToFilei, HashWriter)
	Data_writer := NewEncryptedWriter(cryptos, CyDWriter)
	CompressedStream := snappy.NewWriter(Data_writer)
	TarStream := tar.NewWriter(CompressedStream)

	GenFileList(ar.in_dir)
	for id := range rfi {
		filedes, err := os.Open(ar.in_dir + "/" + rfi[id])
		if err != nil {
			fmt.Println("Failed to open file " + rfi[id] + ":" + err.Error())
			// Skip files that cannot be opened.
			continue
		}
		filein, _ := filedes.Stat()
		hdr := &tar.Header{
			Name: rfi[id],
			Mode: 0600,
			Size: filein.Size(),
		}
		if err := TarStream.WriteHeader(hdr); err != nil {
			log.Fatalln(err)
		}
		_, err = io.Copy(TarStream, filedes)
		if err != nil {
			fmt.Println("Failed to write file " + rfi[id] + ":" + err.Error())
		}
		filedes.Close()
	}
	if err := TarStream.Close(); err != nil {
		log.Fatalln(err)
	}

	_, _, nd := LimitedSizeWriteToFilei.Finialize()
	FileHash := make([]byte, 64)
	HashWriter.Read(FileHash)

	var poly1305sum [16]byte
	var poly1305sum_key [32]byte
	copy(poly1305sum_key[:], poly1305key)
	poly1305.Sum(&poly1305sum, FileHash, &poly1305sum_key)
	err = db.Put([]byte("poly1305sum"), poly1305sum[:])
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}

	bb := new(bytes.Buffer)
	binary.Write(bb, binary.LittleEndian, nd)
	err = db.Put([]byte("packagesum"), bb.Bytes())
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}

	// We won't use the transaction anymore.
	if err := tx.Commit(); err != nil {
		fmt.Println(err.Error())
		os.Exit(-1)
	}
	dbi.Close()

	// Finally, call par2 to compute reconstruction data.
	if ar.parrate != 0 {
		_, err := exec.LookPath("par2")
		if err != nil {
			fmt.Println("Unable to locate par2, reconstruction data computation was skipped:" + err.Error())
		}
		DirIf, _ := os.Open(ar.out_dir)
		DirIfs, _ := DirIf.Readdirnames(-1)
		cmdargs := []string{"c", "-a", "mdpp", "-r" + strconv.Itoa(ar.parrate), "-v", "--"}
		cmdargs = append(cmdargs, DirIfs...)
		cmd := exec.Command("par2", cmdargs...)
		cmd.Stdout = os.Stdout
		Absp, _ := filepath.Abs(ar.out_dir)
		cmd.Dir = Absp
		err = cmd.Start()
		if err != nil {
			fmt.Println("Unable to exec par2, reconstruction data computation was skipped:" + err.Error())
		}
		err = cmd.Wait()
		if err != nil {
			fmt.Println("par2 finished unsuccessfully, reconstruction data computation was skipped (or failed):" + err.Error())
		}
	}

	fmt.Printf("Hash: %x\n", FileHash)
	fmt.Printf("Key: %s\n", ar.secret_key)
}