// TestMultiCoreChecksummedWriter verifies that the multi-core writer splits
// its input into 16-byte blocks, appends a CRC-32 (IEEE) checksum after each
// block, and flushes a final short block (with its checksum) on Close.
func TestMultiCoreChecksummedWriter(t *testing.T) {
	buf := &bytes.Buffer{}
	cw := NewMultiCoreChecksummedWriter(buf, 16, crc32.NewIEEE, runtime.GOMAXPROCS(0))
	if cw == nil {
		t.Fatal(cw)
	}
	// First write: one full 16-byte block plus 4 leftover bytes ("7890").
	n, err := cw.Write([]byte("12345678901234567890"))
	if n != 20 {
		t.Fatal(n)
	}
	if err != nil {
		t.Fatal(err)
	}
	hash := crc32.NewIEEE()
	hash.Write([]byte("1234567890123456"))
	// Second write: the leftover "7890" plus the new data forms the next block.
	n, err = cw.Write([]byte("ghijklmnopqrstuvwxyz"))
	if n != 20 {
		t.Fatal(n)
	}
	if err != nil {
		t.Fatal(err)
	}
	hash2 := crc32.NewIEEE()
	hash2.Write([]byte("7890ghijklmnopqr"))
	err = cw.Close()
	if err != nil {
		t.Fatal(err)
	}
	// Close flushes the remaining partial block ("stuvwxyz") with its checksum.
	hash3 := crc32.NewIEEE()
	hash3.Write([]byte("stuvwxyz"))
	// Expected stream: block || crc || block || crc || tail || crc.
	if !bytes.Equal(buf.Bytes(), []byte("1234567890123456"+string(hash.Sum(nil))+"7890ghijklmnopqr"+string(hash2.Sum(nil))+"stuvwxyz"+string(hash3.Sum(nil)))) {
		t.Fatalf("%#v", string(buf.Bytes()))
	}
}
// makeOuts wires up one RegularCollectionReceiver per test intent and starts
// a goroutine draining it, accumulating a CRC-32 checksum and byte count per
// namespace into the supplied maps. Each goroutine sends its terminal error
// (nil on clean io.EOF) on errCh exactly once.
func makeOuts(testIntents []*intents.Intent, demux *Demultiplexer, outChecksum map[string]hash.Hash, demuxOuts map[string]*RegularCollectionReceiver, outLengths map[string]*int, errCh chan<- error) {
	for _, dbc := range testIntents {
		ns := dbc.Namespace()
		sum := crc32.NewIEEE()
		muxOut := &RegularCollectionReceiver{
			Intent: dbc,
			Demux:  demux,
			Origin: ns,
		}
		outLength := 0
		outChecksum[ns] = sum
		demuxOuts[ns] = muxOut
		outLengths[ns] = &outLength
		demuxOuts[ns].Open()
		// sum, muxOut, and outLength are declared per iteration, so capturing
		// them in the goroutine is safe even before Go 1.22 loop-var scoping.
		go func() {
			bs := make([]byte, db.MaxBSONSize)
			var err error
			for {
				var length int
				length, err = muxOut.Read(bs)
				if err != nil {
					break
				}
				sum.Write(bs[:length])
				outLength += len(bs[:length])
			}
			// io.EOF is the expected clean-shutdown signal, not a failure.
			if err == io.EOF {
				err = nil
			}
			errCh <- err
		}()
	}
}
func (m *Dot11) ChecksumValid() bool { // only for CTRL and MGMT frames h := crc32.NewIEEE() h.Write(m.Contents) h.Write(m.Payload) return m.Checksum == h.Sum32() }
// run drives the decode loop: for each part it reads a header, an optional
// part header (multipart mode), and the body, then validates the part and
// appends it to d.parts. The loop only terminates when some step returns an
// error — NOTE(review): presumably io.EOF from readHeader is the normal
// end-of-input signal for callers; confirm how callers treat the error.
func (d *decoder) run() error {
	// init hash (whole-stream CRC, fed by readBody alongside per-part CRCs)
	d.crcHash = crc32.NewIEEE()
	// for each part
	for {
		// create a part
		d.part = new(Part)
		// read the header
		if err := d.readHeader(); err != nil {
			return err
		}
		// read part header if available
		if d.multipart {
			if err := d.readPartHeader(); err != nil {
				return err
			}
		}
		// decode the part body
		if err := d.readBody(); err != nil {
			return err
		}
		// add part to list
		d.parts = append(d.parts, d.part)
		// validate part
		if err := d.part.validate(); err != nil {
			return err
		}
	}
	// unreachable: the loop above exits only via return
	return nil
}
// Open returns a ReadCloser that provides access to the File's contents.
// Multiple files may be read concurrently.
func (f *File) Open() (rc io.ReadCloser, err error) {
	bodyOffset, err := f.findBodyOffset()
	if err != nil {
		return
	}
	size := int64(f.CompressedSize64)
	r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size)
	switch f.Method {
	case Store: // (no compression)
		rc = ioutil.NopCloser(r)
	case Deflate:
		// NOTE(review): standard zip "deflate" entries are raw DEFLATE
		// streams; zlib.NewReader expects a zlib header and would fail on
		// them — flate.NewReader is the usual reader here. Confirm whether
		// this code handles a nonstandard container before changing.
		rc, err = zlib.NewReader(r)
		if err != nil {
			return
		}
	default:
		err = ErrAlgorithm
		return
	}
	var desr io.Reader
	if f.hasDataDescriptor() {
		// The data descriptor (if present) immediately follows the body.
		desr = io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, dataDescriptorLen)
	}
	// checksumReader verifies the CRC-32 as the caller reads.
	rc = &checksumReader{rc, crc32.NewIEEE(), f, desr, nil}
	return
}
// handleIndex returns the index XML file to the client. func (s *EC2HTTPTestStorage) handleIndex(w http.ResponseWriter, req *http.Request) { lbr := &listBucketResult{ Name: "juju-dist", Prefix: "", Marker: "", MaxKeys: 1000, IsTruncated: false, } names := []string{} for name := range s.files { names = append(names, name) } sort.Strings(names) for _, name := range names { h := crc32.NewIEEE() h.Write([]byte(s.files[name])) contents := &contents{ Key: name, LastModified: time.Now(), ETag: fmt.Sprintf("%x", h.Sum(nil)), Size: len([]byte(s.files[name])), StorageClass: "STANDARD", } lbr.Contents = append(lbr.Contents, contents) } buf, err := xml.Marshal(lbr) if err != nil { http.Error(w, fmt.Sprintf("500 %v", err), http.StatusInternalServerError) return } w.Header().Set("Content-Type", "application/xml") w.Write(buf) }
// decode deserializes one log entry from the passed io.Reader. func (e *logEntry) decode(r io.Reader) error { header := make([]byte, 24) if _, err := r.Read(header); err != nil { return err } command := make([]byte, binary.LittleEndian.Uint32(header[20:24])) if _, err := r.Read(command); err != nil { return err } crc := binary.LittleEndian.Uint32(header[:4]) check := crc32.NewIEEE() check.Write(header[4:]) check.Write(command) if crc != check.Sum32() { return errInvalidChecksum } e.Term = binary.LittleEndian.Uint64(header[4:12]) e.Index = binary.LittleEndian.Uint64(header[12:20]) e.Command = command return nil }
// UnmarshalBinary reads header from the provided data slice. func (h *header) UnmarshalBinary(data []byte) error { // header length if len(data) != HeaderLen { return errors.New("xz: wrong file header length") } // magic header if !bytes.Equal(headerMagic, data[:6]) { return errHeaderMagic } // checksum crc := crc32.NewIEEE() crc.Write(data[6:8]) if uint32LE(data[8:]) != crc.Sum32() { return errors.New("xz: invalid checksum for file header") } // stream flags if data[6] != 0 { return errInvalidFlags } flags := data[7] if err := verifyFlags(flags); err != nil { return err } h.flags = flags return nil }
// MarshalBinary converts footer values into an xz file footer. Note
// that the footer value is checked for correctness.
//
// Layout: CRC-32 at bytes 0..3 (over bytes 4..9) | backward size at
// bytes 4..7 | flag byte at 9 | footer magic at 10+.
func (f *footer) MarshalBinary() (data []byte, err error) {
	if err = verifyFlags(f.flags); err != nil {
		return nil, err
	}
	if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) {
		return nil, errors.New("xz: index size out of range")
	}
	if f.indexSize%4 != 0 {
		return nil, errors.New(
			"xz: index size not aligned to four bytes")
	}
	data = make([]byte, footerLen)
	// backward size (index size): encoded as (size/4)-1
	s := (f.indexSize / 4) - 1
	putUint32LE(data[4:], uint32(s))
	// flags
	data[9] = f.flags
	// footer magic
	copy(data[10:], footerMagic)
	// CRC-32 covers the backward size and flag bytes (4..9).
	crc := crc32.NewIEEE()
	crc.Write(data[4:10])
	putUint32LE(data, crc.Sum32())
	return data, nil
}
func SmallIndexer(gene genome.Gene) (index []byte) { hash := crc32.NewIEEE() // //TODO: fix me hash.Write(gene) index = hash.Sum() return }
// Used internally to create and initialize a new request.
//
// conn may be nil (e.g. when we do not own the connection); in that case
// RemoteAddr is left unset and 100-continue handling is skipped.
func newRequest(request *http.Request, conn net.Conn, startTime time.Time) *Request {
	fReq := new(Request)
	fReq.Context = make(map[string]interface{})
	fReq.HttpRequest = request
	fReq.StartTime = startTime
	fReq.connection = conn
	if conn != nil {
		// NOTE(review): this type assertion panics for non-TCP conns
		// (e.g. unix sockets) — confirm only TCP connections reach here.
		fReq.RemoteAddr = conn.RemoteAddr().(*net.TCPAddr)
	}
	// create a semi-unique id to track a connection in the logs
	// ID is the least significant decimal digits of time with some randomization
	// the last 3 zeros of time.Nanoseconds appear to always be zero
	var ut = fReq.StartTime.UnixNano()
	fReq.ID = fmt.Sprintf("%010x", (ut-(ut-(ut%1e12)))+int64(rand.Intn(999)))
	fReq.PipelineStageStats = list.New()
	fReq.pipelineHash = crc32.NewIEEE()
	// Support for 100-continue requests
	// http.Server (and presumably google app engine) already handle this
	// case. So we don't need to do anything if we don't own the
	// connection.
	if conn != nil && request.Header.Get("Expect") == "100-continue" {
		request.Body = &continueReader{req: fReq, r: request.Body}
	}
	return fReq
}
// TestFileBigWriteWeirdBlockSize writes the Moby Dick fixture (1257276
// bytes) through a file created with a block size (1050000) that does not
// evenly divide the file length, then reads it back and verifies both the
// byte count and the CRC-32 of the contents.
func TestFileBigWriteWeirdBlockSize(t *testing.T) {
	client := getClient(t)
	baleet(t, "/_test/create/4.txt")
	mkdirp(t, "/_test/create")
	writer, err := client.CreateFile("/_test/create/4.txt", 1, 1050000, 0755)
	require.NoError(t, err)
	mobydick, err := os.Open("test/mobydick.txt")
	require.NoError(t, err)
	n, err := io.Copy(writer, mobydick)
	require.NoError(t, err)
	assert.EqualValues(t, 1257276, n)
	err = writer.Close()
	require.NoError(t, err)
	reader, err := client.Open("/_test/create/4.txt")
	require.NoError(t, err)
	// Round-trip check: stream the file back through a CRC-32 hash.
	hash := crc32.NewIEEE()
	n, err = io.Copy(hash, reader)
	assert.Nil(t, err)
	assert.EqualValues(t, 1257276, n)
	assert.EqualValues(t, 0x199d1ae6, hash.Sum32())
	// NOTE(review): reader and mobydick are never closed — acceptable in a
	// test, but confirm no resources leak across the suite.
}
// ProcessBuffer reads metric definitions from c, shards them into 1024
// buckets keyed by CRC-32 of the metric name, and flushes each bucket to
// Publish as a JSON payload every 100ms. Metrics with OrgId == 0 are
// dropped. Runs forever; intended to be launched as a goroutine.
func ProcessBuffer(c <-chan m.MetricDefinition) {
	buf := make(map[uint32][]m.MetricDefinition)
	// flush buffer 10 times every second
	t := time.NewTicker(time.Millisecond * 100)
	for {
		select {
		case b := <-c:
			if b.OrgId != 0 {
				//get hash.
				h := crc32.NewIEEE()
				h.Write([]byte(b.Name))
				hash := h.Sum32() % uint32(1024)
				if _, ok := buf[hash]; !ok {
					buf[hash] = make([]m.MetricDefinition, 0)
				}
				buf[hash] = append(buf[hash], b)
			}
		case <-t.C:
			//copy contents of buffer
			for hash, metrics := range buf {
				// Copy then delete so Publish works on a stable snapshot
				// while the buffer keeps filling.
				currentBuf := make([]m.MetricDefinition, len(metrics))
				copy(currentBuf, metrics)
				delete(buf, hash)
				//log.Info(fmt.Sprintf("flushing %d items in buffer now", len(currentBuf)))
				msgString, err := json.Marshal(currentBuf)
				if err != nil {
					log.Error(0, "Failed to marshal metrics payload.", err)
				} else {
					go Publish(fmt.Sprintf("%d", hash), msgString)
				}
			}
		}
	}
}
// newReader opens fn read-only and memory-maps its contents. A zero-length
// file is not mapped (data stays nil) and the reader is marked done, making
// it a no-op reader.
func newReader(fn string) (*reader, error) {
	f, err := os.OpenFile(fn, os.O_RDONLY, 0)
	if err != nil {
		return nil, err
	}
	// Closing the fd here is intentional: the mapping is created before
	// return — NOTE(review): mmap'd pages normally remain valid after the
	// descriptor is closed; confirm gommap follows that behavior.
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		return nil, err
	}
	var data gommap.MMap
	var zero bool
	if fi.Size() <= 0 {
		// Mapping an empty file would fail; treat it as an empty reader.
		zero = true
	} else {
		data, err = gommap.Map(f.Fd(), gommap.PROT_READ, gommap.MAP_SHARED)
		if err != nil {
			return nil, err
		}
	}
	r := &reader{
		cs:   crc32.NewIEEE(),
		data: data,
		zero: zero,
		done: zero, // noop reader if zero-byte file
	}
	r.bv.Data = data
	return r, nil
}
func checksumsPass(romname string, rom *clrmamepro.Block, f io.Reader) bool { crc := crc32.NewIEEE() md := md5.New() sha := sha1.New() checksums := io.MultiWriter(crc, md, sha) _, err := io.Copy(checksums, f) if err != nil { die("checksumming ROM %q: %v", romname, err) } compare := func(h hash.Hash, expected string) bool { return strings.ToUpper(fmt.Sprintf("%x", h.Sum(nil))) == strings.ToUpper(expected) } if !compare(crc, rom.Texts[fCRC32]) { alert("BAD CRC32", romname) return false } if !compare(md, rom.Texts[fMD5]) { alert("BAD MD5", romname) return false } if !compare(sha, rom.Texts[fSHA1]) { alert("BAD SHA1", romname) return false } return true }
// UnmarshalBinary sets the footer value by unmarshalling an xz file
// footer.
//
// Layout mirrors MarshalBinary: CRC-32 at bytes 0..3 (over bytes 4..9),
// backward size at 4..7, flag byte at 9, magic at 10+.
func (f *footer) UnmarshalBinary(data []byte) error {
	if len(data) != footerLen {
		return errors.New("xz: wrong footer length")
	}
	// magic bytes
	if !bytes.Equal(data[10:], footerMagic) {
		return errors.New("xz: footer magic invalid")
	}
	// CRC-32
	crc := crc32.NewIEEE()
	crc.Write(data[4:10])
	if uint32LE(data) != crc.Sum32() {
		return errors.New("xz: footer checksum error")
	}
	var g footer
	// backward size (index size): encoded as (size/4)-1, inverted here
	g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4
	// flags: byte 8 must be zero; the flag value itself is byte 9
	if data[8] != 0 {
		return errInvalidFlags
	}
	g.flags = data[9]
	if err := verifyFlags(g.flags); err != nil {
		return err
	}
	// Assign only on full success so *f is untouched on any error path.
	*f = g
	return nil
}
func BenchmarkEncoding(b *testing.B) { h := crc32.NewIEEE() k := 1 input := make([]byte, b.N) b.ResetTimer() _, _ = Encode(k, h, input, len(input)*2) }
// Test a TxRecord with some trailing bytes func TestTrailingBytesTxRecord(t *testing.T) { buf, _, err := genTxValue(0xfa, []byte{0xff, 0xff, 0xff}) // Add some trailing bytes to the tx record buf = append(buf, byte(0xff)) buf = append(buf, byte(0xff)) buf = append(buf, byte(0xff)) summer := crc32.NewIEEE() _, err = summer.Write(buf) if err != nil { t.Error(err) } crc32sum := summer.Sum32() r, err := genTestCaseHeader(buf, 1, crc32sum) if err != nil { t.Error(err) } walker, err := NewReaderWalker(r) if err != nil { t.Error(err) } _, err = walker.Next() if err != ErrRemainingBytesForRecord { t.Error(err) } _, err = walker.Next() if err != io.EOF { t.Errorf("expected EOF, got %v", err) } }
// Begin a transaction func (log *Log) BeginTx() *LogTx { tx := &LogTx{} tx.log = log tx.buf = new(bytes.Buffer) tx.crc = crc32.NewIEEE() return tx }
// genTxValue generates one serialized Tx-record entry: a little-endian
// uint16 kind, a little-endian uint16 value length, then the value bytes.
// It also returns the CRC-32 (IEEE) of the value bytes alone — the kind and
// length prefix are not covered by the checksum.
func genTxValue(kind uint16, val []byte) (chunk []byte, crc32sum uint32, err error) {
	var b bytes.Buffer
	if err := binary.Write(&b, binary.LittleEndian, kind); err != nil {
		return nil, 0, err
	}
	if err := binary.Write(&b, binary.LittleEndian, uint16(len(val))); err != nil {
		return nil, 0, err
	}
	if _, err := b.Write(val); err != nil {
		return nil, 0, err
	}
	// ChecksumIEEE is equivalent to NewIEEE + Write + Sum32.
	return b.Bytes(), crc32.ChecksumIEEE(val), nil
}
// readBody decodes one part body line by line until the "=yend" trailer,
// appending decoded bytes to d.part.Body and feeding them to both the
// per-part and whole-stream CRC-32 hashes.
func (d *decoder) readBody() error {
	// ready the part body
	d.part.Body = make([]byte, 0)
	// reset special
	d.awaitingSpecial = false
	// setup crc hash
	d.part.crcHash = crc32.NewIEEE()
	// each line
	for {
		line, err := d.buf.ReadBytes('\n')
		if err != nil {
			return err
		}
		// strip linefeeds (some use CRLF some LF)
		line = bytes.TrimRight(line, "\r\n")
		// check for =yend: the trailer terminates the body
		if len(line) >= 5 && string(line[:5]) == "=yend" {
			return d.parseTrailer(string(line))
		}
		// decode
		b := d.decode(line)
		// update hashs (both the part hash and the whole-stream hash)
		d.part.crcHash.Write(b)
		d.crcHash.Write(b)
		// append decoded bytes to the part body
		d.part.Body = append(d.part.Body, b...)
	}
	// unreachable: the loop exits only via the returns above
	return nil
}
func (e *Engine) crc32() error { data, err := computeHash(crc32.NewIEEE(), e.stack.Pop()) if err == nil { e.stack.Push(data) } return err }
// writeChunk writes one chunk: a 4-byte length, the 4-byte chunk name, the
// payload, then a CRC-32 over name+payload. Any write error is latched in
// e.err, and once set, subsequent calls become no-ops.
func (e *encoder) writeChunk(b []byte, name string) {
	if e.err != nil {
		return
	}
	n := uint32(len(b))
	// Guard against len(b) overflowing uint32 on 64-bit platforms.
	if int(n) != len(b) {
		e.err = UnsupportedError(name + " chunk is too large: " + strconv.Itoa(len(b)))
		return
	}
	writeUint32(e.header[0:4], n)
	e.header[4] = name[0]
	e.header[5] = name[1]
	e.header[6] = name[2]
	e.header[7] = name[3]
	// The CRC covers the chunk name and data, not the length prefix.
	crc := crc32.NewIEEE()
	crc.Write(e.header[4:8])
	crc.Write(b)
	writeUint32(e.footer[0:4], crc.Sum32())
	_, e.err = e.w.Write(e.header[0:8])
	if e.err != nil {
		return
	}
	_, e.err = e.w.Write(b)
	if e.err != nil {
		return
	}
	_, e.err = e.w.Write(e.footer[0:4])
}
// NewArticle builds one complete yEnc-encoded usenet article for part
// data of a file: NNTP headers, the =ybegin/=ypart lines, the encoded body
// p, and the =yend line carrying the part's CRC-32.
func NewArticle(p []byte, data *ArticleData, subject string) *Article {
	buf := new(bytes.Buffer)
	buf.WriteString(fmt.Sprintf("From: %s\r\n", Config.Global.From))
	buf.WriteString(fmt.Sprintf("Newsgroups: %s\r\n", Config.Global.DefaultGroup))
	buf.WriteString(fmt.Sprintf("Message-ID: <%d$gps@gopoststuff>\r\n", time.Now().UnixNano()))
	// art.headers['Message-ID'] = '<%.5f.%d@%s>' % (time.time(), partnum, self.conf['server']['hostname'])
	//headers["X-Newsposter"] = "gopoststuff alpha - https://github.com/madcowfred/gopoststuff"
	buf.WriteString(fmt.Sprintf("X-Newsposter: gopoststuff alpha - https://github.com/madcowfred/gopoststuff\r\n"))
	// Build subject
	// spec: c1 [fnum/ftotal] - "filename" yEnc (pnum/ptotal)
	buf.WriteString(fmt.Sprintf("Subject: %s [%d/%d] - \"%s\" yEnc (%d/%d)\r\n\r\n", subject, data.FileNum, data.FileTotal, data.FileName, data.PartNum, data.PartTotal))
	// yEnc begin line
	buf.WriteString(fmt.Sprintf("=ybegin part=%d total=%d line=128 size=%d name=%s\r\n", data.PartNum, data.PartTotal, data.FileSize, data.FileName))
	// yEnc part line (begin is 1-based, hence PartBegin+1)
	buf.WriteString(fmt.Sprintf("=ypart begin=%d end=%d\r\n", data.PartBegin+1, data.PartEnd))
	//log.Debug("%+v", buf)
	// Encoded data
	yencode.Encode(p, buf)
	// yEnc end line: pcrc32 is the CRC-32 of the raw (unencoded) part bytes
	h := crc32.NewIEEE()
	h.Write(p)
	buf.WriteString(fmt.Sprintf("=yend size=%d part=%d pcrc32=%08X\r\n", data.PartSize, data.PartNum, h.Sum32()))
	return &Article{Body: buf.Bytes()}
}
// Open returns a ReadCloser that provides access to the File's contents.
//
// NOTE(review): this code predates Go 1 (os.Error, os.SEEK_CUR); left
// untouched for compatibility with its surroundings.
func (f *File) Open() (rc io.ReadCloser, err os.Error) {
	off := int64(f.headerOffset)
	size := int64(f.CompressedSize)
	if f.bodyOffset == 0 {
		// Lazily parse the local file header to learn where the body starts.
		r := io.NewSectionReader(f.zipr, off, f.zipsize-off)
		if err = readFileHeader(f, r); err != nil {
			return
		}
		if f.bodyOffset, err = r.Seek(0, os.SEEK_CUR); err != nil {
			return
		}
		if size == 0 {
			// presumably readFileHeader filled in CompressedSize; re-read it
			size = int64(f.CompressedSize)
		}
	}
	if f.hasDataDescriptor() && size == 0 {
		// permit SectionReader to see the rest of the file
		size = f.zipsize - (off + f.bodyOffset)
	}
	r := io.NewSectionReader(f.zipr, off+f.bodyOffset, size)
	switch f.Method {
	case Store: // (no compression)
		rc = ioutil.NopCloser(r)
	case Deflate:
		rc = flate.NewReader(r)
	default:
		err = UnsupportedMethod
	}
	if rc != nil {
		// Wrap with CRC-32 verification as the caller reads.
		rc = &checksumReader{rc, crc32.NewIEEE(), f, r}
	}
	return
}
// NewChecksumIndexOutput wraps main so that written bytes are additionally
// folded into a CRC-32 (IEEE) digest.
func NewChecksumIndexOutput(main IndexOutput) *ChecksumIndexOutput {
	out := new(ChecksumIndexOutput)
	out.IndexOutputImpl = NewIndexOutput(main)
	out.main = main
	out.digest = crc32.NewIEEE()
	return out
}
// Open returns an io.ReadCloser that provides access to the File's
// contents. Multiple files may be read concurrently.
func (f *File) Open() (rc io.ReadCloser, err error) {
	bodyOffset, err := f.findBodyOffset()
	if err != nil {
		return
	}
	size := int64(f.CompressedSize64)
	r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size)
	// Look up the registered decompressor for this entry's method.
	dcomp := decompressor(f.Method)
	if dcomp == nil {
		err = ErrAlgorithm
		return
	}
	rc = dcomp(r)
	var desr io.Reader
	if f.hasDataDescriptor() {
		// The data descriptor (if present) immediately follows the body.
		desr = io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, dataDescriptorLen)
	}
	// checksumReader verifies the CRC-32 as the contents are read.
	rc = &checksumReader{
		rc:   rc,
		hash: crc32.NewIEEE(),
		f:    f,
		desr: desr,
	}
	return
}
// CreateHeader adds a file to the zip file using the provided FileHeader
// for the file metadata.
// It returns a Writer to which the file contents should be written.
//
// The file's contents must be written to the io.Writer before the next
// call to Create, CreateHeader, or Close. The provided FileHeader fh
// must not be modified after a call to CreateHeader.
func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
	// Finish the previous entry before starting a new one.
	if w.last != nil && !w.last.closed {
		if err := w.last.close(); err != nil {
			return nil, err
		}
	}
	if len(w.dir) > 0 && w.dir[len(w.dir)-1].FileHeader == fh {
		// See https://golang.org/issue/11144 confusion.
		return nil, errors.New("archive/zip: invalid duplicate FileHeader")
	}
	fh.Flags |= 0x8 // we will write a data descriptor
	// TODO(alex): Look at spec and see if these need to be changed
	// when using encryption.
	fh.CreatorVersion = fh.CreatorVersion&0xff00 | zipVersion20 // preserve compatibility byte
	fh.ReaderVersion = zipVersion20
	fw := &fileWriter{
		zipw:      w.cw,
		compCount: &countWriter{w: w.cw},
		crc32:     crc32.NewIEEE(),
	}
	// Get the compressor before possibly changing Method to 99 due to password
	comp := compressor(fh.Method)
	if comp == nil {
		return nil, ErrAlgorithm
	}
	// check for password
	var sw io.Writer = fw.compCount
	if fh.password != nil {
		// we have a password and need to encrypt.
		fh.writeWinZipExtra()
		fh.Method = 99 // ok to change, we've gotten the comp and wrote extra
		ew, err := newEncryptionWriter(sw, fh.password, fw)
		if err != nil {
			return nil, err
		}
		sw = ew
	}
	// Chain: raw data -> compressor -> (encryption) -> count -> zip writer.
	var err error
	fw.comp, err = comp(sw)
	if err != nil {
		return nil, err
	}
	fw.rawCount = &countWriter{w: fw.comp}
	// Record the entry in the central directory at the current offset.
	h := &header{
		FileHeader: fh,
		offset:     uint64(w.cw.count),
	}
	w.dir = append(w.dir, h)
	fw.header = h
	if err := writeHeader(w.cw, fh); err != nil {
		return nil, err
	}
	w.last = fw
	return fw, nil
}
// WriteRecord writes one record: a fixed-size header, the (optionally
// snappy-compressed) body, then a little-endian CRC-32 of the body bytes
// as written. Once rw.Err is set, all further writes are rejected.
func (rw *Writer) WriteRecord(data []byte, flags Flags) error {
	if rw.Err != nil {
		return rw.Err
	}
	// Merge the writer's default flags with the per-record flags.
	flags = flags | rw.Flags
	if flags&NoCompression == 0 {
		data = snappy.Encode(rw.compressBuf, data)
	}
	header := recordHeader{bodyLength: uint32(len(data)), flags: flags}
	var headerBuf [recordHeaderStorageSize]byte
	header.encode(headerBuf[:])
	// Short writes are detected via the returned size; the Write error
	// itself is deliberately discarded in favor of ErrWriteBytes.
	if size, _ := rw.bytesWriter.Write(headerBuf[:]); size != recordHeaderStorageSize {
		return rw.err(ErrWriteBytes)
	}
	// checksumWriter tees the body through a CRC-32 as it is written.
	bodyWriter := checksumWriter{writer: rw.bytesWriter, crc: crc32.NewIEEE()}
	if size, _ := bodyWriter.Write(data); size != len(data) {
		return rw.err(ErrWriteBytes)
	}
	var checksumBuf [4]byte
	binary.LittleEndian.PutUint32(checksumBuf[:], bodyWriter.checksum())
	if size, _ := rw.bytesWriter.Write(checksumBuf[:]); size != len(checksumBuf) {
		return rw.err(ErrWriteBytes)
	}
	return nil
}
// makeIns starts one goroutine per test intent that writes 10000 marshalled
// test documents into its MuxIn, recording per-namespace CRC-32 checksums
// and byte counts into the supplied maps so the demux side can verify them.
// Each goroutine sends its final error (or nil) on errCh exactly once.
func makeIns(testIntents []*intents.Intent, mux *Multiplexer, inChecksum map[string]hash.Hash, muxIns map[string]*MuxIn, inLengths map[string]*int, errCh chan<- error) {
	for index, dbc := range testIntents {
		ns := dbc.Namespace()
		sum := crc32.NewIEEE()
		muxIn := &MuxIn{Intent: dbc, Mux: mux}
		inLength := 0
		inChecksum[ns] = sum
		muxIns[ns] = muxIn
		inLengths[ns] = &inLength
		// index is passed as an argument so the goroutine sees this
		// iteration's value; ns/sum/muxIn/inLength are per-iteration
		// declarations, so capturing them is safe even pre-Go 1.22.
		go func(index int) {
			err := muxIn.Open()
			if err != nil {
				errCh <- err
				return
			}
			// Reuse one max-size buffer; each doc is copied into a prefix
			// slice of it before being written and checksummed.
			staticBSONBuf := make([]byte, db.MaxBSONSize)
			for i := 0; i < 10000; i++ {
				bsonBytes, _ := bson.Marshal(testDoc{Bar: index * i, Baz: ns})
				bsonBuf := staticBSONBuf[:len(bsonBytes)]
				copy(bsonBuf, bsonBytes)
				muxIn.Write(bsonBuf)
				sum.Write(bsonBuf)
				inLength += len(bsonBuf)
			}
			err = muxIn.Close()
			errCh <- err
		}(index)
	}
}