// convertIPPDateToTime converts an RFC 2579 date to a time.Time object.
func convertIPPDateToTime(date *C.ipp_uchar_t) time.Time {
	r := bytes.NewReader(C.GoBytes(unsafe.Pointer(date), 11))

	var year uint16
	var month, day, hour, min, sec, dsec uint8
	binary.Read(r, binary.BigEndian, &year)
	binary.Read(r, binary.BigEndian, &month)
	binary.Read(r, binary.BigEndian, &day)
	binary.Read(r, binary.BigEndian, &hour)
	binary.Read(r, binary.BigEndian, &min)
	binary.Read(r, binary.BigEndian, &sec)
	binary.Read(r, binary.BigEndian, &dsec)

	var utcDirection, utcHour, utcMin uint8
	binary.Read(r, binary.BigEndian, &utcDirection)
	binary.Read(r, binary.BigEndian, &utcHour)
	binary.Read(r, binary.BigEndian, &utcMin)

	var utcOffset time.Duration
	utcOffset += time.Duration(utcHour) * time.Hour
	utcOffset += time.Duration(utcMin) * time.Minute

	var loc *time.Location
	if utcDirection == '-' {
		loc = time.FixedZone("", -int(utcOffset.Seconds()))
	} else {
		loc = time.FixedZone("", int(utcOffset.Seconds()))
	}

	nsec := int(dsec) * 100 * int(time.Millisecond)

	return time.Date(int(year), time.Month(month), int(day), int(hour), int(min), int(sec), nsec, loc)
}
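// A cgo-free sketch of the same RFC 2579 DateAndTime layout, for reference:
// the 11 octets are year (2 bytes, big-endian), month, day, hour, minute,
// second, deci-seconds, UTC direction ('+' or '-'), UTC hours, UTC minutes.
// The function name below is illustrative and not part of the original source.
func parseRFC2579Date(b [11]byte) time.Time {
	year := int(binary.BigEndian.Uint16(b[0:2]))
	offset := int(b[9])*3600 + int(b[10])*60
	if b[8] == '-' {
		offset = -offset
	}
	loc := time.FixedZone("", offset)
	nsec := int(b[7]) * int(100*time.Millisecond) // deci-seconds to nanoseconds
	return time.Date(year, time.Month(b[1]), int(b[2]), int(b[3]), int(b[4]), int(b[5]), nsec, loc)
}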
func ReadNewInvalidState(in io.Reader) *NewInvalidState {
	var reason, gameMode byte
	binary.Read(in, ByteOrder, &reason)
	binary.Read(in, ByteOrder, &gameMode)
	ptr := NewInvalidState{reason, gameMode}
	return &ptr
}
func (e *indexEntry) ReadFrom(r io.Reader) (n int64, err error) {
	var deleted byte
	if err = binary.Read(r, binary.BigEndian, &deleted); err != nil {
		return 0, err
	}
	e.deleted = deleted != 0

	var valueLen uint32
	if err = binary.Read(r, binary.BigEndian, &valueLen); err != nil {
		return 0, err
	}
	value := make([]byte, int(valueLen))
	if _, err = io.ReadFull(r, value); err != nil {
		return 0, err
	}
	e.value = string(value)

	var id int64
	if err = binary.Read(r, binary.BigEndian, &id); err != nil {
		return 0, err
	}
	e.id = id

	// Count the uint32 length prefix as well as the fields themselves.
	return int64(binary.Size(deleted)+binary.Size(valueLen)+len(value)+binary.Size(id)), nil
}
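// A matching WriteTo is sketched below for symmetry; it is not part of the
// original source, but mirrors the wire layout ReadFrom expects: a deleted
// flag byte, a big-endian uint32 length, the value bytes, then an int64 id.
func (e *indexEntry) WriteTo(w io.Writer) (n int64, err error) {
	var deleted byte
	if e.deleted {
		deleted = 1
	}
	if err = binary.Write(w, binary.BigEndian, deleted); err != nil {
		return 0, err
	}
	if err = binary.Write(w, binary.BigEndian, uint32(len(e.value))); err != nil {
		return 0, err
	}
	if _, err = io.WriteString(w, e.value); err != nil {
		return 0, err
	}
	if err = binary.Write(w, binary.BigEndian, e.id); err != nil {
		return 0, err
	}
	return int64(1 + 4 + len(e.value) + 8), nil
}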
// ReadTimestampedEntryInto parses the byte-stream representation of a
// TimestampedEntry from |r| and populates the struct |t| with the data. See
// RFC section 3.4 for details on the format.
// Returns a non-nil error if there was a problem.
func ReadTimestampedEntryInto(r io.Reader, t *TimestampedEntry) error {
	var err error
	if err = binary.Read(r, binary.BigEndian, &t.Timestamp); err != nil {
		return err
	}
	if err = binary.Read(r, binary.BigEndian, &t.EntryType); err != nil {
		return err
	}
	switch t.EntryType {
	case X509LogEntryType:
		if t.X509Entry, err = readVarBytes(r, CertificateLengthBytes); err != nil {
			return err
		}
	case PrecertLogEntryType:
		if err := binary.Read(r, binary.BigEndian, &t.PrecertEntry.IssuerKeyHash); err != nil {
			return err
		}
		if t.PrecertEntry.TBSCertificate, err = readVarBytes(r, PreCertificateLengthBytes); err != nil {
			return err
		}
	case XJSONLogEntryType:
		if t.JSONData, err = readVarBytes(r, JSONLengthBytes); err != nil {
			return err
		}
	default:
		return fmt.Errorf("unknown EntryType: %d", t.EntryType)
	}
	// Propagate any error from reading the extensions instead of dropping it.
	t.Extensions, err = readVarBytes(r, ExtensionsLengthBytes)
	return err
}
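// readVarBytes is defined elsewhere in the package. For context, a minimal
// sketch of what such a helper typically does is shown here (an assumption,
// not the original implementation): read a numLenBytes-wide big-endian length
// prefix, then that many bytes.
func readVarBytesSketch(r io.Reader, numLenBytes int) ([]byte, error) {
	prefix := make([]byte, numLenBytes)
	if _, err := io.ReadFull(r, prefix); err != nil {
		return nil, err
	}
	var length uint64
	for _, b := range prefix {
		length = length<<8 | uint64(b)
	}
	data := make([]byte, length)
	if _, err := io.ReadFull(r, data); err != nil {
		return nil, err
	}
	return data, nil
}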
func readStats(r io.Reader, typ string) (*ReferenceStats, error) {
	var (
		vOff  uint64
		stats ReferenceStats
		err   error
	)
	err = binary.Read(r, binary.LittleEndian, &vOff)
	if err != nil {
		return nil, fmt.Errorf("%s: failed to read index stats chunk begin virtual offset: %v", typ, err)
	}
	stats.Chunk.Begin = makeOffset(vOff)
	err = binary.Read(r, binary.LittleEndian, &vOff)
	if err != nil {
		return nil, fmt.Errorf("%s: failed to read index stats chunk end virtual offset: %v", typ, err)
	}
	stats.Chunk.End = makeOffset(vOff)
	err = binary.Read(r, binary.LittleEndian, &stats.Mapped)
	if err != nil {
		return nil, fmt.Errorf("%s: failed to read index stats mapped count: %v", typ, err)
	}
	err = binary.Read(r, binary.LittleEndian, &stats.Unmapped)
	if err != nil {
		return nil, fmt.Errorf("%s: failed to read index stats unmapped count: %v", typ, err)
	}
	return &stats, nil
}
func (w *Watcher) handleEventAll(data []byte) {
	buf := bytes.NewBuffer(data)
	msg := &cnMsg{}
	hdr := &procEventHeader{}

	binary.Read(buf, byteOrder, msg)
	binary.Read(buf, byteOrder, hdr)

	switch hdr.What {
	case PROC_EVENT_FORK:
		event := &forkProcEvent{}
		binary.Read(buf, byteOrder, event)
		ppid := int(event.ParentTgid)
		pid := int(event.ChildTgid)
		w.Fork <- &ProcEventFork{ParentPid: ppid, ChildPid: pid}
	case PROC_EVENT_EXEC:
		event := &execProcEvent{}
		binary.Read(buf, byteOrder, event)
		pid := int(event.ProcessTgid)
		w.Exec <- &ProcEventExec{Pid: pid}
	case PROC_EVENT_EXIT:
		event := &exitProcEvent{}
		binary.Read(buf, byteOrder, event)
		pid := int(event.ProcessTgid)
		w.Exit <- &ProcEventExit{Pid: pid}
	}
}
// Parse initialises the read loop and begins parsing the incoming request
func (p *Parser) Parse() {
	b := make([]byte, 2)

Read:
	for {
		n, err := p.Conn.Read(b)
		switch {
		case err == io.EOF:
			break Read
		case n == 0:
			continue
		}

		switch string(b) {
		case "1W": // window length
			binary.Read(p.Conn, binary.BigEndian, &p.wlen)
		case "1C": // frame length
			binary.Read(p.Conn, binary.BigEndian, &p.plen)
			if err := p.read(); err != nil {
				log.Printf("[%s] error parsing %v", p.Conn.RemoteAddr().String(), err)
				break Read
			}
			if err := p.ack(); err != nil {
				log.Printf("[%s] error acking %v", p.Conn.RemoteAddr().String(), err)
				break Read
			}
		default:
			// This really shouldn't happen
			log.Printf("[%s] received unknown type %q", p.Conn.RemoteAddr().String(), b)
			break Read
		}
	}
}
func handleHead(reader io.Reader) (seq []byte, flags uint8, count uint16, err error) {
	var seqLength uint16
	err = binary.Read(reader, binary.BigEndian, &seqLength)
	if err != nil {
		return
	}
	seq = make([]byte, seqLength)
	_, err = io.ReadFull(reader, seq)
	if err != nil {
		fmt.Println("read seq")
		return
	}
	err = binary.Read(reader, binary.BigEndian, &flags)
	if err != nil {
		fmt.Println("read flags")
		return
	}
	err = binary.Read(reader, binary.BigEndian, &count)
	if err != nil {
		fmt.Println("read count")
		return
	}
	return
}
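// writeHead is a sketch of the symmetric encoder for the layout handleHead
// consumes (uint16 big-endian sequence length, the sequence bytes, a uint8
// flags field, then a uint16 count); it is illustrative, not original code.
func writeHead(w io.Writer, seq []byte, flags uint8, count uint16) error {
	if err := binary.Write(w, binary.BigEndian, uint16(len(seq))); err != nil {
		return err
	}
	if _, err := w.Write(seq); err != nil {
		return err
	}
	if err := binary.Write(w, binary.BigEndian, flags); err != nil {
		return err
	}
	return binary.Write(w, binary.BigEndian, count)
}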
// Decode fills the response with information read from packet.
func (r *wireMasterResponse) Decode(packet io.Reader, n int) error {
	if r.Ips == nil {
		r.Ips = make([]wireIP, 0, 50)
	}
	err := binary.Read(packet, byteOrder, &r.Head)
	if err != nil {
		return err
	}
	if !reflect.DeepEqual(r.Head.Magic, masterResponseHeader) {
		return errors.New("Header does not match.")
	}
	remaining := n - binary.Size(r.Head.Magic)
	ipsize := binary.Size(wireIP{})
	for ; remaining >= ipsize; remaining -= ipsize {
		ip := wireIP{}
		// Normal little-endian read.
		if err := binary.Read(packet, byteOrder, &ip.Oct); err != nil {
			return err
		}
		// Separate read because the port is big-endian.
		if err := binary.Read(packet, binary.BigEndian, &ip.Port); err != nil {
			return err
		}
		r.Ips = append(r.Ips, ip)
	}
	return nil
}
func Parse(d []byte) (*Key, error) {
	if len(d) != 78 {
		return nil, fmt.Errorf("Input of wrong length %d (expected 78).", len(d))
	}
	b := bytes.NewBuffer(d)

	var version uint32
	binary.Read(b, binary.BigEndian, &version)
	k := Key{version: version}

	binary.Read(b, binary.BigEndian, &k.depth)

	k.parent = make([]byte, 4)
	b.Read(k.parent)

	binary.Read(b, binary.BigEndian, &k.index)

	k.code = make([]byte, 32)
	b.Read(k.code)

	ser := make([]byte, 33)
	b.Read(ser)

	switch version {
	case BitcoinExtendedPrivateKeyVersion, BitcoinTestnetExtendedPrivateKeyVersion:
		k.prvKey = parse256(ser[1:33])
	case BitcoinExtendedPublicKeyVersion, BitcoinTestnetExtendedPublicKeyVersion:
		k.pubKey = ser
	default:
		return nil, fmt.Errorf("Input has unrecognized version %x.", version)
	}
	return &k, nil
}
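// The 78-byte serialization Parse walks is the standard BIP32 extended-key
// layout. The offsets below are shown for reference only; the constant names
// are illustrative and do not appear in the original source.
const (
	bip32OffVersion = 0  // 4 bytes, big-endian network/version prefix
	bip32OffDepth   = 4  // 1 byte
	bip32OffParent  = 5  // 4 bytes, parent key fingerprint
	bip32OffIndex   = 9  // 4 bytes, big-endian child index
	bip32OffCode    = 13 // 32 bytes, chain code
	bip32OffKeyData = 45 // 33 bytes, 0x00||private key or compressed public key
)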
func makeUniqueMcs(catRange uint32) (string, error) {
	var (
		n      uint32
		c1, c2 uint32
		mcs    string
		tries  = 1000000
		err    error
	)
	for i := 0; i < tries; i++ {
		binary.Read(rand.Reader, binary.LittleEndian, &n)
		c1 = n % catRange
		binary.Read(rand.Reader, binary.LittleEndian, &n)
		c2 = n % catRange
		if c1 == c2 {
			continue
		}
		if c1 > c2 {
			c1, c2 = c2, c1
		}
		mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2)
		err = mcsAdd(mcs)
		if err == nil {
			return mcs, nil
		}
	}
	return "", fmt.Errorf("couldn't generate unique MCS after %d tries! (last err=%v)", tries, err)
}
// Read a protobuf message from a client
func (client *Client) readProtoMessage() (msg *Message, err error) {
	var (
		length uint32
		kind   uint16
	)

	// Read the message type (16-bit big-endian unsigned integer)
	err = binary.Read(client.reader, binary.BigEndian, &kind)
	if err != nil {
		return
	}

	// Read the message length (32-bit big-endian unsigned integer)
	err = binary.Read(client.reader, binary.BigEndian, &length)
	if err != nil {
		return
	}

	buf := make([]byte, length)
	_, err = io.ReadFull(client.reader, buf)
	if err != nil {
		return
	}

	msg = &Message{
		buf:    buf,
		kind:   kind,
		client: client,
	}
	return
}
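// A sketch of the matching send side for the frame format readProtoMessage
// expects (16-bit big-endian type, 32-bit big-endian length, then the
// payload). It is illustrative only and not taken from the original source.
func writeProtoMessage(w io.Writer, kind uint16, payload []byte) error {
	if err := binary.Write(w, binary.BigEndian, kind); err != nil {
		return err
	}
	if err := binary.Write(w, binary.BigEndian, uint32(len(payload))); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}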
func readVendorID(b []byte) (leftovers []byte, vendorID uint16, err error) {
	buf := bytes.NewBuffer(b)
	leftovers = b

	var openByte, appByte, closeByte byte
	if openByte, err = buf.ReadByte(); openByte != 0x3e {
		return
	}
	if appByte, err = buf.ReadByte(); appByte != 0x22 && appByte != 0x21 {
		return
	}
	if appByte == 0x22 {
		if err = binary.Read(buf, binary.BigEndian, &vendorID); err != nil {
			return
		}
	} else {
		var vendorIDByte byte
		if err = binary.Read(buf, binary.BigEndian, &vendorIDByte); err != nil {
			return
		}
		vendorID = uint16(vendorIDByte)
	}
	if closeByte, err = buf.ReadByte(); closeByte != 0x3f {
		return
	}

	bytesRead := len(b) - buf.Len()
	leftovers = b[bytesRead:]
	return
}
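// Usage sketch (illustrative, not from the original source): a 0x22 marker is
// followed by a two-byte big-endian vendor ID, so the input below yields
// vendorID 0x1234 and a one-byte leftover of 0xff.
func exampleReadVendorID() {
	rest, id, err := readVendorID([]byte{0x3e, 0x22, 0x12, 0x34, 0x3f, 0xff})
	fmt.Printf("id=%#04x rest=% x err=%v\n", id, rest, err) // id=0x1234 rest=ff err=<nil>
}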
func (pbp *PBP) Read(rc io.ReadCloser) error {
	binary.Read(rc, binary.LittleEndian, &pbp.cookie)
	if pbp.cookie == 0x464C457f {
		fmt.Printf("File is an ELF, converting to empty PBP\n")
		elfData, err := ioutil.ReadAll(rc)
		if err != nil {
			return err
		}
		pbp.data[6] = append([]byte{0x7f, 0x45, 0x4c, 0x46}, elfData...)
		pbp.cookie = 0x50425000
		pbp.version = 0x00010000
		return nil
	}
	if pbp.cookie != 0x50425000 {
		return errors.New("bad cookie")
	}
	binary.Read(rc, binary.LittleEndian, &pbp.version)
	for i := 0; i < 8; i++ {
		binary.Read(rc, binary.LittleEndian, &pbp.offsets[i])
	}
	for i := 0; i < 7; i++ {
		pbp.data[i] = make([]byte, pbp.offsets[i+1]-pbp.offsets[i])
		if len(pbp.data[i]) > 0 {
			// Use io.ReadFull so a short read does not silently truncate the section.
			if _, err := io.ReadFull(rc, pbp.data[i]); err != nil {
				return err
			}
		}
	}
	var err error
	pbp.data[7], err = ioutil.ReadAll(rc)
	return err
}
// UnpackageData parses an internal data packet into its header, JSON body and tail.
func UnpackageData(body []byte) (PkgHead, []byte, InnerPkgTail, error) {
	p := bytes.NewReader(body)
	head := PkgHead{}
	tail := InnerPkgTail{}
	var jsonStr []byte

	err := binary.Read(p, binary.BigEndian, &head)
	if err != nil {
		return head, nil, tail, fmt.Errorf("read pkghead error: %v, body: %v", err, body)
	}
	if int(head.PkgLen)+SIZEOF_INNERTAIL != len(body) {
		return head, nil, tail, fmt.Errorf("data package len error: pkglen %d, body len %d", head.PkgLen, len(body))
	}

	jsonStr = make([]byte, int(head.PkgLen)-SIZEOF_PKGHEAD)
	if err := binary.Read(p, binary.BigEndian, &jsonStr); err != nil {
		return head, nil, tail, fmt.Errorf("read pkgbody error: %v, body: %v", err, body)
	}
	if err := binary.Read(p, binary.BigEndian, &tail); err != nil {
		return head, nil, tail, fmt.Errorf("read pkgtail error: %v, body: %v", err, body)
	}
	return head, jsonStr, tail, nil
}
// readPacket reads a packet from r.rd into r.buf. EOF is returned if
// r.rd.Read() returns it. Nothing is appended to r.buf if err != nil.
func (r *SecureReader) readPacket() error {
	// 1. Read the packet header
	hdr := &PacketHdr{}
	if err := binary.Read(r.rd, binary.BigEndian, hdr); err != nil {
		return err
	}

	// 2. Read the encrypted data
	encData := make([]byte, hdr.Length)
	if _, err := io.ReadFull(r.rd, encData); err != nil {
		return err
	}

	// 3. Decrypt and verify the data
	b, ok := box.OpenAfterPrecomputation(nil, encData, &hdr.Nonce, r.key)
	if !ok {
		return ErrFailedVerify
	}
	buf := bytes.NewReader(b)

	// 4. Extract the sequence number and match it to the internal counter
	var seq uint32
	if err := binary.Read(buf, binary.BigEndian, &seq); err != nil {
		return err
	}
	if seq != r.seq {
		return ErrOutOfOrder
	}
	r.seq += 2

	// Success! Note: this append will cause the reallocation of r.buf when it
	// grows too much, and with that the old returned data will be discarded.
	r.buf = append(r.buf, b[len(b)-buf.Len():]...)
	return nil
}
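// A sketch of how a sending side would frame the plaintext that readPacket
// expects: a 4-byte big-endian sequence number followed by the payload, sealed
// with box.SealAfterPrecomputation. The exact PacketHdr field order is defined
// elsewhere, so the header write is omitted; this helper is illustrative only.
func sealPayload(seq uint32, payload []byte, nonce *[24]byte, key *[32]byte) []byte {
	plaintext := make([]byte, 4+len(payload))
	binary.BigEndian.PutUint32(plaintext, seq)
	copy(plaintext[4:], payload)
	return box.SealAfterPrecomputation(nil, plaintext, nonce, key)
}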
// Unmarshal unmarshals the message.
func (g *Goodbye) Unmarshal(frames ...[]byte) error {
	if frames == nil {
		return errors.New("Can't unmarshal empty message")
	}

	frame := frames[0]
	frames = frames[1:]

	buffer := bytes.NewBuffer(frame)

	// Get and check protocol signature
	var signature uint16
	binary.Read(buffer, binary.BigEndian, &signature)
	if signature != Signature {
		return errors.New("invalid signature")
	}

	// Get message id and parse per message type
	var id uint8
	binary.Read(buffer, binary.BigEndian, &id)
	if id != GoodbyeId {
		return errors.New("malformed Goodbye message")
	}

	return nil
}
func (u *UDP) Write(b []byte) (n int, err error) {
	buf := bytes.NewBuffer(b)
	if err = binary.Read(buf, binary.BigEndian, &u.PortSrc); err != nil {
		return
	}
	n += 2
	if err = binary.Read(buf, binary.BigEndian, &u.PortDst); err != nil {
		return
	}
	n += 2
	if err = binary.Read(buf, binary.BigEndian, &u.Length); err != nil {
		return
	}
	n += 2
	if err = binary.Read(buf, binary.BigEndian, &u.Checksum); err != nil {
		return
	}
	n += 2
	if u.Length > 8 {
		u.Data = make([]byte, u.Length-8)
	}
	if u.Length == 0 {
		u.Data = make([]byte, buf.Len())
	}
	m, err := io.ReadFull(buf, u.Data)
	n += m
	return
}
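// Usage sketch (illustrative, not from the original source): build a raw
// 8-byte UDP header plus payload and let Write parse it into the struct.
func exampleParseUDP() {
	payload := []byte("hi")
	raw := make([]byte, 8+len(payload))
	binary.BigEndian.PutUint16(raw[0:2], 5353)                   // source port
	binary.BigEndian.PutUint16(raw[2:4], 53)                     // destination port
	binary.BigEndian.PutUint16(raw[4:6], uint16(8+len(payload))) // length incl. header
	binary.BigEndian.PutUint16(raw[6:8], 0)                      // checksum (unset)
	copy(raw[8:], payload)

	var u UDP
	n, err := u.Write(raw)
	fmt.Println(n, err, string(u.Data)) // 10 <nil> hi
}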
func uniqMcs(catRange uint32) string {
	var (
		n      uint32
		c1, c2 uint32
		mcs    string
	)
	for {
		binary.Read(rand.Reader, binary.LittleEndian, &n)
		c1 = n % catRange
		binary.Read(rand.Reader, binary.LittleEndian, &n)
		c2 = n % catRange
		if c1 == c2 {
			continue
		}
		if c1 > c2 {
			c1, c2 = c2, c1
		}
		mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2)
		if err := mcsAdd(mcs); err != nil {
			continue
		}
		break
	}
	return mcs
}
func NewFS(r io.ReadSeeker) *FS {
	bpb32 := &BPB32{}
	err := binary.Read(r, binary.LittleEndian, bpb32)
	if err != nil {
		// TODO error handling
	}
	fs := &FS{bpb32, UnknownType, r}
	t := fs.DetermineType()
	switch t {
	case FAT32:
		fs.Type = FAT32
		return fs
	case FAT12, FAT16:
		// reread the BPB, this time for the correct fs type
		bpb16 := &BPB16{}
		r.Seek(0, 0)
		err := binary.Read(r, binary.LittleEndian, bpb16)
		if err != nil {
			// TODO error handling
		}
		bpb32 = &BPB32{bpb16.BPBBase, BPB32Base{0, 0, 0, 0, 0, 0, [12]byte{}}, bpb16.BPB16Base}
		fs = &FS{bpb32, t, r}
	}
	return fs
}
// readKV parses key value pairs from within the payload
func (p *Parser) readKV() ([]byte, []byte, error) {
	var klen, vlen uint32

	// Read key len
	if err := binary.Read(p.buffer, binary.BigEndian, &klen); err != nil {
		return nil, nil, err
	}
	if klen > maxKeyLen {
		return nil, nil, fmt.Errorf("key exceeds max len %d, got %d bytes", maxKeyLen, klen)
	}

	// Read key
	key := make([]byte, klen)
	_, err := p.buffer.Read(key)
	if err != nil {
		return nil, nil, err
	}

	// Read value len
	if err := binary.Read(p.buffer, binary.BigEndian, &vlen); err != nil {
		return nil, nil, err
	}
	if vlen > maxValueLen {
		return nil, nil, fmt.Errorf("value exceeds max len %d, got %d bytes", maxValueLen, vlen)
	}

	// Read value
	value := make([]byte, vlen)
	_, err = p.buffer.Read(value)
	if err != nil {
		return nil, nil, err
	}

	return key, value, nil
}
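// writeKV sketches the symmetric encoder for readKV's layout (uint32 big-endian
// key length, key bytes, uint32 big-endian value length, value bytes); the
// helper is illustrative and not part of the original source.
func writeKV(w io.Writer, key, value []byte) error {
	if err := binary.Write(w, binary.BigEndian, uint32(len(key))); err != nil {
		return err
	}
	if _, err := w.Write(key); err != nil {
		return err
	}
	if err := binary.Write(w, binary.BigEndian, uint32(len(value))); err != nil {
		return err
	}
	_, err := w.Write(value)
	return err
}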
func (fs FS) ReadFAT(cluster uint32) (newCluster uint32, status ClusterStatus) {
	secFAT, offsetFAT := fs.ClusterToFATEntry(cluster)
	byteFATStart := secFAT * uint32(fs.BPB.BytsPerSec)
	fs.Data.Seek(int64(byteFATStart+offsetFAT), 0)

	switch fs.DetermineType() {
	case FAT12:
		var fat uint16
		binary.Read(fs.Data, binary.LittleEndian, &fat)
		if cluster%2 == 0 {
			fat &= 0x0FFF
		} else {
			fat >>= 4
		}
		newCluster = uint32(fat)
	case FAT16:
		var fat uint16
		binary.Read(fs.Data, binary.LittleEndian, &fat)
		newCluster = uint32(fat)
	default:
		var fat uint32
		binary.Read(fs.Data, binary.LittleEndian, &fat)
		fat &= 0x0FFFFFFF
		newCluster = fat
	}
	status = fs.ClusterStatus(newCluster)
	return
}
func readChunks(r io.Reader, n int32, typ string) ([]bgzf.Chunk, error) {
	if n == 0 {
		return nil, nil
	}
	var (
		vOff uint64
		err  error
	)
	chunks := make([]bgzf.Chunk, n)
	for i := range chunks {
		err = binary.Read(r, binary.LittleEndian, &vOff)
		if err != nil {
			return nil, fmt.Errorf("%s: failed to read chunk begin virtual offset: %v", typ, err)
		}
		chunks[i].Begin = makeOffset(vOff)
		err = binary.Read(r, binary.LittleEndian, &vOff)
		if err != nil {
			return nil, fmt.Errorf("%s: failed to read chunk end virtual offset: %v", typ, err)
		}
		chunks[i].End = makeOffset(vOff)
	}
	if !sort.IsSorted(byBeginOffset(chunks)) {
		sort.Sort(byBeginOffset(chunks))
	}
	return chunks, nil
}
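// makeOffset is defined elsewhere. For context, a BGZF virtual file offset
// packs the compressed (file) offset of a block into the upper 48 bits and the
// offset within the uncompressed block into the lower 16 bits; the helper
// below only illustrates that split and is not part of the original source.
func splitVirtualOffset(vOff uint64) (fileOffset int64, blockOffset uint16) {
	return int64(vOff >> 16), uint16(vOff & 0xffff)
}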
func (d *Data) UnMarshal(buf []byte) error {
	n := len(buf)
	r := bytes.NewReader(buf)

	err := binary.Read(r, binary.BigEndian, &d.Head)
	if err != nil {
		return err
	}

	d.SubData = []SubData{}
	for i := binary.Size(d.Head); i < n; {
		sub := SubData{}
		err = binary.Read(r, binary.BigEndian, &sub.Head)
		if err != nil {
			return err
		}
		i += binary.Size(sub.Head)

		sub.Params = []tlv.TLV{}
		for j := 0; j < int(sub.Head.ParamsCount); j++ {
			param := tlv.TLV{}
			param.FromBinary(r)
			i += int(param.Length())
			sub.Params = append(sub.Params, param)
		}
		d.SubData = append(d.SubData, sub)
	}
	return nil
}
// ReadFrom deserializes the Mealy machine from a Reader.
func ReadFrom(r io.Reader) (self Recognizer, err error) {
	// Read version string, then all states in order (each is a slice over
	// uint32).
	versionString := make([]byte, len(serializationPrefix))
	if err = binary.Read(r, binary.BigEndian, versionString); err != nil {
		return
	}
	var numStates int32
	if err = binary.Read(r, binary.BigEndian, &numStates); err != nil {
		return
	}
	self = make(Recognizer, numStates)
	for i := 0; i < int(numStates); i++ {
		var numTransitions byte
		if err = binary.Read(r, binary.BigEndian, &numTransitions); err != nil {
			return
		}
		st := make(state, numTransitions)
		for t := 0; t < int(numTransitions); t++ {
			var tr transition
			if err = binary.Read(r, binary.BigEndian, &tr); err != nil {
				return
			}
			st[t] = tr
		}
		self[i] = st
	}
	return
}
func (space *Space) request(requestId int32, body *bytes.Buffer) (tuples [][][]byte, err error) {
	var (
		returnCode  int32
		tuplesCount int32
		tuplesSize  int32
		cardinality int32
		size        uint64
		response    *iproto.Response
	)

	response, err = space.conn.Request(requestId, body)
	if err != nil {
		return
	}

	// Ping has no Body
	if requestId == PingOp {
		tuples = [][][]byte{}
		return
	}

	err = binary.Read(response.Body, binary.LittleEndian, &returnCode)
	if err != nil {
		return
	}
	if returnCode != 0 {
		err = fmt.Errorf("Return code is not 0, but %d; Error message: %s", returnCode, response.Body.String())
		return
	}

	err = binary.Read(response.Body, binary.LittleEndian, &tuplesCount)
	if err != nil {
		return
	}

	tuples = make([][][]byte, tuplesCount)
	for i := int32(0); i < tuplesCount && response.Body.Len() > 0; i++ {
		err = binary.Read(response.Body, binary.LittleEndian, &tuplesSize)
		if err != nil {
			return
		}
		err = binary.Read(response.Body, binary.LittleEndian, &cardinality)
		if err != nil {
			return
		}
		tuples[i] = make([][]byte, cardinality)
		for j := int32(0); j < cardinality; j++ {
			size, err = binary.ReadUvarint(response.Body)
			if err != nil {
				return
			}
			tuples[i][j] = make([]byte, size)
			_, err = response.Body.Read(tuples[i][j])
			if err != nil {
				return
			}
		}
	}
	return
}
func ReadConfirmTransaction(in io.Reader) *ConfirmTransaction {
	res := new(ConfirmTransaction)
	binary.Read(in, ByteOrder, &res.window_id)
	binary.Read(in, ByteOrder, &res.transaction_id)
	binary.Read(in, ByteOrder, &res.accepted)
	return res
}
// NewUnpacker returns a pointer to Unpacker which can be used to read
// individual Blobs from a pack.
func NewUnpacker(k *crypto.Key, rd io.ReadSeeker) (*Unpacker, error) {
	var err error
	ls := binary.Size(uint32(0))

	// reset to the end to read header length
	_, err = rd.Seek(-int64(ls), 2)
	if err != nil {
		return nil, fmt.Errorf("seeking to read header length failed: %v", err)
	}

	var length uint32
	err = binary.Read(rd, binary.LittleEndian, &length)
	if err != nil {
		return nil, fmt.Errorf("reading header length failed: %v", err)
	}

	// reset to the beginning of the header
	_, err = rd.Seek(-int64(ls)-int64(length), 2)
	if err != nil {
		return nil, fmt.Errorf("seeking to read header failed: %v", err)
	}

	// read header
	hrd, err := crypto.DecryptFrom(k, io.LimitReader(rd, int64(length)))
	if err != nil {
		return nil, err
	}

	var entries []Blob

	pos := uint(0)
	for {
		e := headerEntry{}
		err = binary.Read(hrd, binary.LittleEndian, &e)
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}

		entries = append(entries, Blob{
			Type:   e.Type,
			Length: uint(e.Length),
			ID:     e.ID,
			Offset: pos,
		})
		pos += uint(e.Length)
	}

	p := &Unpacker{
		rd:      rd,
		k:       k,
		Entries: entries,
	}

	return p, nil
}
func parseSFNT(r io.ReaderAt, headerOffset int64, table map[int64]Table) (SFNT, error) {
	header := new(SfntHeader)
	headerSize := int64(binary.Size(header))
	sr := io.NewSectionReader(r, headerOffset, headerSize)
	if err := binary.Read(sr, binary.BigEndian, header); err != nil {
		return nil, err
	}

	numTables := header.NumTables
	offsetTable := make([]OffsetEntry, numTables)
	sr = io.NewSectionReader(r, headerOffset+headerSize, int64(binary.Size(offsetTable)))
	if err := binary.Read(sr, binary.BigEndian, offsetTable); err != nil {
		return nil, err
	}

	tableMap := make(SFNT)
	for _, entry := range offsetTable {
		tag := entry.Tag.String()
		offset := int64(entry.Offset)
		size := int64(entry.Length)
		if v, ok := table[offset]; ok {
			tableMap[tag] = v
		} else {
			v = &DefaultTable{entry.Tag, io.NewSectionReader(r, offset, size)}
			table[offset] = v
			tableMap[tag] = v
		}
	}

	for _, p := range DefaultParser {
		for i, v := range tableMap {
			tableMap[i] = p.Parse(tableMap, v)
		}
	}
	return tableMap, nil
}
// DecodeDir parses a tiff-encoded IFD from r and returns a Dir object. offset
// is the offset to the next IFD. The first read from r should be at the first
// byte of the IFD. ReadAt offsets should generally be relative to the
// beginning of the tiff structure (not relative to the beginning of the IFD).
func DecodeDir(r ReadAtReader, order binary.ByteOrder) (d *Dir, offset int32, err error) {
	d = new(Dir)

	// get num of tags in ifd
	var nTags int16
	err = binary.Read(r, order, &nTags)
	if err != nil {
		return nil, 0, errors.New("tiff: failed to read IFD tag count: " + err.Error())
	}

	// load tags
	for n := 0; n < int(nTags); n++ {
		t, err := DecodeTag(r, order)
		if err != nil {
			return nil, 0, err
		}
		d.Tags = append(d.Tags, t)
	}

	// get offset to next ifd
	err = binary.Read(r, order, &offset)
	if err != nil {
		return nil, 0, errors.New("tiff: failed to read offset to next IFD: " + err.Error())
	}

	return d, offset, nil
}