Example #1
func (rad *RadClientType) pwdCrypt(auth, passwd []byte) ([]byte, error) {

	// Pad the password with zero octets if its length is not a multiple of 16
	text := make([]byte, 16*int(math.Ceil(float64(binary.Size(passwd))/16.0)))
	copy(text, passwd)
	for i := binary.Size(passwd); i < len(text); i++ {
		text[i] = 0
	}
	if len(text) > 128 {
		return nil, errors.New("Password exceeds maxiums of 128 bytes")
	}

	var result = make([]byte, 0)
	last := make([]byte, len(auth))

	copy(last, auth)
	for len(text) > 0 {
		hash := md5.Sum(bytes.Join([][]byte{rad.secret, last}, nil))

		for i := range hash {
			result = append(result, hash[i]^text[i])
		}
		last = result[len(result)-16:]
		text = text[16:]
	}
	return result, nil
}
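The function above hides the password the way RFC 2865 describes for the User-Password attribute: each 16-octet block is XORed with MD5(secret + previous ciphertext block), seeded with the Request Authenticator. The sketch below is a hypothetical inverse (not part of the source) that recovers the zero-padded password from the hidden form, assuming the same rad.secret field and a 16-byte auth value.
// Hypothetical inverse of pwdCrypt (not from the source): per RFC 2865, each
// plaintext block is the XOR of the corresponding ciphertext block with
// MD5(secret + previous ciphertext block); the Request Authenticator seeds the
// first block. Trailing zero padding is left in place.
func (rad *RadClientType) pwdDecrypt(auth, hidden []byte) []byte {
	result := make([]byte, 0, len(hidden))
	last := auth
	for i := 0; i+16 <= len(hidden); i += 16 {
		hash := md5.Sum(bytes.Join([][]byte{rad.secret, last}, nil))
		for j := 0; j < 16; j++ {
			result = append(result, hash[j]^hidden[i+j])
		}
		last = hidden[i : i+16]
	}
	return result
}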
Example #2
func parseSFNT(r io.ReaderAt, headerOffset int64, table map[int64]Table) (SFNT, error) {
	header := new(SfntHeader)
	headerSize := int64(binary.Size(header))
	sr := io.NewSectionReader(r, headerOffset, headerSize)
	if err := binary.Read(sr, binary.BigEndian, header); err != nil {
		return nil, err
	}
	numTables := header.NumTables
	offsetTable := make([]OffsetEntry, numTables)
	sr = io.NewSectionReader(r, headerOffset+headerSize, int64(binary.Size(offsetTable)))
	if err := binary.Read(sr, binary.BigEndian, offsetTable); err != nil {
		return nil, err
	}
	tableMap := make(SFNT)
	for _, entry := range offsetTable {
		tag := entry.Tag.String()
		offset := int64(entry.Offset)
		size := int64(entry.Length)
		if v, ok := table[offset]; ok {
			tableMap[tag] = v
		} else {
			v = &DefaultTable{entry.Tag, io.NewSectionReader(r, offset, size)}
			table[offset] = v
			tableMap[tag] = v
		}
	}
	for _, p := range DefaultParser {
		for i, v := range tableMap {
			tableMap[i] = p.Parse(tableMap, v)
		}
	}
	return tableMap, nil
}
Example #3
func parseOptionsTemplateFlowSet(data []byte, header *FlowSetHeader) (interface{}, error) {
	var set OptionsTemplateFlowSet
	var t OptionsTemplateRecord

	set.Id = header.Id
	set.Length = header.Length

	buf := bytes.NewBuffer(data)
	headerLen := binary.Size(t.TemplateId) + binary.Size(t.ScopeLength) + binary.Size(t.OptionLength)
	for buf.Len() >= 4 { // Padding aligns to 4 byte boundary
		if buf.Len() < headerLen {
			return nil, errorMissingData(headerLen - buf.Len())
		}
		binary.Read(buf, binary.BigEndian, &t.TemplateId)
		binary.Read(buf, binary.BigEndian, &t.ScopeLength)
		binary.Read(buf, binary.BigEndian, &t.OptionLength)

		if buf.Len() < int(t.ScopeLength)+int(t.OptionLength) {
			return nil, errorMissingData(int(t.ScopeLength) + int(t.OptionLength) - buf.Len())
		}

		scopeCount := int(t.ScopeLength) / binary.Size(Field{})
		optionCount := int(t.OptionLength) / binary.Size(Field{})

		t.Scopes = parseFieldList(buf, scopeCount)
		t.Options = parseFieldList(buf, optionCount)

		set.Records = append(set.Records, t)
	}

	return set, nil
}
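Both this example and Example #5 below call a parseFieldList helper that is not shown. The sketch below is only a plausible shape for it, under the assumption that Field is a {Type, Length} pair of uint16 values in network byte order (the field names are guesses, not taken from the source), which is also what makes binary.Size(Field{}) evaluate to 4 in the scope and option counts above.
// Hypothetical sketch of parseFieldList (the real helper may differ). Field is
// assumed to be two uint16 values, so binary.Size(Field{}) == 4.
type Field struct {
	Type   uint16
	Length uint16
}

func parseFieldList(buf *bytes.Buffer, count int) []Field {
	fields := make([]Field, count)
	// Field is a fixed-size struct, so the whole slice can be decoded in one call.
	binary.Read(buf, binary.BigEndian, fields)
	return fields
}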
Example #4
func (this *DataManager) readData0(inId InIdType) (BigFileIndex, error) {
	var d0 BigFileIndex
	var err error
	filenoSize := uint32(binary.Size(d0.FileNo))
	offsetSize := uint32(binary.Size(d0.Offset))
	lengthSize := uint32(binary.Size(d0.Length))

	// read fileno
	pos := uint32(inId) * uint32(binary.Size(BigFileIndex{}))
	fileno, err := this.data0.ReadNum(pos, filenoSize)
	if err != nil {
		return d0, err
	}

	// read fileoffset
	pos += filenoSize
	offset, err := this.data0.ReadNum(pos, offsetSize)
	if err != nil {
		return d0, err
	}

	// read filelength
	pos += offsetSize
	length, err := this.data0.ReadNum(pos, lengthSize)
	if err != nil {
		return d0, err
	}

	d0.FileNo = uint8(fileno)
	d0.Offset = uint32(offset)
	d0.Length = uint32(length)

	return d0, nil
}
Example #5
func parseTemplateFlowSet(data []byte, header *FlowSetHeader) (interface{}, error) {
	var set TemplateFlowSet
	var t TemplateRecord

	set.Id = header.Id
	set.Length = header.Length

	buf := bytes.NewBuffer(data)
	headerLen := binary.Size(t.TemplateId) + binary.Size(t.FieldCount)

	for buf.Len() >= 4 { // Padding aligns to 4 byte boundary
		if buf.Len() < headerLen {
			return nil, errorMissingData(headerLen - buf.Len())
		}
		binary.Read(buf, binary.BigEndian, &t.TemplateId)
		binary.Read(buf, binary.BigEndian, &t.FieldCount)

		fieldsLen := int(t.FieldCount) * binary.Size(Field{})
		if fieldsLen > buf.Len() {
			return nil, errorMissingData(fieldsLen - buf.Len())
		}
		t.Fields = parseFieldList(buf, int(t.FieldCount))

		set.Records = append(set.Records, t)
	}
	return set, nil
}
Example #6
func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReadSeeker, packSize uint) {
	written := 0
	for _, l := range lengths {
		written += l
	}
	// header length
	written += binary.Size(uint32(0))
	// header
	written += len(lengths) * (binary.Size(pack.BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize)
	// header crypto
	written += crypto.Extension

	// check length
	Equals(t, uint(written), packSize)

	// read and parse it again
	np, err := pack.NewUnpacker(k, rd)
	OK(t, err)
	Equals(t, len(np.Entries), len(bufs))

	for i, b := range bufs {
		e := np.Entries[i]
		Equals(t, b.id, e.ID)

		brd, err := e.GetReader(rd)
		OK(t, err)
		data, err := ioutil.ReadAll(brd)
		OK(t, err)

		Assert(t, bytes.Equal(b.data, data),
			"data for blob %v doesn't match", i)
	}
}
Example #7
// readHeader reads the header at the end of rd. size is the length of the
// whole data accessible in rd.
func readHeader(rd io.ReaderAt, size int64) ([]byte, error) {
	hl, err := readHeaderLength(rd, size)
	if err != nil {
		return nil, err
	}

	if int64(hl) > size-int64(binary.Size(hl)) {
		return nil, errors.New("header is larger than file")
	}

	if int64(hl) > maxHeaderSize {
		return nil, errors.New("header is larger than maxHeaderSize")
	}

	buf := make([]byte, int(hl))
	n, err := rd.ReadAt(buf, size-int64(hl)-int64(binary.Size(hl)))
	if err != nil {
		return nil, errors.Wrap(err, "ReadAt")
	}

	if n != len(buf) {
		return nil, errors.New("not enough bytes read")
	}

	return buf, nil
}
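readHeader depends on a readHeaderLength counterpart that is not shown here. A hedged sketch of what such a function could look like, assuming the header length is stored as a little-endian uint32 in the last four bytes of the file (the byte order and error texts are assumptions, not taken from the source):
// Hypothetical sketch of readHeaderLength (the real function may differ): the
// header length is assumed to sit in the final binary.Size(hl) bytes of rd.
func readHeaderLength(rd io.ReaderAt, size int64) (uint32, error) {
	var hl uint32
	buf := make([]byte, binary.Size(hl))
	if size < int64(len(buf)) {
		return 0, errors.New("file is too small to contain a header length")
	}
	if _, err := rd.ReadAt(buf, size-int64(len(buf))); err != nil {
		return 0, errors.Wrap(err, "ReadAt")
	}
	return binary.LittleEndian.Uint32(buf), nil
}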
Example #8
func NewPVRTexture(r io.ReadSeeker) (*tisPvrTexture, error) {
	tex := tisPvrTexture{}
	var destLen uint32

	binary.Read(r, binary.LittleEndian, &destLen)

	zr, err := zlib.NewReader(r)
	if err != nil {
		return nil, err
	}
	uncompressed, err := ioutil.ReadAll(zr)
	if err != nil {
		return nil, err
	}
	data := bytes.NewReader(uncompressed)
	data.Seek(0, os.SEEK_SET)
	binary.Read(data, binary.LittleEndian, &tex.Header)
	log.Printf("Header: %+v %d\n", tex.Header, binary.Size(tex.Header))

	switch tex.Header.PixelFormatlo {
	case 0: // 2BPP RGB PVRTC
	case 1: // 2BPP RGBA PVRTC
	case 2: // 4BPP RGB PVRTC
	case 3: // 4BPP RGBA PVRTC
	case 7: // DXT1
		tex.decompressDXT1(uncompressed[binary.Size(tex.Header):])
	case 9: // DXT3
	case 11: // DXT5
	}

	return &tex, nil
}
Example #9
func (e *indexEntry) WriteTo(w io.Writer) (n int64, err error) {
	deleted := byte(0)
	if e.deleted {
		deleted = 1
	}

	if err = binary.Write(w, binary.BigEndian, deleted); err != nil {
		return 0, err
	}

	value_len := uint32(len(e.value))
	if err = binary.Write(w, binary.BigEndian, value_len); err != nil {
		return 0, err
	}

	if err = binary.Write(w, binary.BigEndian, []byte(e.value)); err != nil {
		return 0, err
	}

	if err = binary.Write(w, binary.BigEndian, e.id); err != nil {
		return 0, err
	}

	return int64(binary.Size(deleted) + binary.Size([]byte(e.value)) + binary.Size(e.id)), nil
}
Example #10
func (cc *CoreConvert) makeElfHdr() {
	var hdr elf.Header32
	var phdr elf.Prog32
	var shdr elf.Section32

	copy(hdr.Ident[:], elf.ELFMAG)
	hdr.Ident[elf.EI_CLASS] = byte(elf.ELFCLASS32)
	hdr.Ident[elf.EI_DATA] = byte(elf.ELFDATA2LSB)
	hdr.Ident[elf.EI_VERSION] = byte(elf.EV_CURRENT)
	hdr.Ident[elf.EI_OSABI] = byte(elf.ELFOSABI_NONE)
	hdr.Ident[elf.EI_ABIVERSION] = 0
	hdr.Ident[elf.EI_PAD] = 0
	hdr.Type = uint16(elf.ET_CORE)
	hdr.Machine = uint16(elf.EM_ARM)
	hdr.Version = uint32(elf.EV_CURRENT)
	hdr.Entry = 0
	hdr.Phoff = uint32(binary.Size(hdr))
	hdr.Shoff = 0
	hdr.Flags = 0
	hdr.Ehsize = uint16(binary.Size(hdr))
	hdr.Phentsize = uint16(binary.Size(phdr))
	hdr.Phnum = uint16(len(cc.phdrs))
	hdr.Shentsize = uint16(binary.Size(shdr))
	hdr.Shnum = 0
	hdr.Shstrndx = uint16(elf.SHN_UNDEF)

	cc.elfHdr = &hdr
}
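makeElfHdr relies on binary.Size reporting the canonical on-disk sizes of the ELF32 structures from debug/elf. A small standalone check of those values (the expected numbers follow from the ELF32 specification):
package main

import (
	"debug/elf"
	"encoding/binary"
	"fmt"
)

// The ELF32 structures contain only fixed-size fields, so binary.Size reports
// their canonical on-disk sizes: 52 bytes for the file header, 32 for a program
// header and 40 for a section header.
func main() {
	fmt.Println(binary.Size(elf.Header32{}))  // 52
	fmt.Println(binary.Size(elf.Prog32{}))    // 32
	fmt.Println(binary.Size(elf.Section32{})) // 40
}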
Example #11
func (cc *CoreConvert) readTlv() (*CoreDumpTlv, error) {
	var tlv CoreDumpTlv

	tlv_buf := make([]byte, binary.Size(tlv))
	if tlv_buf == nil {
		return nil, util.NewNewtError("Out of memory")
	}

	cnt, err := cc.Source.Read(tlv_buf)
	if err == io.EOF {
		return nil, nil
	}
	if err != nil {
		return nil, util.NewNewtError(fmt.Sprintf("Error reading: %s",
			err.Error()))
	}
	if cnt == 0 {
		return nil, nil
	}
	if cnt != binary.Size(tlv) {
		return nil, util.NewNewtError("Short read")
	}

	tlv.Type = uint8(tlv_buf[0])
	tlv.pad = uint8(tlv_buf[1])
	tlv.Len = binary.LittleEndian.Uint16(tlv_buf[2:4])
	tlv.Off = binary.LittleEndian.Uint32(tlv_buf[4:8])

	return &tlv, nil
}
Example #12
// NextData returns the next data packet or an error.
func (r *reader) NextData() (raw RawData, err error) {
	var (
		length   uint16
		expected = binary.Size(raw)
		skipped  int
	)

	// Look for the beginning of a navdata packet as indicated by the payload
	// size. This is hacky and will break if Parrot increases the payload size,
	// but unfortunately I've been unable to come up with a better sync mechanism,
	// including a very fancy attempt to stop the acquisition, drain the tty buffer
	// in non-blocking mode, and then restart the acquisition. Better ideas are
	// welcome!
	//
	// BUG: Sometimes even this mechanism seems to fail, I suspect due to an odd
	// number of bytes being read if Read() is interrupted by a signal or
	// similar.
	for {
		if err = binary.Read(r.bufReader, binary.LittleEndian, &length); err != nil {
			return
		}
		if int(length) == expected {
			break
		}
		if skipped > expected*2 {
			err = fmt.Errorf("Failed to find payload. skipped=%d", skipped)
			return
		}
		skipped += binary.Size(length)
	}
	err = binary.Read(r.bufReader, binary.LittleEndian, &raw)
	return
}
Example #13
func (t *TLK) ConvertToUTF8(w io.WriteSeeker) error {
	w.Seek(0, os.SEEK_SET)
	err := binary.Write(w, binary.LittleEndian, t.header)
	if err != nil {
		return err
	}
	curStringOffset := 0
	strArray := []string{}
	for i := 0; i < len(t.entries); i++ {
		str, err := t.String(i)
		if err != nil {
			return err
		}
		strArray = append(strArray, str)
		t.entries[i].Offset = uint32(curStringOffset)
		t.entries[i].Length = uint32(len(str))
		curStringOffset += int(t.entries[i].Length)
		w.Seek(int64(binary.Size(t.header)+binary.Size(t.entries[0])*i), os.SEEK_SET)
		err = binary.Write(w, binary.LittleEndian, t.entries[i])
		if err != nil {
			return err
		}
	}
	w.Seek(int64(t.header.StringOffset), os.SEEK_SET)
	for _, str := range strArray {
		w.Write([]byte(str))
	}

	return nil
}
Example #14
func Peinit() {
	var l int

	switch Thearch.Thechar {
	// 64-bit architectures
	case '6':
		pe64 = 1

		l = binary.Size(&oh64)
		dd = oh64.DataDirectory[:]

		// 32-bit architectures
	default:
		l = binary.Size(&oh)

		dd = oh.DataDirectory[:]
	}

	PEFILEHEADR = int32(Rnd(int64(len(dosstub)+binary.Size(&fh)+l+binary.Size(&sh)), PEFILEALIGN))
	PESECTHEADR = int32(Rnd(int64(PEFILEHEADR), PESECTALIGN))
	nextsectoff = int(PESECTHEADR)
	nextfileoff = int(PEFILEHEADR)

	// some mingw libs depend on this symbol, for example, FindPESectionByName
	xdefine("__image_base__", SDATA, PEBASE)

	xdefine("_image_base__", SDATA, PEBASE)
}
Example #15
func (w *Writer) flushIndex() error {
	w.trailer.DataIndexOffset = w.curOffset
	w.trailer.DataIndexCount = uint32(len(w.blocks))

	w.fp.Write(IndexMagic)
	w.curOffset += uint64(len(IndexMagic))

	for _, b := range w.blocks {
		if err := binary.Write(w.fp, binary.BigEndian, b.offset); err != nil {
			return err
		}
		w.curOffset += uint64(binary.Size(b.offset))

		if err := binary.Write(w.fp, binary.BigEndian, b.size); err != nil {
			return err
		}
		w.curOffset += uint64(binary.Size(b.size))

		if i, err := writeUvarint(w.fp, uint64(len(b.firstKeyBytes))); err != nil {
			return err
		} else {
			w.curOffset += uint64(i)
		}

		if i, err := w.fp.Write(b.firstKeyBytes); err != nil {
			return err
		} else {
			w.curOffset += uint64(i)
		}
	}

	return nil
}
Example #16
// ReadDataFrom reads a binary representation of the Hll data written
// by WriteDataTo() from io stream. It returns the number of bytes read
// and error.
// If serialized Hll configuration is different it returns error with expected params
func (h *HyperLogLog) ReadDataFrom(stream io.Reader) (int, error) {
	var m uint64
	// read register number first
	err := binary.Read(stream, binary.LittleEndian, &m)
	if err != nil {
		return 0, err
	}
	// check if register number is appropriate
	// hll register number should be same with serialized hll
	if uint64(h.m) != m {
		return 0, fmt.Errorf("expected hll register number %d", m)
	}
	// set other values
	err = binary.Read(stream, binary.LittleEndian, &h.b)
	if err != nil {
		return 0, err
	}

	err = binary.Read(stream, binary.LittleEndian, &h.alpha)
	if err != nil {
		return 0, err
	}

	err = binary.Read(stream, binary.LittleEndian, h.registers)
	if err != nil {
		return 0, err
	}

	// count size of data in registers + m, b, alpha
	size := int(h.m)*binary.Size(uint8(0)) + binary.Size(uint64(0)) + binary.Size(uint32(0)) + binary.Size(float64(0))

	return size, err
}
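The comment refers to a WriteDataTo counterpart that is not shown. The sketch below is hypothetical (not taken from the source); it writes the fields in the order and byte order that ReadDataFrom expects, with the field types inferred from the size formula above (uint64 register count, uint32 b, float64 alpha, one byte per register).
// Hypothetical sketch of WriteDataTo (the real method may differ): the fields
// are written in the same order ReadDataFrom reads them, so the two round-trip.
func (h *HyperLogLog) WriteDataTo(stream io.Writer) (int, error) {
	if err := binary.Write(stream, binary.LittleEndian, uint64(h.m)); err != nil {
		return 0, err
	}
	if err := binary.Write(stream, binary.LittleEndian, h.b); err != nil {
		return 0, err
	}
	if err := binary.Write(stream, binary.LittleEndian, h.alpha); err != nil {
		return 0, err
	}
	if err := binary.Write(stream, binary.LittleEndian, h.registers); err != nil {
		return 0, err
	}
	// same accounting as ReadDataFrom: registers plus m, b and alpha
	size := int(h.m)*binary.Size(uint8(0)) + binary.Size(uint64(0)) + binary.Size(uint32(0)) + binary.Size(float64(0))
	return size, nil
}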
Example #17
// Decode fills r with the information read from the packet.
func (r *wireMasterResponse) Decode(packet io.Reader, n int) error {
	if r.Ips == nil {
		r.Ips = make([]wireIP, 0, 50)
	}

	err := binary.Read(packet, byteOrder, &r.Head)
	if err != nil {
		return err
	}

	if !reflect.DeepEqual(r.Head.Magic, masterResponseHeader) {
		return errors.New("Header does not match.")
	}

	remaining := n - binary.Size(r.Head.Magic)
	ipsize := binary.Size(wireIP{})

	for ; remaining >= ipsize; remaining -= ipsize {
		ip := wireIP{}
		// Normal little endian read.
		if err := binary.Read(packet, byteOrder, &ip.Oct); err != nil {
			return err
		}
		// Separate read because of the big-endian requirement
		if err := binary.Read(packet, binary.BigEndian, &ip.Port); err != nil {
			return err
		}
		r.Ips = append(r.Ips, ip)
	}

	return nil
}
Example #18
func (d *Data) UnMarshal(buf []byte) error {
	n := len(buf)
	r := bytes.NewReader(buf)
	err := binary.Read(r, binary.BigEndian, &d.Head)
	if err != nil {
		return err
	}
	d.SubData = []SubData{}
	for i := binary.Size(d.Head); i < n; {
		sub := SubData{}
		err = binary.Read(r, binary.BigEndian, &sub.Head)
		if err != nil {
			return err
		}
		i += int(binary.Size(sub.Head))
		sub.Params = []tlv.TLV{}
		for j := 0; j < int(sub.Head.ParamsCount); j++ {
			param := tlv.TLV{}
			param.FromBinary(r)
			i += int(param.Length())
			sub.Params = append(sub.Params, param)
		}
		d.SubData = append(d.SubData, sub)
	}

	return nil
}
Example #19
func (bli *blockListInterface) readBlockList(reader io.ReaderAt, start int64) (*blockListManager, error) {
	var read int64
	header := new(blockListHeaderData)
	err := readFrom(reader, start, header)
	if err != nil {
		return nil, err
	}

	blm := new(blockListManager)
	blm.header = header
	blm.headerStart = start

	read += int64(binary.Size(header))

	for i := int64(0); i < blm.header.Size; i++ {
		data := new(blockListArrayEntryData)
		err := readFrom(reader, start+read, data)
		if err != nil {
			return blm, err
		}

		info := blockListInfo{data, start + read}
		if data.Size == 0 {
			bli.Freeentries.PushBack(&info)
		} else if data.Free > 0 {
			bli.Freeentries.PushBack(&info)
		}
		bli.BlockListInfos[info.Location] = &info

		read += int64(binary.Size(data))
	}
	return blm, nil
}
Example #20
func newFile(path string) (*os.File, *blockListInterface, error) {
	//This function creates a new file and writes out the header and initial block list
	bli := new(blockListInterface)
	bli.BlockListInfos = make(map[int64]*blockListInfo)
	file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, os.FileMode(0666))
	if err != nil {
		return file, nil, err
	}

	bli.file = file
	header := fileHeaderData{0, 0}
	bli.fileheader = &header
	manager, written, err := bli.newBlockList(file, int64(binary.Size(header)), freeBlockSize)

	if err != nil {
		return file, bli, err
	}

	bli.Blocklists.PushBack(manager)

	header.Freeblock_start = int64(binary.Size(header))
	header.Data_start = header.Freeblock_start + written
	err = writeTo(file, 0, header)
	return file, bli, err
}
Example #21
// ReadDataFrom reads a binary representation of the CMS data written
// by WriteDataTo() from io stream. It returns the number of bytes read
// and error
// If serialized CMS configuration is different it returns error with expected params
func (c *CountMinSketch) ReadDataFrom(stream io.Reader) (int, error) {
	var (
		count          uint64
		epsilon, delta float64
	)

	err := binary.Read(stream, binary.LittleEndian, &epsilon)
	if err != nil {
		return 0, err
	}
	err = binary.Read(stream, binary.LittleEndian, &delta)
	if err != nil {
		return 0, err
	}

	// check if serialized and target cms configurations are same
	if c.epsilon != epsilon || c.delta != delta {
		return 0, fmt.Errorf("expected cms values for epsilon %f and delta %f", epsilon, delta)
	}

	err = binary.Read(stream, binary.LittleEndian, &count)
	if err != nil {
		return 0, err
	}

	for i := uint(0); i < uint(c.depth); i++ {
		err = binary.Read(stream, binary.LittleEndian, c.matrix[i])
	}
	// count size of matrix and count
	size := int(c.depth*c.width)*binary.Size(uint64(0)) + binary.Size(count) + 2*binary.Size(float64(0))

	c.count = count

	return size, err
}
Example #22
func (t *Cmap) Bytes() []byte {
	numTables := len(t.Subtable)
	header := CmapHeader{
		t.Version,
		USHORT(numTables),
	}
	encodingRecord := make([]EncodingRecord, numTables)
	p := int64(binary.Size(header) + binary.Size(encodingRecord))
	for i, v := range t.Subtable {
		encodingRecord[i].PlatformID = v.PlatformID
		encodingRecord[i].EncodingID = v.EncodingID
		encodingRecord[i].Offset = ULONG(p)
		p = roundUp(p + int64(v.Size()))
	}
	buf := new(bytes.Buffer)
	if err := binary.Write(buf, binary.BigEndian, header); err != nil {
		return nil
	}
	if err := binary.Write(buf, binary.BigEndian, encodingRecord); err != nil {
		return nil
	}
	for _, v := range t.Subtable {
		bytes := v.Bytes()
		size := int64(len(bytes))
		if err := binary.Write(buf, binary.BigEndian, bytes); err != nil {
			return nil
		}
		if err := binary.Write(buf, binary.BigEndian, make([]byte, roundUp(size)-size)); err != nil {
			return nil
		}
	}
	return buf.Bytes()
}
Example #23
// ReadFrom reads a binary representation of Buckets (such as might
// have been written by WriteTo()) from an i/o stream. It returns the number
// of bytes read.
func (b *Buckets) ReadFrom(stream io.Reader) (int64, error) {
	var bucketSize, max uint8
	var count, len uint64
	err := binary.Read(stream, binary.BigEndian, &bucketSize)
	if err != nil {
		return 0, err
	}
	err = binary.Read(stream, binary.BigEndian, &max)
	if err != nil {
		return 0, err
	}
	err = binary.Read(stream, binary.BigEndian, &count)
	if err != nil {
		return 0, err
	}
	err = binary.Read(stream, binary.BigEndian, &len)
	if err != nil {
		return 0, err
	}
	data := make([]byte, len)
	err = binary.Read(stream, binary.BigEndian, &data)
	if err != nil {
		return 0, err
	}
	b.bucketSize = bucketSize
	b.max = max
	b.count = uint(count)
	b.data = data
	return int64(int(len) + 2*binary.Size(uint8(0)) + 2*binary.Size(uint64(0))), nil
}
Example #24
func (e *indexEntry) ReadFrom(r io.Reader) (n int64, err error) {
	var deleted byte
	if err = binary.Read(r, binary.BigEndian, &deleted); err != nil {
		return 0, err
	}
	e.deleted = (deleted != 0)

	var value_len uint32
	if err = binary.Read(r, binary.BigEndian, &value_len); err != nil {
		return 0, err
	}

	value := make([]byte, int(value_len))
	for i := 0; i < int(value_len); i++ {
		var b byte
		if err = binary.Read(r, binary.BigEndian, &b); err != nil {
			return 0, err
		}
		value[i] = b
	}
	e.value = string(value)

	var id int64
	if err = binary.Read(r, binary.BigEndian, &id); err != nil {
		return 0, err
	}
	e.id = id

	return int64(binary.Size(deleted) + binary.Size(value) + binary.Size(id)), nil
}
Example #25
// Fill in the enet_packet_header; the specific packet header is not filled in here
func enet_packet_fragment_default(chanid uint8, fraglen uint32) (hdr EnetPacketHeader, pkt EnetPacketFragment) {
	hdr.Type = enet_packet_type_fragment
	hdr.Flags = enet_packet_header_flags_needack
	hdr.ChannelID = chanid
	hdr.Size = uint32(binary.Size(hdr)+binary.Size(pkt)) + fraglen
	return
}
Example #26
// Fill in the enet_packet_header; the specific packet header is not filled in here
func enet_packet_ack_default(chanid uint8) (hdr EnetPacketHeader, ack EnetPacketAck) {
	hdr.Type = enet_packet_type_ack
	hdr.Flags = 0
	hdr.ChannelID = chanid
	hdr.Size = uint32(binary.Size(hdr) + binary.Size(ack))
	return
}
Example #27
//These are sanity tests to make sure that Go works like I think it does
func TestPointerSize(t *testing.T) {
	//Tests that a binary.Size of a pointer works like a non-pointer
	var e blockListArrayEntryData
	entry := new(blockListArrayEntryData)
	if binary.Size(e) != binary.Size(entry) {
		t.Fatalf("binary.Size of a pointer isn't the same as the struct size")
	}
}
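The check above works because binary.Size indirects through pointers before computing the encoded size. A minimal standalone illustration of that behavior (the sample type is made up for this sketch):
package main

import (
	"encoding/binary"
	"fmt"
)

// sample is a made-up fixed-size struct; its encoded size is 4 + 2 + 4 = 10 bytes.
type sample struct {
	A uint32
	B uint16
	C [4]byte
}

func main() {
	var s sample
	// binary.Size follows pointers, so the value and the pointer report the same size.
	fmt.Println(binary.Size(s), binary.Size(&s)) // 10 10
	// Slices report the element size times the length.
	fmt.Println(binary.Size(make([]uint16, 8))) // 16
	// Values without a fixed encoded size, such as strings, yield -1.
	fmt.Println(binary.Size("hello")) // -1
}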
Example #28
// Fill in the enet_packet_header; the specific packet header is not filled in here
func enet_packet_unreliable_default(chanid uint8, payloadlen, usn uint32) (hdr EnetPacketHeader, pkt EnetPacketUnreliable) {
	hdr.Type = enet_packet_type_unreliable
	hdr.Flags = 0
	hdr.ChannelID = chanid
	hdr.Size = uint32(binary.Size(hdr)+binary.Size(pkt)) + payloadlen
	pkt.SN = usn
	return
}
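The Size field computed by the constructors in Examples #25, #26 and #28 counts both fixed headers plus the payload. A hypothetical serialization step that would produce exactly that many bytes (the writePacket name and the big-endian byte order are assumptions, not from the source):
// Hypothetical sketch (not from the source): write the two fixed-size headers
// followed by the payload; the byte count then matches hdr.Size as computed by
// the constructors above when len(payload) equals the length they were given.
func writePacket(w io.Writer, hdr EnetPacketHeader, sub interface{}, payload []byte) (int, error) {
	buf := new(bytes.Buffer)
	if err := binary.Write(buf, binary.BigEndian, hdr); err != nil {
		return 0, err
	}
	if err := binary.Write(buf, binary.BigEndian, sub); err != nil {
		return 0, err
	}
	buf.Write(payload)
	return w.Write(buf.Bytes())
}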
Example #29
func (cc *CoreConvert) setProgHdrOff() {
	off := binary.Size(cc.elfHdr)
	off += len(cc.phdrs) * binary.Size(cc.phdrs[0])

	for idx, phdr := range cc.phdrs {
		phdr.Off = uint32(off)
		off += len(cc.data[idx])
	}
}
Example #30
func (kh *KeyHandler) makeNewList() error {
	header := keyArrayHeader{0, keyblocksize}
	blankKeyEntry := keyEntry{1, 0, 0}
	size := int64(binary.Size(header)) + (int64(binary.Size(blankKeyEntry)) * keyblocksize)
	free, err := kh.bli.GetFree(size)
	if err != nil {
		return err
	}

	offset := free.Entry.Start
	//write the header
	err = writeTo(kh.bli.file, offset, header)
	if err != nil {
		return err
	}
	offset += int64(binary.Size(header))

	//write the entries and create the free infos to write out
	entrysize := int64(binary.Size(blankKeyEntry))
	for i := 0; i < keyblocksize; i++ {
		info := keyInfo{offset, nil, nil}
		err = writeTo(kh.bli.file, offset, blankKeyEntry)
		if err != nil {
			return err
		}
		offset += entrysize
		kh.freeKeyInfos.PushBack(&info)
	}

	headerinfo := keyArrayHeaderInfo{&header, free.Entry.Start}
	el := kh.keyHeaders.Back()
	if el == nil {
		//empty list
		kh.keyHeaders.PushBack(&headerinfo)
		//update the file header since this is the first list
		var fileheader fileHeaderData
		err := readFrom(kh.bli.file, 0, &fileheader)
		if err != nil {
			return err
		}
		fileheader.Data_start = headerinfo.Location
		err = writeTo(kh.bli.file, 0, &fileheader)
		return err
	}
	last, ok := el.Value.(*keyArrayHeaderInfo)
	if !ok {
		return errors.New("Invalid type for headerinfo in makenewlist:")
	}
	last.Header.Next = headerinfo.Location
	err = writeTo(kh.bli.file, last.Location, last.Header)
	if err != nil {
		return err
	}
	kh.keyHeaders.PushBack(&headerinfo)
	return nil
}