Example 1
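// pgsqlFieldsParser parses a PostgreSQL RowDescription message: a 16-bit
// field count followed by one description per column (name, table OID,
// column index, type OID, length, type modifier and format code).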
func pgsqlFieldsParser(s *PgsqlStream) {
	m := s.message

	// read field count (int16)
	field_count := int(common.Bytes_Ntohs(s.data[s.parseOffset : s.parseOffset+2]))
	s.parseOffset += 2
	logp.Debug("pgsqldetailed", "Row Description field count=%d", field_count)

	fields := []string{}
	fields_format := []byte{}

	for i := 0; i < field_count; i++ {
		// read field name (null terminated string)
		field_name, err := common.ReadString(s.data[s.parseOffset:])
		if err != nil {
			logp.Err("Fail to read the column field")
		}
		fields = append(fields, field_name)
		m.NumberOfFields += 1
		s.parseOffset += len(field_name) + 1

		// read Table OID (int32)
		s.parseOffset += 4

		// read Column Index (int16)
		s.parseOffset += 2

		// read Type OID (int32)
		s.parseOffset += 4

		// read column length (int16)
		s.parseOffset += 2

		// read type modifier (int32)
		s.parseOffset += 4

		// read format (int16)
		format := common.Bytes_Ntohs(s.data[s.parseOffset : s.parseOffset+2])
		fields_format = append(fields_format, byte(format))
		s.parseOffset += 2

		logp.Debug("pgsqldetailed", "Field name=%s, format=%d", field_name, format)
	}
	m.Fields = fields
	m.FieldsFormat = fields_format
	if m.NumberOfFields != field_count {
		logp.Err("Missing fields from RowDescription. Expected %d. Received %d", field_count, m.NumberOfFields)
	}
}
Example 2
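// readI16 decodes a big-endian int16 from the start of data and returns it
// as a decimal string together with the number of bytes consumed; complete
// is false when fewer than two bytes are available.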
func (thrift *Thrift) readI16(data []byte) (value string, ok bool, complete bool, off int) {
	if len(data) < 2 {
		return "", true, false, 0
	}
	i16 := common.Bytes_Ntohs(data[:2])
	value = strconv.Itoa(int(i16))

	return value, true, true, 2
}
Example 3
// Parse 16bit binary value from the buffer at index. Does not advance the
// read position.
func (b *Buffer) ReadNetUint16At(index int) (uint16, error) {
	if b.Failed() {
		return 0, b.err
	}
	if !b.Avail(2 + index) {
		return 0, b.bufferEndError()
	}
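	// Decode two bytes in network (big-endian) order relative to the current mark.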
	return common.Bytes_Ntohs(b.data[index+b.mark:]), nil
}
Example 4
// Parse 16bit binary value in network byte order from Buffer
// (converted to Host order).
func (b *Buffer) ReadNetUint16() (uint16, error) {
	if b.Failed() {
		return 0, b.err
	}
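	// Remember the slice at the current mark before advancing, then decode
	// its first two bytes in network byte order.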
	tmp := b.data[b.mark:]
	if err := b.Advance(2); err != nil {
		return 0, err
	}
	value := common.Bytes_Ntohs(tmp)
	return value, nil
}
Example 5
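// readStruct decodes Thrift struct fields (a type byte and a big-endian
// int16 field id, followed by the type-specific value) until a Stop byte is
// reached, then formats the collected fields as a string.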
func (thrift *Thrift) readStruct(data []byte) (value string, ok bool, complete bool, off int) {

	var bytesRead int
	offset := 0
	fields := []ThriftField{}

	// Loop until hitting a STOP or reaching the maximum number of elements
	// we follow in a stream (at which point, we assume we interpreted something
	// wrong).
	for i := 0; ; i++ {
		var field ThriftField

		if i >= thrift.DropAfterNStructFields {
			logp.Debug("thrift", "Too many fields in struct. Dropping as error")
			return "", false, false, 0
		}

		if len(data[offset:]) < 1 {
			return "", true, false, 0 // not complete
		}

		field.Type = byte(data[offset])
		offset += 1
		if field.Type == ThriftTypeStop {
			return thrift.formatStruct(fields, false, []*string{}), true, true, offset
		}

		if len(data[offset:]) < 2 {
			return "", true, false, 0 // not complete
		}

		field.Id = common.Bytes_Ntohs(data[offset : offset+2])
		offset += 2

		funcReader, typeFound := thrift.funcReadersByType(field.Type)
		if !typeFound {
			logp.Debug("thrift", "Field type %d not known", field.Type)
			return "", false, false, 0
		}

		field.Value, ok, complete, bytesRead = funcReader(data[offset:])

		if !ok {
			return "", false, false, 0
		}
		if !complete {
			return "", true, false, 0
		}
		fields = append(fields, field)
		offset += bytesRead
	}
}
Example 6
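// pgsqlRowsParser parses a PostgreSQL DataRow message: a 16-bit column count
// followed by one length-prefixed value per column, truncating rows at
// maxRowLength and keeping at most maxStoreRows rows per message.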
func (pgsql *Pgsql) pgsqlRowsParser(s *PgsqlStream) {
	m := s.message

	// read field count (int16)
	field_count := int(common.Bytes_Ntohs(s.data[s.parseOffset : s.parseOffset+2]))
	s.parseOffset += 2
	logp.Debug("pgsqldetailed", "DataRow field count=%d", field_count)

	row := []string{}
	var row_len int

	for i := 0; i < field_count; i++ {

		// read column length (int32)
		column_length := int32(common.Bytes_Ntohl(s.data[s.parseOffset : s.parseOffset+4]))
		s.parseOffset += 4

		// read column value (n bytes)
		column_value := []byte{}

		if m.FieldsFormat[i] == 0 {
			// field value in text format
			if column_length > 0 {
				column_value = s.data[s.parseOffset : s.parseOffset+int(column_length)]
			} else if column_length == -1 {
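				// A length of -1 marks a SQL NULL; no value bytes follow.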
				column_value = nil
			}
		}

		if row_len < pgsql.maxRowLength {
			if row_len+len(column_value) > pgsql.maxRowLength {
				column_value = column_value[:pgsql.maxRowLength-row_len]
			}
			row = append(row, string(column_value))
			row_len += len(column_value)
		}

		if column_length > 0 {
			s.parseOffset += int(column_length)
		}

		logp.Debug("pgsqldetailed", "Value %s, length=%d", string(column_value), column_length)

	}
	m.NumberOfRows += 1
	if len(m.Rows) < pgsql.maxStoreRows {
		m.Rows = append(m.Rows, row)
	}
}
Example 7
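// readField reads one field header (type byte and big-endian int16 id) from
// the stream and dispatches to the type-specific reader; a Stop byte ends
// the enclosing struct.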
func (thrift *Thrift) readField(s *ThriftStream) (ok bool, complete bool, field *ThriftField) {

	var off int

	field = new(ThriftField)

	if len(s.data) == 0 {
		return true, false, nil // ok, not complete
	}
	field.Type = byte(s.data[s.parseOffset])
	offset := s.parseOffset + 1
	if field.Type == ThriftTypeStop {
		s.parseOffset = offset
		return true, true, nil // done
	}

	if len(s.data[offset:]) < 2 {
		return true, false, nil // ok, not complete
	}
	field.Id = common.Bytes_Ntohs(s.data[offset : offset+2])
	offset += 2

	funcReader, typeFound := thrift.funcReadersByType(field.Type)
	if !typeFound {
		logp.Debug("thrift", "Field type %d not known", field.Type)
		return false, false, nil
	}

	field.Value, ok, complete, off = funcReader(s.data[offset:])

	if !ok {
		return false, false, nil
	}
	if !complete {
		return true, false, nil
	}
	offset += off

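	// Field fully parsed: advance the stream offset. complete stays false
	// because the surrounding struct is only finished when a Stop byte is read.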
	s.parseOffset = offset
	return true, false, field
}