Example #1
// newLog creates a new, empty Log.
func newLog() *Log {
	return &Log{
		entries:   make([]*LogEntry, 0),
		pBuffer:   proto.NewBuffer(nil),
		pLogEntry: &protobuf.ProtoLogEntry{},
	}
}
Example #2
func struct_pack(L *lua.LState) int {
	format := byte(L.CheckInt(2)) // format character: 'i', 'q', 'f', 'd', 'I' or 'Q'
	l_value := L.CheckNumber(3)   // numeric value to pack
	b := proto.NewBuffer(nil)
	L.SetTop(1) // keep only the callback at stack position 1
	switch format {
	case 'i':
		b.EncodeFixed32(uint64(int32(l_value)))
		L.Push(lua.LString(string(b.Bytes())))
	case 'q':
		b.EncodeFixed64(uint64(int64(l_value)))
		L.Push(lua.LString(string(b.Bytes())))
	case 'f':
		// Pack the IEEE-754 bit pattern rather than the truncated integer value.
		b.EncodeFixed32(uint64(math.Float32bits(float32(l_value))))
		L.Push(lua.LString(string(b.Bytes())))
	case 'd':
		b.EncodeFixed64(math.Float64bits(float64(l_value)))
		L.Push(lua.LString(string(b.Bytes())))
	case 'I':
		b.EncodeFixed32(uint64(uint32(l_value)))
		L.Push(lua.LString(string(b.Bytes())))
	case 'Q':
		b.EncodeFixed64(uint64(l_value))
		L.Push(lua.LString(string(b.Bytes())))
	default:
		L.RaiseError("unknown format")
	}
	L.Call(1, 0) // invoke the callback at stack position 1 with the packed string

	return 0
}
Example #3
func receiveMsg(conn net.Conn) {
	buf := make([]byte, 128)
	n, err := conn.Read(buf)
	if checkerr(err) {
		return
	}
	fmt.Printf("\nread (%d) bytes from %v:\n%v\n", n, conn.RemoteAddr(), buf[:n])

	protobuf := proto.NewBuffer(buf[:n])
	var msg OSMsg.OSMsg
	pumerr := protobuf.Unmarshal(&msg)
	if checkerr(pumerr) {
		return
	}
	fmt.Printf("[Message]----->%v\n\n", msg.String())

	// Alternative: a one-shot proto.Unmarshal without a proto.Buffer:
	// umerr := proto.Unmarshal(buf[:n], &msg)
	// if checkerr(umerr) {
	// 	return
	// }
	// fmt.Printf("[MyMessage] %v\n", msg)

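	// Alternative: text-format decoding with proto.UnmarshalText: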
	// readStr := string(buf[:n])
	// fmt.Printf("read string (%d):\n%s\n", n, readStr)
	// umterr := proto.UnmarshalText(readStr, &msg)
	// if checkerr(umterr) {
	// 	return
	// }
	// fmt.Printf("[MyMessage] %v\n", msg)
}
Example #4
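// NewServerCodec returns an rpc.ServerCodec that reads requests from the
// given ZeroMQ socket, tracking packet and client ids.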
func NewServerCodec(conn zmq.Socket) rpc.ServerCodec {
	req := proto.NewBuffer(nil)
	packetIds := make(idMap)
	clientIds := make(map[uint64]uint64)

	return &serverCodec{conn, req, packetIds, clientIds}
}
Example #5
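// Get returns a pooled *proto.Buffer when one is available; otherwise it
// returns a fresh buffer with 4 KiB of capacity and reports false.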
func (l *bufferList) Get() (*proto.Buffer, bool) {
	if v, ok := l.l.Get(); ok {
		return v.(*proto.Buffer), ok
	}

	return proto.NewBuffer(make([]byte, 0, 4096)), false
}
Example #6
func BenchmarkUnmarshal(b *testing.B) {
	b.StopTimer()

	// BenchmarkUnmarshal is called multiple times.
	once.Do(func() {
		go http.ListenAndServe("localhost:9090", nil)
	})

	raw := prepareBuf(b).Bytes()
	buf := proto.NewBuffer(make([]byte, 0, 4096))

	for i := 0; i < b.N; i++ {
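		// Re-point buf at the same marshaled bytes for every iteration.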
		buf.SetBuf(raw)

		b.StartTimer()
		v := &SampleValueSeries{Value: make([]*SampleValueSeries_Value, 0, numSamples)}
		if err := buf.Unmarshal(v); err != nil {
			b.Fatal(err)
		}
		b.StopTimer()

		if len(v.Value) != numSamples {
			b.Fatal(len(v.Value))
		}
	}
}
Example #7
func BuildJsonFromProto(cmdname string, cmddata []byte) string {
	recvbuf := proto.NewBuffer(cmddata)
	// Tricky part: this concrete type is not known statically here and can
	// only be described dynamically.
	recv := &AccountTokenVerifyLoginUserPmd_CS{}
	recvbuf.Unmarshal(recv)
	recv_json, _ := json.Marshal(recv)
	return string(recv_json)
}
Example #8
func struct_unpack(L *lua.LState) int {
	format := byte(L.CheckInt(1)) // format character: 'i', 'q', 'f', 'd', 'I' or 'Q'
	l_value := L.CheckString(2)   // packed bytes
	pos := L.CheckInt(3)          // offset into the packed bytes

	b_value := []byte(l_value)

	b := proto.NewBuffer(b_value[pos:])

	switch format {
	case 'i':
		value, _ := b.DecodeFixed32()
		L.Push(lua.LNumber(int32(value)))
	case 'q':
		value, _ := b.DecodeFixed64()
		L.Push(lua.LNumber(int64(value)))
	case 'f':
		// Reinterpret the fixed32 as an IEEE-754 bit pattern.
		value, _ := b.DecodeFixed32()
		L.Push(lua.LNumber(math.Float32frombits(uint32(value))))
	case 'd':
		value, _ := b.DecodeFixed64()
		L.Push(lua.LNumber(math.Float64frombits(value)))
	case 'I':
		value, _ := b.DecodeFixed32()
		L.Push(lua.LNumber(uint32(value)))
	case 'Q':
		value, _ := b.DecodeFixed64()
		L.Push(lua.LNumber(uint64(value)))
	default:
		L.RaiseError("unknown format")
	}

	return 1
}
Example #9
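// NewBuffer wraps rw together with a fresh, empty proto.Buffer and a zero index.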
func NewBuffer(rw io.ReadWriter) *Buffer {
	return &Buffer{
		rw:          rw,
		protoBuffer: proto.NewBuffer(nil),
		index:       0,
	}
}
Example #10
func BuildProtoFromJson(typ reflect.Type, cmdjson string) []byte {
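	// Instantiate the concrete message type dynamically via reflection.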
	proto_cmd := reflect.New(typ).Interface().(proto.Message)
	rawdata := []byte(cmdjson)
	json.Unmarshal(rawdata, proto_cmd)
	sendbuf := proto.NewBuffer(nil)
	sendbuf.Marshal(proto_cmd)
	return sendbuf.Bytes()
}
Example #11
func (self *LevelDbShard) Write(database string, series []*protocol.Series) error {
	wb := levigo.NewWriteBatch()
	defer wb.Close()

	for _, s := range series {
		if len(s.Points) == 0 {
			return errors.New("unable to write: series was nil or had no points")
		}

		count := 0
		for fieldIndex, field := range s.Fields {
			temp := field
			id, err := self.createIdForDbSeriesColumn(&database, s.Name, &temp)
			if err != nil {
				return err
			}
			keyBuffer := bytes.NewBuffer(make([]byte, 0, 24))
			dataBuffer := proto.NewBuffer(nil)
			for _, point := range s.Points {
				keyBuffer.Reset()
				dataBuffer.Reset()

				keyBuffer.Write(id)
				timestamp := self.convertTimestampToUint(point.GetTimestampInMicroseconds())
				// pass the uint64 by reference so binary.Write() doesn't create a new buffer
				// see the source code for intDataSize() in binary.go
				binary.Write(keyBuffer, binary.BigEndian, &timestamp)
				binary.Write(keyBuffer, binary.BigEndian, point.SequenceNumber)
				pointKey := keyBuffer.Bytes()

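				// A null field value is stored by deleting the point key.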
				if point.Values[fieldIndex].GetIsNull() {
					wb.Delete(pointKey)
					goto check
				}

				err = dataBuffer.Marshal(point.Values[fieldIndex])
				if err != nil {
					return err
				}
				wb.Put(pointKey, dataBuffer.Bytes())
			check:
				count++
				if count >= self.writeBatchSize {
					err = self.db.Write(self.writeOptions, wb)
					if err != nil {
						return err
					}
					count = 0
					wb.Clear()
				}
			}
		}
	}

	return self.db.Write(self.writeOptions, wb)
}
Example #12
func (self *Shard) Write(database string, series []*protocol.Series) error {
	wb := make([]storage.Write, 0)

	for _, s := range series {
		if len(s.Points) == 0 {
			return errors.New("unable to write: series was nil or had no points")
		}
		if len(s.FieldIds) == 0 {
			return errors.New("unable to write points without fields")
		}

		count := 0
		for fieldIndex, id := range s.FieldIds {
			for _, point := range s.Points {
				// keyBuffer and dataBuffer have to be recreated since we are
				// batching the writes, otherwise new writes will overwrite the
				// old writes that are still in memory
				keyBuffer := bytes.NewBuffer(make([]byte, 0, 24))
				dataBuffer := proto.NewBuffer(nil)
				var err error

				binary.Write(keyBuffer, binary.BigEndian, &id)
				timestamp := self.convertTimestampToUint(point.GetTimestampInMicroseconds())
				// pass the uint64 by reference so binary.Write() doesn't create a new buffer
				// see the source code for intDataSize() in binary.go
				binary.Write(keyBuffer, binary.BigEndian, &timestamp)
				binary.Write(keyBuffer, binary.BigEndian, point.SequenceNumber)
				pointKey := keyBuffer.Bytes()

				if point.Values[fieldIndex].GetIsNull() {
					wb = append(wb, storage.Write{Key: pointKey, Value: nil})
					goto check
				}

				err = dataBuffer.Marshal(point.Values[fieldIndex])
				if err != nil {
					return err
				}
				wb = append(wb, storage.Write{Key: pointKey, Value: dataBuffer.Bytes()})
			check:
				count++
				if count >= self.writeBatchSize {
					err = self.db.BatchPut(wb)
					if err != nil {
						return err
					}
					count = 0
					wb = make([]storage.Write, 0, self.writeBatchSize)
				}
			}
		}
	}

	return self.db.BatchPut(wb)
}
Example #13
func zig_zag_encode64(L *lua.LState) int {
	b := proto.NewBuffer(nil)
	err := b.EncodeZigzag64(uint64(L.CheckNumber(1)))
	if err == nil {
		// EncodeZigzag64 writes a varint; read it back to return the
		// zig-zag-encoded value as a plain number.
		x, _ := binary.ReadUvarint(bytes.NewBuffer(b.Bytes()))
		L.Push(lua.LNumber(x))
		return 1
	}
	return 0
}
Example #14
func (self *Shard) Write(database string, series []*protocol.Series) error {
	self.closeLock.RLock()
	defer self.closeLock.RUnlock()
	if self.closed {
		return fmt.Errorf("shard is closed")
	}

	wb := make([]storage.Write, 0)

	for _, s := range series {
		if len(s.Points) == 0 {
			return errors.New("unable to write: series was nil or had no points")
		}
		if len(s.FieldIds) == 0 {
			return errors.New("unable to write points without fields")
		}

		count := 0
		for fieldIndex, id := range s.FieldIds {
			for _, point := range s.Points {
				// dataBuffer has to be recreated since we are batching the
				// writes, otherwise new writes will overwrite the old writes
				// that are still in memory
				dataBuffer := proto.NewBuffer(nil)
				var err error

				sk := newStorageKey(id, point.GetTimestamp(), point.GetSequenceNumber())
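				// A null field value is written with a nil value, which acts
				// as a delete marker.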
				if point.Values[fieldIndex].GetIsNull() {
					wb = append(wb, storage.Write{Key: sk.bytes(), Value: nil})
					goto check
				}

				err = dataBuffer.Marshal(point.Values[fieldIndex])
				if err != nil {
					return err
				}
				wb = append(wb, storage.Write{Key: sk.bytes(), Value: dataBuffer.Bytes()})
			check:
				count++
				if count >= self.writeBatchSize {
					err = self.db.BatchPut(wb)
					if err != nil {
						return err
					}
					count = 0
					wb = make([]storage.Write, 0, self.writeBatchSize)
				}
			}
		}
	}

	return self.db.BatchPut(wb)
}
Example #15
func zig_zag_decode64(L *lua.LState) int {
	n := uint64(L.CheckNumber(1))
	// Re-encode n as a uvarint so DecodeZigzag64 can read it back and undo
	// the zig-zag transformation; writing the raw float64 bytes would not
	// produce a valid varint.
	b_buf := make([]byte, binary.MaxVarintLen64)
	b := proto.NewBuffer(b_buf[:binary.PutUvarint(b_buf, n)])
	value, err := b.DecodeZigzag64()
	if err == nil {
		L.Push(lua.LNumber(int64(value))) // int64 preserves the sign
		return 1
	}

	return 0
}
Example #16
// prepareBuf marshals numSamples random sample values into a fresh 4 KiB buffer.
func prepareBuf(b *testing.B) *proto.Buffer {
	buf := proto.NewBuffer(make([]byte, 0, 4096))
	v := &SampleValueSeries{Value: make([]*SampleValueSeries_Value, 0, numSamples)}
	for i := 0; i < numSamples; i++ {
		v.Value = append(v.Value, &SampleValueSeries_Value{
			Timestamp: proto.Int64(rand.Int63()),
			Value:     proto.Float64(rand.NormFloat64()),
		})
	}
	if err := buf.Marshal(v); err != nil {
		b.Fatal(err)
	}
	return buf
}
Example #17
func receiveMsg(conn net.Conn) {
	buf := make([]byte, 128)
	n, err := conn.Read(buf)
	if checkerr(err) {
		os.Exit(-1)
	}
	// fmt.Printf("\nread (%d) byte from %v :\n%v\n", n, conn.RemoteAddr(), buf[:n])

	protobuf := proto.NewBuffer(buf[:n])
	var _msg msg.OSMsg
	pumerr := protobuf.Unmarshal(&_msg)
	if checkerr(pumerr) {
		return
	}
	fmt.Printf("[received <----- message]%v\n", _msg.String())
}
Example #18
func CreateHekaStream(msgBytes []byte, outBytes *[]byte,
	msc *message.MessageSigningConfig) error {

	h := &message.Header{}
	h.SetMessageLength(uint32(len(msgBytes)))
	if msc != nil {
		h.SetHmacSigner(msc.Name)
		h.SetHmacKeyVersion(msc.Version)
		var hm hash.Hash
		switch msc.Hash {
		case "sha1":
			hm = hmac.New(sha1.New, []byte(msc.Key))
			h.SetHmacHashFunction(message.Header_SHA1)
		default:
			hm = hmac.New(md5.New, []byte(msc.Key))
		}

		hm.Write(msgBytes)
		h.SetHmac(hm.Sum(nil))
	}
	headerSize := proto.Size(h)
	requiredSize := message.HEADER_FRAMING_SIZE + headerSize + len(msgBytes)
	if requiredSize > message.MAX_RECORD_SIZE {
		return fmt.Errorf("Message too big, requires %d (MAX_RECORD_SIZE = %d)",
			requiredSize, message.MAX_RECORD_SIZE)
	}
	if cap(*outBytes) < requiredSize {
		*outBytes = make([]byte, requiredSize)
	} else {
		*outBytes = (*outBytes)[:requiredSize]
	}
	(*outBytes)[0] = message.RECORD_SEPARATOR
	(*outBytes)[1] = uint8(headerSize)
	// This looks odd but is correct; it effectively "seeks" the initial write
	// position for the protobuf output to be at the
	// `(*outBytes)[message.HEADER_DELIMITER_SIZE]` position.
	pbuf := proto.NewBuffer((*outBytes)[message.HEADER_DELIMITER_SIZE:message.HEADER_DELIMITER_SIZE])
	if err := pbuf.Marshal(h); err != nil {
		return err
	}
	(*outBytes)[headerSize+message.HEADER_DELIMITER_SIZE] = message.UNIT_SEPARATOR
	copy((*outBytes)[message.HEADER_FRAMING_SIZE+headerSize:], msgBytes)
	return nil
}
Example #19
func createStream(msgBytes []byte, encoding message.Header_MessageEncoding,
	outBytes *[]byte, msc *message.MessageSigningConfig) error {
	h := &message.Header{}
	h.SetMessageLength(uint32(len(msgBytes)))
	if encoding != message.Default_Header_MessageEncoding {
		h.SetMessageEncoding(encoding)
	}
	if msc != nil {
		h.SetHmacSigner(msc.Name)
		h.SetHmacKeyVersion(msc.Version)
		var hm hash.Hash
		switch msc.Hash {
		case "sha1":
			hm = hmac.New(sha1.New, []byte(msc.Key))
			h.SetHmacHashFunction(message.Header_SHA1)
		default:
			hm = hmac.New(md5.New, []byte(msc.Key))
		}

		hm.Write(msgBytes)
		h.SetHmac(hm.Sum(nil))
	}
	headerSize := uint8(proto.Size(h))
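	// Framing adds 3 bytes: the record separator, the header-length byte,
	// and the unit separator.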
	requiredSize := int(3 + headerSize)
	if cap(*outBytes) < requiredSize {
		*outBytes = make([]byte, requiredSize, requiredSize+len(msgBytes))
	} else {
		*outBytes = (*outBytes)[:requiredSize]
	}
	(*outBytes)[0] = message.RECORD_SEPARATOR
	(*outBytes)[1] = uint8(headerSize)
	pbuf := proto.NewBuffer((*outBytes)[2:2])
	err := pbuf.Marshal(h)
	if err != nil {
		return err
	}
	(*outBytes)[headerSize+2] = message.UNIT_SEPARATOR
	*outBytes = append(*outBytes, msgBytes...)
	return nil
}
Example #20
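// NewBufferPair returns a bufferPair backed by two fresh, empty proto.Buffers.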
func NewBufferPair() *bufferPair {
	return &bufferPair{proto.NewBuffer(nil), proto.NewBuffer(nil)}
}
Example #21
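// NewDecoder returns a Decoder that reads from r, reusing a single proto.Buffer.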
func NewDecoder(r io.Reader) *Decoder {
	return &Decoder{
		r:      r,
		buffer: proto.NewBuffer(nil),
	}
}
Example #22
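// NewEncoder returns an Encoder that writes to w, reusing a single proto.Buffer.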
func NewEncoder(w io.Writer) *Encoder {
	return &Encoder{
		w:      w,
		buffer: proto.NewBuffer(nil),
	}
}
Example #23
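// A grab-bag demo exercising proto.Buffer Marshal/Unmarshal round-trips and
// text-format encoding on several Pmd messages.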
func main() {
	uj := &Pmd.UserJsMessageForwardUserPmd_CS{}
	uj.Msgbytes = []byte(`{"whj":111}`)
	fmt.Println("XXXXX", uj.String(), proto.MarshalTextString(uj))
	rc := &Pmd.ReconnectErrorLoginUserPmd_S{}
	rc.Desc = proto.String(`{"whj":111}`)
	fmt.Println("xxxxx", rc.String(), proto.MarshalTextString(rc), *rc.Desc)
	m := make(map[int]string)
	m[1] = "wabghaijun"
	a := 1
	fmt.Println(protobuf.Encode(&a))
	mset := make(map[int32]proto.Extension)
	//mset[1] = proto.Extension{enc: []byte("sss")}
	//umset, err := proto.MarshalMessageSet(mset)
	var b []byte
	fmt.Println(len(b))
	nmd := &Pmd.ForwardNullUserPmd_CS{}
	nmd1 := &Pmd.ForwardNullUserPmd_CS{}
	nmd2 := &Pmd.ForwardNullUserPmd_CS{}
	cmd3 := &Pmd.RequestCloseNullUserPmd_CS{}
	cmd4 := &Pmd.RequestCloseNullUserPmd_CS{}
	cmd3.Reason = proto.String("2222")
	fmt.Println(proto.GetProperties(reflect.TypeOf(cmd3).Elem()))
	cmd3byte, err1 := proto.Marshal(cmd3)
	if err1 != nil {
		fmt.Println("xxxxxxxxxxx", err1)
	}
	fmt.Println(proto.Unmarshal(cmd3byte, cmd3))
	fmt.Println(mset)
	cmd3test := proto.MarshalTextString(cmd3)
	fmt.Println(cmd3test)
	fmt.Println(proto.UnmarshalText(cmd3test, nmd))
	//nmd.Prototype = proto.Uint64(2)
	nmd.ByCmd = proto.Uint32(0)
	//nmd.ByParam = proto.Uint32(0)
	//nmd.ByCmd = append(nmd.ByCmd, 0)
	//nmd.ByParam = append(nmd.ByParam, 0)
	sendbuf := proto.NewBuffer(nil)
	err := sendbuf.Marshal(nmd)
	if err != nil {
		fmt.Println("1", err)
	}
	nmd.Data = sendbuf.Bytes()
	fmt.Println(nmd, proto.Size(nmd), len(sendbuf.Bytes()))
	fmt.Println(len(sendbuf.Bytes()), sendbuf.Bytes())
	//data := sendbuf.Bytes()
	err = sendbuf.Marshal(cmd3)
	if err != nil {
		fmt.Println("2", err)
	}
	fmt.Println(len(sendbuf.Bytes()), sendbuf.Bytes())
	data := sendbuf.Bytes()
	fmt.Println(len(data), data)
	//data = append(data, byte(1))
	databuf := proto.NewBuffer(data)
	err = databuf.Unmarshal(nmd1)
	if err != nil {
		fmt.Println("3", err)
	}
	//err = databuf.Unmarshal(nmd2)
	err = proto.Unmarshal(data[:2], nmd2)
	if err != nil {
		fmt.Println("4", err)
	}
	err = proto.Unmarshal(data[2:], cmd4)
	//err = databuf.Unmarshal(cmd4)
	if err != nil {
		fmt.Println("5", err)
	}
	fmt.Println(nmd, proto.Size(nmd))
	fmt.Println(nmd1)
	fmt.Println(nmd2)
	fmt.Println(cmd4)
}
Example #24
func NewServerCodec(conn io.ReadWriteCloser) rpc.ServerCodec {
	req := &bufferPair{proto.NewBuffer(nil), proto.NewBuffer(nil)}
	resp := &bufferPair{proto.NewBuffer(nil), proto.NewBuffer(nil)}

	return &serverCodec{conn, req, resp}
}
Example #25
// Next advances the iterator to the next point.
func (pi *PointIterator) Next() {
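	// valueBuffer is reused below to unmarshal each column's stored value.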
	valueBuffer := proto.NewBuffer(nil)
	pi.valid = false
	pi.point = &protocol.Point{Values: make([]*protocol.FieldValue, len(pi.fields))}

	err := pi.getIteratorNextValue()
	if err != nil {
		pi.setError(err)
		return
	}

	var next *rawColumnValue

	// choose the highest (or lowest in case of ascending queries) timestamp
	// and sequence number. that will become the timestamp and sequence of
	// the next point.
	for i, value := range pi.rawColumnValues {
		if value.value == nil {
			continue
		}

		if next == nil {
			next = &pi.rawColumnValues[i]
			continue
		}

		if pi.asc {
			if value.before(next) {
				next = &pi.rawColumnValues[i]
			}
			continue
		}

		// the query is descending
		if value.after(next) {
			next = &pi.rawColumnValues[i]
		}
	}

	for i, iterator := range pi.itrs {
		rcv := &pi.rawColumnValues[i]
		log4go.Debug("Column value: %s", rcv)

		// if the value is nil or doesn't match the point's timestamp and sequence number
		// then skip it
		if rcv.value == nil || rcv.time != next.time || rcv.sequence != next.sequence {
			log4go.Debug("rcv = %#v, next = %#v", rcv, next)
			pi.point.Values[i] = &protocol.FieldValue{IsNull: &TRUE}
			continue
		}

		// if we emitted at least one column, then we should keep
		// trying to get more points
		log4go.Debug("Setting is valid to true")
		pi.valid = true

		// advance the iterator to read a new value in the next iteration
		if pi.asc {
			iterator.Next()
		} else {
			iterator.Prev()
		}

		fv := &protocol.FieldValue{}
		valueBuffer.SetBuf(rcv.value)
		err := valueBuffer.Unmarshal(fv)
		if err != nil {
			log4go.Error("Error while running query: %s", err)
			pi.setError(err)
			return
		}
		pi.point.Values[i] = fv
		rcv.value = nil
	}

	// this will only happen if there are no points for the given series
	// and range and this is the first call to Next(). Otherwise we
	// always call Next() on a valid PointIterator so we know we have
	// more points
	if next == nil {
		return
	}

	pi.point.SetTimestampInMicroseconds(next.time)
	pi.point.SequenceNumber = proto.Uint64(next.sequence)
}
Example #26
func (self *Shard) executeQueryForSeries(querySpec *parser.QuerySpec, seriesName string, columns []string, processor cluster.QueryProcessor) error {
	startTimeBytes := self.byteArrayForTime(querySpec.GetStartTime())
	endTimeBytes := self.byteArrayForTime(querySpec.GetEndTime())

	fields, err := self.getFieldsForSeries(querySpec.Database(), seriesName, columns)
	if err != nil {
		// because a db is distributed across the cluster, it's possible we don't have the series indexed here. ignore
		switch err := err.(type) {
		case FieldLookupError:
			log.Debug("Cannot find fields %v", columns)
			return nil
		default:
			log.Error("Error looking up fields for %s: %s", seriesName, err)
			return fmt.Errorf("Error looking up fields for %s: %s", seriesName, err)
		}
	}

	fieldCount := len(fields)
	rawColumnValues := make([]rawColumnValue, fieldCount)
	query := querySpec.SelectQuery()

	aliases := query.GetTableAliases(seriesName)
	if querySpec.IsSinglePointQuery() {
		series, err := self.fetchSinglePoint(querySpec, seriesName, fields)
		if err != nil {
			log.Error("Error reading a single point: %s", err)
			return err
		}
		if len(series.Points) > 0 {
			processor.YieldPoint(series.Name, series.Fields, series.Points[0])
		}
		return nil
	}

	fieldNames, iterators := self.getIterators(fields, startTimeBytes, endTimeBytes, query.Ascending)
	defer func() {
		for _, it := range iterators {
			it.Close()
		}
	}()

	seriesOutgoing := &protocol.Series{Name: protocol.String(seriesName), Fields: fieldNames, Points: make([]*protocol.Point, 0, self.pointBatchSize)}

	// TODO: clean up, this is super gnarly
	// optimize for the case where we're pulling back only a single column or aggregate
	buffer := bytes.NewBuffer(nil)
	valueBuffer := proto.NewBuffer(nil)
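	// buffer decodes the big-endian timestamp/sequence bytes; valueBuffer is
	// reused to unmarshal each field value.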
	for {
		isValid := false
		point := &protocol.Point{Values: make([]*protocol.FieldValue, fieldCount)}

		for i, it := range iterators {
			if rawColumnValues[i].value != nil || !it.Valid() {
				if err := it.Error(); err != nil {
					return err
				}
				continue
			}

			key := it.Key()
			if len(key) < 16 {
				continue
			}

			if !isPointInRange(fields[i].Id, startTimeBytes, endTimeBytes, key) {
				continue
			}

			value := it.Value()
			sequenceNumber := key[16:]

			rawTime := key[8:16]
			rawColumnValues[i] = rawColumnValue{time: rawTime, sequence: sequenceNumber, value: value}
		}

		var pointTimeRaw []byte
		var pointSequenceRaw []byte
		// choose the highest (or lowest in case of ascending queries) timestamp
		// and sequence number. that will become the timestamp and sequence of
		// the next point.
		for _, value := range rawColumnValues {
			if value.value == nil {
				continue
			}

			pointTimeRaw, pointSequenceRaw = value.updatePointTimeAndSequence(pointTimeRaw,
				pointSequenceRaw, query.Ascending)
		}

		for i, iterator := range iterators {
			// if the value is nil or doesn't match the point's timestamp and sequence number
			// then skip it
			if rawColumnValues[i].value == nil ||
				!bytes.Equal(rawColumnValues[i].time, pointTimeRaw) ||
				!bytes.Equal(rawColumnValues[i].sequence, pointSequenceRaw) {

				point.Values[i] = &protocol.FieldValue{IsNull: &TRUE}
				continue
			}

			// if we emitted at least one column, then we should keep
			// trying to get more points
			isValid = true

			// advance the iterator to read a new value in the next iteration
			if query.Ascending {
				iterator.Next()
			} else {
				iterator.Prev()
			}

			fv := &protocol.FieldValue{}
			valueBuffer.SetBuf(rawColumnValues[i].value)
			err := valueBuffer.Unmarshal(fv)
			if err != nil {
				log.Error("Error while running query: %s", err)
				return err
			}
			point.Values[i] = fv
			rawColumnValues[i].value = nil
		}

		var sequence uint64
		var t uint64

		// set the point sequence number and timestamp
		buffer.Reset()
		buffer.Write(pointSequenceRaw)
		binary.Read(buffer, binary.BigEndian, &sequence)
		buffer.Reset()
		buffer.Write(pointTimeRaw)
		binary.Read(buffer, binary.BigEndian, &t)

		time := self.convertUintTimestampToInt64(&t)
		point.SetTimestampInMicroseconds(time)
		point.SequenceNumber = &sequence

		// stop the loop if we ran out of points
		if !isValid {
			break
		}

		shouldContinue := true

		seriesOutgoing.Points = append(seriesOutgoing.Points, point)

		if len(seriesOutgoing.Points) >= self.pointBatchSize {
			for _, alias := range aliases {
				series := &protocol.Series{
					Name:   proto.String(alias),
					Fields: fieldNames,
					Points: seriesOutgoing.Points,
				}
				if !processor.YieldSeries(series) {
					log.Info("Stopping processing")
					shouldContinue = false
				}
			}
			seriesOutgoing = &protocol.Series{Name: protocol.String(seriesName), Fields: fieldNames, Points: make([]*protocol.Point, 0, self.pointBatchSize)}
		}

		if !shouldContinue {
			break
		}
	}

	// Yield remaining data
	for _, alias := range aliases {
		log.Debug("Final Flush %s", alias)
		series := &protocol.Series{Name: protocol.String(alias), Fields: seriesOutgoing.Fields, Points: seriesOutgoing.Points}
		if !processor.YieldSeries(series) {
			log.Debug("Cancelled...")
		}
	}

	log.Debug("Finished running query %s", query.GetQueryString())
	return nil
}
Example #27
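// NewClientCodec returns an rpc.ClientCodec with separate request and
// response buffer pairs.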
func NewClientCodec(conn io.ReadWriteCloser) rpc.ClientCodec {
	req := &bufferPair{proto.NewBuffer(nil), proto.NewBuffer(nil)}
	resp := &bufferPair{proto.NewBuffer(nil), proto.NewBuffer(nil)}

	return &clientCodec{conn, req, resp}
}