// NewDecoder returns a new proto.Decoder that satisfies goa.Decoder
func NewDecoder(r io.Reader) goa.Decoder {
	return &ProtoDecoder{
		pBuf: proto.NewBuffer(nil),
		bBuf: &bytes.Buffer{},
		r:    r,
	}
}
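A minimal sketch of how a decoder built this way is typically driven, using the same imports as the constructor above. The Decode signature below is an assumption for illustration, not goa's actual interface definition.

// Illustrative only: Decode's signature here is assumed; pBuf, bBuf, and r
// are the fields set by NewDecoder above.
func (dec *ProtoDecoder) Decode(v interface{}) error {
	msg, ok := v.(proto.Message)
	if !ok {
		return fmt.Errorf("proto decoder: %T does not implement proto.Message", v)
	}
	dec.bBuf.Reset()
	if _, err := dec.bBuf.ReadFrom(dec.r); err != nil {
		return err
	}
	dec.pBuf.SetBuf(dec.bBuf.Bytes())
	return dec.pBuf.Unmarshal(msg)
}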
func (c *codec) Encoder(w io.Writer) mc.Encoder {
	return &encoder{
		w:   w,
		buf: proto.NewBuffer(nil),
		c:   c,
	}
}
func CreateHekaStream(msgBytes []byte, outBytes *[]byte, msc *message.MessageSigningConfig) error {
	msgSize := uint32(len(msgBytes))
	if msgSize > message.MAX_MESSAGE_SIZE {
		return fmt.Errorf("Message too big, requires %d (MAX_MESSAGE_SIZE = %d)",
			len(msgBytes), message.MAX_MESSAGE_SIZE)
	}
	h := &message.Header{}
	h.SetMessageLength(msgSize)
	if msc != nil {
		h.SetHmacSigner(msc.Name)
		h.SetHmacKeyVersion(msc.Version)
		var hm hash.Hash
		switch msc.Hash {
		case "sha1":
			hm = hmac.New(sha1.New, []byte(msc.Key))
			h.SetHmacHashFunction(message.Header_SHA1)
		default:
			hm = hmac.New(md5.New, []byte(msc.Key))
		}
		hm.Write(msgBytes)
		h.SetHmac(hm.Sum(nil))
	}
	headerSize := proto.Size(h)
	if headerSize > message.MAX_HEADER_SIZE {
		return fmt.Errorf("Message header too big, requires %d (MAX_HEADER_SIZE = %d)",
			headerSize, message.MAX_HEADER_SIZE)
	}
	requiredSize := message.HEADER_FRAMING_SIZE + headerSize + len(msgBytes)
	if cap(*outBytes) < requiredSize {
		*outBytes = make([]byte, requiredSize)
	} else {
		*outBytes = (*outBytes)[:requiredSize]
	}
	(*outBytes)[0] = message.RECORD_SEPARATOR
	(*outBytes)[1] = uint8(headerSize)
	// This looks odd but is correct; it effectively "seeks" the initial write
	// position for the protobuf output to be at the
	// `(*outBytes)[message.HEADER_DELIMITER_SIZE]` position.
	pbuf := proto.NewBuffer((*outBytes)[message.HEADER_DELIMITER_SIZE:message.HEADER_DELIMITER_SIZE])
	if err := pbuf.Marshal(h); err != nil {
		return err
	}
	(*outBytes)[headerSize+message.HEADER_DELIMITER_SIZE] = message.UNIT_SEPARATOR
	copy((*outBytes)[message.HEADER_FRAMING_SIZE+headerSize:], msgBytes)
	return nil
}
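For reference, the frame written above is a record separator byte, a one-byte header length, the protobuf-encoded Header, a unit separator byte, and then the message bytes. The sketch below reads one such frame back; it assumes the same message package constants and the generated GetMessageLength getter, and readHekaFrame itself is illustrative, not part of Heka.

// readHekaFrame is an illustrative sketch, not Heka source; it assumes the
// constants and generated Header getters used by CreateHekaStream above.
func readHekaFrame(frame []byte) ([]byte, error) {
	if len(frame) < message.HEADER_FRAMING_SIZE || frame[0] != message.RECORD_SEPARATOR {
		return nil, fmt.Errorf("not a Heka frame")
	}
	headerSize := int(frame[1])
	headerEnd := message.HEADER_DELIMITER_SIZE + headerSize
	h := &message.Header{}
	if err := proto.Unmarshal(frame[message.HEADER_DELIMITER_SIZE:headerEnd], h); err != nil {
		return nil, err
	}
	if frame[headerEnd] != message.UNIT_SEPARATOR {
		return nil, fmt.Errorf("missing unit separator")
	}
	start := headerEnd + 1
	return frame[start : start+int(h.GetMessageLength())], nil
}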
func main() {
	var sizeBuf [4]byte
	inbuf := make([]byte, 0, 4096)
	outbuf := proto.NewBuffer(nil)
	for {
		if _, err := io.ReadFull(os.Stdin, sizeBuf[:]); err == io.EOF {
			break
		} else if err != nil {
			fmt.Fprintln(os.Stderr, "go conformance: read request:", err)
			os.Exit(1)
		}
		size := binary.LittleEndian.Uint32(sizeBuf[:])
		if int(size) > cap(inbuf) {
			inbuf = make([]byte, size)
		}
		inbuf = inbuf[:size]
		if _, err := io.ReadFull(os.Stdin, inbuf); err != nil {
			fmt.Fprintln(os.Stderr, "go conformance: read request:", err)
			os.Exit(1)
		}
		req := new(pb.ConformanceRequest)
		if err := proto.Unmarshal(inbuf, req); err != nil {
			fmt.Fprintln(os.Stderr, "go conformance: parse request:", err)
			os.Exit(1)
		}
		res := handle(req)
		if err := outbuf.Marshal(res); err != nil {
			fmt.Fprintln(os.Stderr, "go conformance: marshal response:", err)
			os.Exit(1)
		}
		binary.LittleEndian.PutUint32(sizeBuf[:], uint32(len(outbuf.Bytes())))
		if _, err := os.Stdout.Write(sizeBuf[:]); err != nil {
			fmt.Fprintln(os.Stderr, "go conformance: write response:", err)
			os.Exit(1)
		}
		if _, err := os.Stdout.Write(outbuf.Bytes()); err != nil {
			fmt.Fprintln(os.Stderr, "go conformance: write response:", err)
			os.Exit(1)
		}
		outbuf.Reset()
	}
}
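The loop above speaks a simple framing protocol over stdin/stdout: a little-endian uint32 length prefix followed by the serialized message. A minimal, standard-library-only sketch of the write side of that framing (writeFrame is illustrative, not part of the conformance runner):

// writeFrame emits the little-endian length prefix followed by the payload.
// Illustrative helper only.
func writeFrame(w io.Writer, payload []byte) error {
	var sizeBuf [4]byte
	binary.LittleEndian.PutUint32(sizeBuf[:], uint32(len(payload)))
	if _, err := w.Write(sizeBuf[:]); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}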
func CreateHekaStream(msgBytes []byte, outBytes *[]byte) error {
	h := &message.Header{}
	h.SetMessageLength(uint32(len(msgBytes)))
	headerSize := proto.Size(h)
	requiredSize := message.HEADER_FRAMING_SIZE + headerSize + len(msgBytes)
	if cap(*outBytes) < requiredSize {
		*outBytes = make([]byte, requiredSize)
	} else {
		*outBytes = (*outBytes)[:requiredSize]
	}
	(*outBytes)[0] = message.RECORD_SEPARATOR
	(*outBytes)[1] = uint8(headerSize)
	pbuf := proto.NewBuffer((*outBytes)[message.HEADER_DELIMITER_SIZE:message.HEADER_DELIMITER_SIZE])
	if err := pbuf.Marshal(h); err != nil {
		return err
	}
	(*outBytes)[headerSize+message.HEADER_DELIMITER_SIZE] = message.UNIT_SEPARATOR
	copy((*outBytes)[message.HEADER_FRAMING_SIZE+headerSize:], msgBytes)
	return nil
}
	// them atomically on a call to Commit().
	NewBatch() Engine
	// Commit atomically applies any batched updates to the underlying
	// engine. This is a noop unless the engine was created via NewBatch().
	Commit() error
	// Defer adds a callback to be run after the batch commits
	// successfully. If Commit() fails (or if this engine was not
	// created via NewBatch()), deferred callbacks are not called. As
	// with the defer statement, the last callback to be deferred is the
	// first to be executed.
	Defer(fn func())
}

var bufferPool = sync.Pool{
	New: func() interface{} {
		return proto.NewBuffer(nil)
	},
}

// PutProto sets the given key to the protobuf-serialized byte string
// of msg and the provided timestamp. Returns the length in bytes of
// key and the value.
func PutProto(engine Engine, key MVCCKey, msg proto.Message) (keyBytes, valBytes int64, err error) {
	buf := bufferPool.Get().(*proto.Buffer)
	buf.Reset()
	if err = buf.Marshal(msg); err != nil {
		bufferPool.Put(buf)
		return
	}
	data := buf.Bytes()
	// them atomically on a call to Commit().
	NewBatch() Engine
	// Commit atomically applies any batched updates to the underlying
	// engine. This is a noop unless the engine was created via NewBatch().
	Commit() error
	// Defer adds a callback to be run after the batch commits
	// successfully. If Commit() fails (or if this engine was not
	// created via NewBatch()), deferred callbacks are not called. As
	// with the defer statement, the last callback to be deferred is the
	// first to be executed.
	Defer(fn func())
}

var bufferPool = sync.Pool{
	New: func() interface{} {
		return gogoproto.NewBuffer(nil)
	},
}

// PutProto sets the given key to the protobuf-serialized byte string
// of msg and the provided timestamp. Returns the length in bytes of
// key and the value.
func PutProto(engine Engine, key proto.EncodedKey, msg gogoproto.Message) (keyBytes, valBytes int64, err error) {
	buf := bufferPool.Get().(*gogoproto.Buffer)
	buf.Reset()
	if err = buf.Marshal(msg); err != nil {
		bufferPool.Put(buf)
		return
	}
	data := buf.Bytes()
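Both truncated fragments above share the same pattern: a sync.Pool of reusable proto buffers so that marshaling a message does not allocate a fresh buffer each time. A minimal, self-contained sketch of that pattern follows; marshalPooled and pooledBuffers are illustrative names, not from either codebase.

// Illustrative sketch of the pooled-buffer pattern, assuming the
// github.com/golang/protobuf/proto Buffer API used above.
var pooledBuffers = sync.Pool{
	New: func() interface{} { return proto.NewBuffer(nil) },
}

func marshalPooled(msg proto.Message) ([]byte, error) {
	buf := pooledBuffers.Get().(*proto.Buffer)
	buf.Reset()
	if err := buf.Marshal(msg); err != nil {
		pooledBuffers.Put(buf)
		return nil, err
	}
	// Copy before returning the buffer to the pool: buf.Bytes() aliases
	// the pool-owned backing array and may be reused by another caller.
	out := append([]byte(nil), buf.Bytes()...)
	pooledBuffers.Put(buf)
	return out, nil
}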
// NewEncoder returns a new proto.Encoder that satisfies goa.Encoder
func NewEncoder(w io.Writer) goa.Encoder {
	return &ProtoEncoder{
		pBuf: proto.NewBuffer(nil),
		w:    w,
	}
}
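And a matching sketch of how an encoder with these fields is typically driven; as with the decoder sketch earlier, the Encode signature is an assumption for illustration, not goa's actual interface definition.

// Illustrative only: Encode's signature here is assumed; pBuf and w are
// the fields set by NewEncoder above.
func (enc *ProtoEncoder) Encode(v interface{}) error {
	msg, ok := v.(proto.Message)
	if !ok {
		return fmt.Errorf("proto encoder: %T does not implement proto.Message", v)
	}
	enc.pBuf.Reset()
	if err := enc.pBuf.Marshal(msg); err != nil {
		return err
	}
	_, err := enc.w.Write(enc.pBuf.Bytes())
	return err
}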
const TextLogTime = "2006-01-02 15:04:05 -0700"

// TryClose closes w if w implements io.Closer.
func TryClose(w io.Writer) (err error) {
	if c, ok := w.(io.Closer); ok {
		err = c.Close()
	}
	return
}

// hekaMessagePool holds recycled Heka message objects and encoding buffers.
var hekaMessagePool = sync.Pool{New: func() interface{} {
	return &hekaMessage{
		header: new(Header),
		msg:    new(Message),
		buf:    proto.NewBuffer(nil),
	}
}}

func newHekaMessage() *hekaMessage {
	return hekaMessagePool.Get().(*hekaMessage)
}

type hekaMessage struct {
	header   *Header
	msg      *Message
	buf      *proto.Buffer
	outBytes []byte
}

func (hm *hekaMessage) free() {