// call KVAutobus key_range API
func (db *KVAutobus) getKeyRange(kStart, kEnd storage.Key) (Ks, error) {
	b64key1 := encodeKey(kStart)
	b64key2 := encodeKey(kEnd)
	url := fmt.Sprintf("%s/kvautobus/api/key_range/%s/%s/%s/", db.host, db.collection, b64key1, b64key2)

	timedLog := dvid.NewTimeLog()
	resp, err := db.client.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotFound {
		return nil, nil // Handle no keys found.
	}

	r := msgp.NewReader(bufio.NewReader(resp.Body))
	var mks Ks
	if err := mks.DecodeMsg(r); err != nil {
		return nil, err
	}
	for _, mk := range mks {
		storage.StoreKeyBytesRead <- len(mk)
	}
	timedLog.Infof("PROXY key_range to %s returned %d (%d keys)\n", db.host, resp.StatusCode, len(mks))
	return mks, nil
}
func (h *StoreHandler) parseReq(sr *StoreReq) error {
	/* unpack */
	msgr := msgp.NewReader(sr.reqBuffer)
	reqi, err := msgr.ReadIntf()
	if err != nil {
		return fmt.Errorf("failed to decode data from storeReq: %s", err.Error())
	}
	req, ok := reqi.(map[string]interface{})
	if !ok {
		return errors.New("invalid data from storeReq: data must be a map")
	}

	datai, ok := req["data"]
	if !ok {
		return errors.New("invalid data from storeReq: data does not exist")
	}
	sr.data, ok = datai.([]byte)
	if !ok {
		return errors.New("invalid data from storeReq: data is not []byte")
	}

	idi, ok := req["id"]
	if !ok {
		return errors.New("invalid data from storeReq: id does not exist")
	}
	sr.id, ok = idi.(uint64)
	if !ok {
		idt, ok := idi.(int64)
		if !ok || idt < 0 {
			return errors.New("invalid data from storeReq: id is not int64 or uint64, or is < 0")
		}
		sr.id = uint64(idt)
	}
	return nil
}
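// The helper below is an illustrative sketch, not part of the original code: it
// shows how a request buffer that parseReq above can consume might be packed on
// the sending side with msgp.AppendIntf (github.com/tinylib/msgp/msgp). The name
// packStoreReq and the use of a plain map are assumptions for demonstration only.
func packStoreReq(data []byte, id uint64) ([]byte, error) {
	// A MessagePack map carrying the "data" and "id" fields that parseReq looks up.
	payload := map[string]interface{}{
		"data": data,
		"id":   id,
	}
	// AppendIntf serializes arbitrary interface{} values (maps, []byte, uint64)
	// into MessagePack; msgr.ReadIntf() in parseReq reverses this.
	return msgp.AppendIntf(nil, payload)
}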
// Check if any metadata has been written into this store.
func (db *KVAutobus) metadataExists() (bool, error) {
	var ctx storage.MetadataContext
	kStart, kEnd := ctx.KeyRange()
	b64key1 := encodeKey(kStart)
	b64key2 := encodeKey(kEnd)

	url := fmt.Sprintf("%s/kvautobus/api/key_range/%s/%s/%s/", db.host, db.collection, b64key1, b64key2)
	dvid.Infof("metadataExists: doing GET on %s\n", url)
	timedLog := dvid.NewTimeLog()
	resp, err := db.client.Get(url)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotFound {
		return true, nil // Handle no keys found.
	}

	r := msgp.NewReader(bufio.NewReader(resp.Body))
	var mks Ks
	if err := mks.DecodeMsg(r); err != nil {
		return false, err
	}
	timedLog.Infof("PROXY key_range metadata to %s returned %d (%d keys)\n", db.host, resp.StatusCode, len(mks))
	if len(mks) == 0 {
		return false, nil
	}
	return true, nil
}
// call KVAutobus keyvalue_range API
func (db *KVAutobus) getKVRange(kStart, kEnd storage.Key) (KVs, error) {
	b64key1 := encodeKey(kStart)
	b64key2 := encodeKey(kEnd)
	url := fmt.Sprintf("%s/kvautobus/api/keyvalue_range/%s/%s/", db.host, b64key1, b64key2)

	timedLog := dvid.NewTimeLog()
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotFound {
		return nil, nil // Handle no keys found.
	}

	r := msgp.NewReader(bufio.NewReader(resp.Body))
	var mkvs KVs
	if err := mkvs.DecodeMsg(r); err != nil {
		dvid.Errorf("Couldn't decode getKVRange return\n")
		return nil, err
	}
	timedLog.Infof("PROXY keyvalue_range to %s returned %d (%d kv pairs)\n", db.host, resp.StatusCode, len(mkvs))
	return mkvs, nil
}
func NewCodec(conn io.ReadWriteCloser) *codec {
	c := &codec{
		conn: conn,
		r:    msgp.NewReader(conn),
		w:    msgp.NewWriter(conn),
	}
	return c
}
func NewCodec(ws *websocket.Conn) *codec {
	c := &codec{
		ws: ws,
		r:  msgp.NewReader(nil),
		w:  msgp.NewWriter(nil),
	}
	return c
}
func (s *initialDispatchSuite) TestSpoolCancel(c *C) {
	var (
		args = Profile{
			"type": "testSleep",
		}
		appName      = "application"
		spoolMsg, _  = msgp.AppendIntf(nil, []interface{}{map[string]interface{}(args), appName})
		cancelMsg, _ = msgp.AppendIntf(nil, []interface{}{})
	)

	spoolDisp, err := s.d.Handle(spool, msgp.NewReader(bytes.NewReader(spoolMsg)))
	c.Assert(err, IsNil)
	c.Assert(spoolDisp, FitsTypeOf, &spoolCancelationDispatch{})

	spoolDisp.Handle(spoolCancel, msgp.NewReader(bytes.NewReader(cancelMsg)))
	msg := <-s.dw.ch
	c.Assert(msg.code, DeepEquals, int64(replySpoolOk))
}
func NewCodec(db *redis.Database, channel string) *codec {
	c := &codec{
		db: db,
		ch: channel,
		r:  msgp.NewReader(nil),
		w:  msgp.NewWriter(nil),
	}
	return c
}
func (s *initialDispatchSuite) TestSpawnAndKill(c *C) {
	var (
		opts = Profile{
			"type": "testSleep",
		}
		appName    = "application"
		executable = "test_app.exe"
		args       = make(map[string]string, 0)
		env        = make(map[string]string, 0)
		// spawnMsg = message{s.session, spawn, []interface{}{opts, appName, executable, args, env}}
		spawnMsg, _ = msgp.AppendIntf(nil, []interface{}{map[string]interface{}(opts), appName, executable, args, env})
		killMsg, _  = msgp.AppendIntf(nil, []interface{}{})
	)

	spawnDisp, err := s.d.Handle(spawn, msgp.NewReader(bytes.NewReader(spawnMsg)))
	c.Assert(err, IsNil)
	c.Assert(spawnDisp, FitsTypeOf, &spawnDispatch{})

	// First chunk must be empty to notify about start
	msg := <-s.dw.ch
	c.Assert(msg.code, DeepEquals, int64(replySpawnWrite))
	c.Assert(msg.args, HasLen, 1)
	data, ok := msg.args[0].([]byte)
	c.Assert(ok, Equals, true)
	c.Assert(data, HasLen, 0)

	// Let's read some output
	msg = <-s.dw.ch
	c.Assert(msg.code, Equals, int64(replySpawnWrite))
	c.Assert(msg.args, HasLen, 1)
	data, ok = msg.args[0].([]byte)
	c.Assert(ok, Equals, true)
	c.Assert(data, Not(HasLen), 0)

	noneDisp, err := spawnDisp.Handle(spawnKill, msgp.NewReader(bytes.NewReader(killMsg)))
	c.Assert(err, IsNil)
	c.Assert(noneDisp, IsNil)
}
func BenchmarkIncidentDecode(b *testing.B) {
	v := Incident{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}
// benchmark decoding a small, "fast" type.
// the point here is to see how much garbage
// is generated intrinsically by the encoding/
// decoding process as opposed to the nature
// of the struct.
func BenchmarkFastDecode(b *testing.B) {
	v := &TestFast{
		Lat:  40.12398,
		Long: -41.9082,
		Alt:  201.08290,
		Data: []byte("whaaaaargharbl"),
	}
	var buf bytes.Buffer
	msgp.Encode(&buf, v)
	dc := msgp.NewReader(msgp.NewEndlessReader(buf.Bytes(), b))
	b.SetBytes(int64(buf.Len()))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		v.DecodeMsg(dc)
	}
}
func BenchmarkNotificationDecode(b *testing.B) {
	r := Notification{
		Method: "Call",
		Body:   nil,
	}
	var buf bytes.Buffer
	msgp.Encode(&buf, &r)
	byts := buf.Bytes()
	mr := msgp.NewReader(&buf)
	for i := 0; i < b.N; i++ {
		buf.Reset()
		buf.Write(byts)
		r.DecodeMsg(mr)
	}
}
func BenchmarkResponseDecode(b *testing.B) {
	r := Response{
		ID:    100,
		Error: "error",
		Body:  nil,
	}
	var buf bytes.Buffer
	msgp.Encode(&buf, &r)
	byts := buf.Bytes()
	mr := msgp.NewReader(&buf)
	for i := 0; i < b.N; i++ {
		buf.Reset()
		buf.Write(byts)
		r.DecodeMsg(mr)
	}
}
func (b *Broker) getData(partition int32, offset int64) ([]byte, error) {
	// TODO: using pool ?
	bb, err := b.bc.Leader(gBrokerTopic, partition)
	if err != nil {
		return nil, err
	}
	bbb := sarama.NewBroker(bb.Addr())
	defer bbb.Close()
	err = bbb.Open(b.brokerConfig)
	if err != nil {
		return nil, err
	}

	freq := &sarama.FetchRequest{}
	freq.AddBlock(gBrokerTopic, partition, offset, int32(b.config.brokerMaxMessageSize))
	fres, err := bbb.Fetch(freq)
	if err != nil {
		return nil, err
	}
	fresb := fres.GetBlock(gBrokerTopic, partition)
	msgs := fresb.MsgSet.Messages
	if len(msgs) == 0 {
		return nil, errors.New("no msg found in broker")
	}

	/* unpack */
	buf := bytes.NewReader(msgs[0].Msg.Value)
	buf.Seek(0, 0)
	msgr := msgp.NewReader(buf)
	resi, err := msgr.ReadIntf()
	if err != nil {
		return nil, fmt.Errorf("failed to decode data from broker: %s", err.Error())
	}
	res, ok := resi.(map[string]interface{})
	if !ok {
		return nil, errors.New("invalid data from broker: data must be a map")
	}
	datai, ok := res["data"]
	if !ok {
		return nil, errors.New("invalid data from broker: data does not exist")
	}
	data, ok := datai.([]byte)
	if !ok {
		return nil, errors.New("invalid data from broker: data is not []byte")
	}
	return data, nil
}
// call KVAutobus keyvalue_range API
func (db *KVAutobus) getKVRange(ctx storage.Context, kStart, kEnd storage.Key) (KVs, error) {
	// Get any request context and pass to kvautobus for tracking.
	reqctx, ok := ctx.(storage.RequestCtx)
	var reqID string
	if ok {
		parts := strings.Split(reqctx.GetRequestID(), "/")
		if len(parts) > 0 {
			reqID = parts[len(parts)-1]
		}
	}

	// Construct the KVAutobus URL
	b64key1 := encodeKey(kStart)
	b64key2 := encodeKey(kEnd)
	url := fmt.Sprintf("%s/kvautobus/api/keyvalue_range/%s/%s/%s/?=%s", db.host, db.collection, b64key1, b64key2, reqID)

	timedLog := dvid.NewTimeLog()
	resp, err := db.client.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotFound {
		return nil, nil // Handle no keys found.
	}

	r := msgp.NewReader(bufio.NewReader(resp.Body))
	var mkvs KVs
	if err := mkvs.DecodeMsg(r); err != nil {
		dvid.Errorf("Couldn't decode getKVRange return\n")
		return nil, err
	}
	for _, mkv := range mkvs {
		storage.StoreKeyBytesRead <- len(mkv[0])
		storage.StoreValueBytesRead <- len(mkv[1])
	}
	timedLog.Infof("[%s] PROXY keyvalue_range to %s returned %d (%d kv pairs)\n", reqID, db.host, resp.StatusCode, len(mkvs))
	return mkvs, nil
}
// ReadBody reads the body of the message. It is assumed that the value being
// decoded into satisfies the msgp.Decodable interface.
func (c *msgpackCodec) ReadBody(v interface{}) error {
	if !c.body {
		return nil
	}

	r := msgp.NewReader(c.rwc)

	// Body is present, but no value to decode into.
	if v == nil {
		return r.Skip()
	}

	switch c.mt {
	case codec.Request, codec.Response, codec.Publication:
		return decodeBody(r, v)
	default:
		return fmt.Errorf("Unrecognized message type: %v", c.mt)
	}
}
func (db *KVAutobus) RawGet(key storage.Key) ([]byte, error) {
	b64key := encodeKey(key)
	url := fmt.Sprintf("%s/kvautobus/api/value/%s/", db.host, b64key)

	timedLog := dvid.NewTimeLog()
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	timedLog.Infof("PROXY get to %s returned %d\n", db.host, resp.StatusCode)
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotFound {
		return nil, nil // Handle no key found.
	}

	r := msgp.NewReader(bufio.NewReader(resp.Body))
	var bin Binary
	if err := bin.DecodeMsg(r); err != nil {
		return nil, err
	}
	return []byte(bin), nil
}
func TestIncidentEncodeDecode(t *testing.T) {
	v := Incident{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
	}

	vn := Incident{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}
func (td *TransDi) processConsumerMessage(msg *sarama.ConsumerMessage) {
	/* unpack */
	/* pack
	   {
	       'topic'  => string,
	       'method' => string,
	       'key'    => string,
	       'data'   => []byte,
	   }
	*/
	// FIXME: need be optimized here
	sleep := 100 * time.Millisecond
	for {
		var err error
		method := gDefaultMethod
		if len(td.methodEnabled) > 0 {
			// if methods are configured, check whether this method should be sent
			buf := bytes.NewReader(msg.Value)
			buf.Seek(0, 0)
			msgr := msgp.NewReader(buf)
			dii, err := msgr.ReadIntf()
			if err != nil {
				err = fmt.Errorf("failed to de-msgpack: %s", err.Error())
				logger.Warning("fail to processConsumerMessage: topic=%s, partition=%d, transid=%d, error: %s", msg.Topic, msg.Partition, msg.Offset, err.Error())
				time.Sleep(sleep)
				continue
			}
			di, ok := dii.(map[string]interface{})
			if !ok {
				err = errors.New("invalid di: should be map")
				logger.Warning("fail to processConsumerMessage: topic=%s, partition=%d, transid=%d, error: %s", msg.Topic, msg.Partition, msg.Offset, err.Error())
				time.Sleep(sleep)
				continue
			}
			methodi, ok := di["method"]
			if !ok {
				err = errors.New("invalid di: method does not exist")
				logger.Warning("fail to processConsumerMessage: topic=%s, partition=%d, transid=%d, error: %s", msg.Topic, msg.Partition, msg.Offset, err.Error())
				time.Sleep(sleep)
				continue
			}
			method, ok = methodi.(string)
			if !ok {
				err = errors.New("invalid di: method is not string")
				logger.Warning("fail to processConsumerMessage: topic=%s, partition=%d, transid=%d, error: %s", msg.Topic, msg.Partition, msg.Offset, err.Error())
				time.Sleep(sleep)
				continue
			}
			if _, ok = td.methodEnabled[method]; !ok {
				// not ours
				return
			}
		}

		// dispatch
		td.transWindow.addSlot(msg)

		var workerId uint32
		if msg.Key == nil || len(msg.Key) == 0 || !td.serializeByKey {
			workerId = uint32(rand.Int31n(int32(td.workerNum)))
		} else {
			td.dispatchHasher.Reset()
			_, err = td.dispatchHasher.Write(msg.Key)
			if err != nil {
				err = fmt.Errorf("failed to compute key-hash: %s", err.Error())
				logger.Warning("fail to processConsumerMessage: topic=%s, partition=%d, transid=%d, error: %s", msg.Topic, msg.Partition, msg.Offset, err.Error())
				time.Sleep(sleep)
				continue
			}
			hash := td.dispatchHasher.Sum32()
			workerId = hash % td.workerNum
		}

		worker := td.transWorkers[workerId]
		transData := &TransData{
			transid:   msg.Offset,
			topic:     msg.Topic,
			method:    method,
			partition: msg.Partition,
			data:      msg.Value,
		}
		err = worker.addTrans(transData)
		if err != nil {
			logger.Warning("fail to processConsumerMessage: topic=%s, partition=%d, transid=%d, error: %s", msg.Topic, msg.Partition, msg.Offset, err.Error())
			time.Sleep(sleep)
			continue
		}

		td.state = WAIT_ACK_NONBLOCK
		return
	}
}
func NewArchiveReader(ir io.Reader) (or Reader) {
	sr := snappy.NewReader(ir)
	mr := msgp.NewReader(sr)
	return &ArchiveReader{mr}
}
// HandleConn decodes commands from Cocaine runtime and calls dispatchers
func (h *ConnectionHandler) HandleConn(conn io.ReadWriteCloser) {
	defer func() {
		conn.Close()
		apexctx.GetLogger(h.ctx).Errorf("Connection has been closed")
	}()

	ctx, cancel := context.WithCancel(h.ctx)
	defer cancel()

	logger := apexctx.GetLogger(h.ctx)

	r := msgp.NewReader(conn)
LOOP:
	for {
		hasHeaders, channel, c, err := h.next(r)
		if err != nil {
			if err == io.EOF {
				return
			}
			apexctx.GetLogger(h.ctx).WithError(err).Errorf("next(): unable to read message")
			return
		}
		logger.Infof("channel %d, number %d", channel, c)

		dispatcher, ok := h.sessions.Get(channel)
		if !ok {
			if channel <= h.highestChannel {
				// dispatcher was detached from ResponseStream.OnClose
				// This message must be a `close` message.
				// `channel` and `number` are parsed; skip `args` and probably `headers`
				logger.Infof("dispatcher for channel %d was detached", channel)
				r.Skip()
				if hasHeaders {
					r.Skip()
				}
				continue LOOP
			}

			h.highestChannel = channel

			ctx = apexctx.WithLogger(ctx, logger.WithField("channel", fmt.Sprintf("%s.%d", h.connID, channel)))
			rs := newResponseStream(ctx, conn, channel)
			rs.OnClose(func(ctx context.Context) {
				h.sessions.Detach(channel)
			})
			dispatcher = h.newDispatcher(ctx, rs)
		}

		dispatcher, err = dispatcher.Handle(c, r)
		// NOTE: remove it when the headers are being handled properly
		if hasHeaders {
			r.Skip()
		}
		if err != nil {
			if err == ErrInvalidArgsNum {
				logger.WithError(err).Errorf("channel %d, number %d", channel, c)
				return
			}
			logger.WithError(err).Errorf("Handle returned an error")
			h.sessions.Detach(channel)
			continue LOOP
		}

		if dispatcher == nil {
			h.sessions.Detach(channel)
			continue LOOP
		}

		h.sessions.Attach(channel, dispatcher)
	}
}
func (d *Deleter) doDel(w http.ResponseWriter, r *http.Request) {
	/* get topic and method */
	logger.Debug("get one req: %s", r.URL.String())
	qv := r.URL.Query()
	topic := qv.Get("topic")
	if len(topic) == 0 {
		msg := fmt.Sprintf("invalid query, topic cannot be empty: %s", r.URL.String())
		logger.Warning("%s", msg)
		d.response(w, http.StatusBadRequest, msg)
		return
	}
	cc, ok := d.cacheClusters[topic]
	if !ok {
		msg := fmt.Sprintf("invalid query, cache cluster does not exist: cluster [%s], %s", topic, r.URL.String())
		logger.Warning("%s", msg)
		d.response(w, http.StatusBadRequest, msg)
		return
	}

	/* get post data */
	if r.ContentLength == 0 {
		logger.Warning("post data cannot be empty")
		d.response(w, http.StatusBadRequest, "post data cannot be empty")
		return
	}
	data := make([]byte, r.ContentLength)
	_, err := io.ReadFull(r.Body, data)
	if err != nil {
		msg := fmt.Sprintf("fail to read post data: %s", err.Error())
		logger.Warning("%s", msg)
		d.response(w, http.StatusBadRequest, msg)
		return
	}

	/* unpack */
	buf := bytes.NewReader(data)
	buf.Seek(0, 0)
	msgr := msgp.NewReader(buf)
	reqi, err := msgr.ReadIntf()
	if err != nil {
		msg := fmt.Sprintf("fail to decode post data: %s", err.Error())
		logger.Warning("%s", msg)
		d.response(w, http.StatusBadRequest, msg)
		return
	}
	req, ok := reqi.(map[string]interface{})
	if !ok {
		msg := fmt.Sprintf("invalid post data: data must be a map: %s", r.URL.String())
		logger.Warning("%s", msg)
		d.response(w, http.StatusBadRequest, msg)
		return
	}

	keysi, ok := req[gDelKeys]
	if !ok {
		msg := fmt.Sprintf("invalid post data: %s does not exist: %s", gDelKeys, r.URL.String())
		logger.Warning("%s", msg)
		d.response(w, http.StatusBadRequest, msg)
		return
	}
	keys, ok := keysi.([]interface{})
	if !ok {
		msg := fmt.Sprintf("invalid post data: %s is not an array: %s", gDelKeys, r.URL.String())
		logger.Warning("%s", msg)
		d.response(w, http.StatusBadRequest, msg)
		return
	}
	if len(keys) == 0 {
		msg := fmt.Sprintf("invalid post data: len of %s array is 0: %s", gDelKeys, r.URL.String())
		logger.Warning("%s", msg)
		d.response(w, http.StatusBadRequest, msg)
		return
	}

	delTimei, ok := req[gDelDelTime]
	if !ok {
		msg := fmt.Sprintf("invalid post data: %s does not exist: %s", gDelDelTime, r.URL.String())
		logger.Warning("%s", msg)
		d.response(w, http.StatusBadRequest, msg)
		return
	}
	delTime, ok := delTimei.(int64)
	if !ok {
		delTimet, ok := delTimei.(int)
		if !ok {
			delTimetu, ok := delTimei.(uint64)
			if !ok {
				msg := fmt.Sprintf("invalid post data: %s is not int64, uint64, or int: %s", gDelDelTime, r.URL.String())
				logger.Warning("%s", msg)
				d.response(w, http.StatusBadRequest, msg)
				return
			} else {
				delTime = int64(delTimetu)
			}
		} else {
			delTime = int64(delTimet)
		}
	}

	var curDelay int64
	curDelayi, ok := req[gDelCurDelay]
	if !ok {
		curDelay = 0
	} else if curDelay, ok = curDelayi.(int64); !ok {
		curDelayt, ok := curDelayi.(int)
		if !ok {
			curDelaytu, ok := curDelayi.(uint64)
			if !ok {
				msg := fmt.Sprintf("invalid post data: %s is not int64, uint64, or int: %s", gDelCurDelay, r.URL.String())
				logger.Warning("%s", msg)
				d.response(w, http.StatusBadRequest, msg)
				return
			} else {
				curDelay = int64(curDelaytu)
			}
		} else {
			curDelay = int64(curDelayt)
		}
	}

	var isFromMq bool
	isFromMqi, ok := req[gDelFromMq]
	if !ok {
		isFromMq = false
	} else if isFromMq, ok = isFromMqi.(bool); !ok {
		msg := fmt.Sprintf("invalid post data: %s is not bool: %s", gDelFromMq, r.URL.String())
		logger.Warning("%s", msg)
		d.response(w, http.StatusBadRequest, msg)
		return
	}

	var doneServers map[string]interface{}
	doneServersi, ok := req[gDoneServers]
	if !ok {
		doneServers = make(map[string]interface{})
	} else if doneServers, ok = doneServersi.(map[string]interface{}); !ok {
		msg := fmt.Sprintf("invalid post data: %s is not map[string]interface{}: %s", gDoneServers, r.URL.String())
		logger.Warning("%s", msg)
		d.response(w, http.StatusBadRequest, msg)
		return
	}

	var firstDel bool
	firstDeli, ok := req[gFirstDel]
	if !ok {
		firstDel = true
	} else if firstDel, ok = firstDeli.(bool); !ok {
		msg := fmt.Sprintf("invalid post data: %s is not bool: %s", gFirstDel, r.URL.String())
		logger.Warning("%s", msg)
		d.response(w, http.StatusBadRequest, msg)
		return
	}

	/* do del */
	err = cc.doDel(keys, delTime, curDelay, doneServers, isFromMq, firstDel)
	if err != nil {
		msg := fmt.Sprintf("fail to del: %s", err.Error())
		logger.Warning("%s", msg)
		d.response(w, http.StatusInternalServerError, msg)
		return
	}

	msg := fmt.Sprintf("successfully processed: %s", r.URL.String())
	logger.Notice("%s", msg)
	d.response(w, http.StatusOK, msg)
	return
}
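// Illustrative sketch, not part of the original code: a client-side packer for the
// post body that doDel above expects. It reuses the gDel* key constants referenced
// in doDel; the helper name packDelReq and the restriction to the two required
// fields (keys and delete time) are assumptions for demonstration only.
func packDelReq(keys []interface{}, delTime int64) ([]byte, error) {
	req := map[string]interface{}{
		gDelKeys:    keys,
		gDelDelTime: delTime,
	}
	// Optional fields such as gDelCurDelay, gDelFromMq, gDoneServers and gFirstDel
	// fall back to defaults in doDel when absent.
	return msgp.AppendIntf(nil, req)
}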