// NewPublication returns a new publication based on topic and payload func NewPublication(topic string, payload proto.Message) (*Publication, error) { payloadData, err := proto.Marshal(payload) if err != nil { return nil, err } return buildPublication("application/octetstream", topic, payloadData) }
// NewRequest builds a new request object, checking for bad data func NewRequest(service, endpoint string, payload proto.Message) (*Request, error) { payloadData, err := proto.Marshal(payload) if err != nil { return nil, err } return NewProtoRequest( service, endpoint, payloadData, ) }
// Caller returns something that implements `Caller` - allowing us to use this as our
// gateway to service calls - the returned `Caller` is thread safe
func (m *Mock) Caller() Caller {
	return func(req *client.Request, rsp proto.Message) errors.Error {
		// Serialize access to the stub list and per-stub match bookkeeping.
		m.Lock()
		defer m.Unlock()
		// First stub that matches the request wins.
		for _, s := range m.stubs {
			if s.matches(req) {
				if s.Responder != nil {
					// A custom responder takes priority over a canned
					// Response/Error: it receives the count of matches so far
					// and the most recent matched request.
					// NOTE(review): assumes matches() has recorded this request
					// in s.matched, so numMatched >= 1 here — confirm in the
					// stub implementation, otherwise this indexes out of range.
					numMatched := len(s.matched)
					responderRsp, err := s.Responder(numMatched, s.matched[numMatched-1])
					if err != nil {
						return err
					}
					// put the responderRsp INTO the rsp
					// (round-trip through the wire format to copy the message;
					// marshal/unmarshal errors are deliberately ignored here)
					b, _ := proto.Marshal(responderRsp)
					proto.Unmarshal(b, rsp)
					return nil
				}
				// A stubbed error, if any, is returned as-is.
				if s.Error != nil {
					return s.Error
				}
				// put the response INTO the rsp
				// (same best-effort copy as above; errors ignored)
				b, _ := proto.Marshal(s.Response)
				proto.Unmarshal(b, rsp)
				return nil
			}
		}
		// no match found - do default action
		if m.proxy != nil {
			return m.proxy(req, rsp)
		}
		// no default - return error
		return errors.NotFound("mock.notfound", "No mocked service registered to handle request.")
	}
}
func NewRequestFromProto(req proto.Message) *Request { protoBytes := make([]byte, 0) if req != nil { protoBytes, _ = proto.Marshal(req) } result := NewRequestFromDelivery(amqp.Delivery{ Body: protoBytes, ContentType: "application/octetstream", Headers: amqp.Table(make(map[string]interface{})), }) result.unmarshaledData = req return result }
// Send will ping off a trace event func Send(e *traceproto.Event) error { // Marshal the trace here, so it's done concurrently msg, err := proto.Marshal(e) if err != nil { return err } // Send the marshaled trace, dropping if the backend is at capacity select { case traceChan <- msg: log.Tracef("Enqueued trace event message %v", e) default: // Channel is full, dropping trace :( // @todo We could instrument this, but if we're dropping traces we probably have serious problems? return fmt.Errorf("Dropping trace as channel is full") } return nil }
func response(replyTo *Request, payload proto.Message, messageType string) (rsp *Response, err error) { rsp = &Response{ messageType: messageType, delivery: replyTo.delivery, } switch replyTo.delivery.ContentType { case "application/json": rsp.payload, err = json.Marshal(payload) case "application/octetstream": rsp.payload, err = proto.Marshal(payload) default: err = fmt.Errorf("Unknown content type: %s", replyTo.delivery.ContentType) } if err != nil { rsp = nil log.Criticalf("[Server] Failed to marshal payload: %v", err) } return }