func (udp *udpListener) Start() {
	connection, err := net.ListenPacket("udp", udp.host)
	if err != nil {
		udp.Fatalf("Failed to listen on port. %s", err)
	}
	udp.Infof("Listening on port %s", udp.host)
	udp.Lock()
	udp.connection = connection
	udp.Unlock()

	messageCountMetricName := udp.contextName + ".receivedMessageCount"
	listenerTotalMetricName := "listeners.totalReceivedMessageCount"
	receivedByteCountMetricName := udp.contextName + ".receivedByteCount"

	readBuffer := make([]byte, 65535) // buffer with size = max theoretical UDP size
	defer close(udp.dataChannel)
	for {
		readCount, senderAddr, err := connection.ReadFrom(readBuffer)
		if err != nil {
			udp.Debugf("Error while reading: %s", err)
			return
		}
		udp.Debugf("AgentListener: Read %d bytes from address %s", readCount, senderAddr)

		readData := make([]byte, readCount) // pass on buffer in size only of read data
		copy(readData, readBuffer[:readCount])

		metrics.BatchIncrementCounter(messageCountMetricName)
		metrics.BatchIncrementCounter(listenerTotalMetricName)
		metrics.BatchAddCounter(receivedByteCountMetricName, uint64(readCount))

		udp.dataChannel <- readData
	}
}

func (m *MessageAggregator) Write(envelope *events.Envelope) {
	// TODO: don't call for every message if throughput becomes a problem
	m.cleanupOrphanedHTTPStart()

	if envelope.EventType == nil {
		metrics.BatchIncrementCounter("MessageAggregator.uncategorizedEvents")
		return
	}

	switch envelope.GetEventType() {
	case events.Envelope_HttpStart:
		m.handleHTTPStart(envelope)
	case events.Envelope_HttpStop:
		startStopMessage := m.handleHTTPStop(envelope)
		if startStopMessage != nil {
			m.outputWriter.Write(startStopMessage)
		}
	case events.Envelope_CounterEvent:
		counterEventMessage := m.handleCounter(envelope)
		m.outputWriter.Write(counterEventMessage)
	default:
		metrics.BatchIncrementCounter("MessageAggregator.uncategorizedEvents")
		m.outputWriter.Write(envelope)
	}
}

func (t *TCPListener) handleConnection(conn net.Conn) {
	defer conn.Close()
	defer t.removeConnection(conn)

	if tlsConn, ok := conn.(*tls.Conn); ok {
		if err := tlsConn.Handshake(); err != nil {
			t.logger.Warnd(map[string]interface{}{
				"error":   err.Error(),
				"address": conn.RemoteAddr().String(),
			}, "TLS handshake error")
			metrics.BatchIncrementCounter(t.receiveErrorCountMetricName)
			return
		}
	}

	var (
		n     uint32
		bytes []byte
		err   error
	)
	for {
		err = binary.Read(conn, binary.LittleEndian, &n)
		if err != nil {
			if err != io.EOF {
				metrics.BatchIncrementCounter(t.receiveErrorCountMetricName)
				t.logger.Errorf("Error while decoding: %v", err)
			}
			break
		}

		if cap(bytes) < int(n) {
			bytes = make([]byte, int(n))
		}
		read := bytes[:n]
		_, err = io.ReadFull(conn, read)
		if err != nil {
			metrics.BatchIncrementCounter(t.receiveErrorCountMetricName)
			t.logger.Errorf("Error during i/o read: %v", err)
			break
		}

		envelope, err := t.unmarshaller.UnmarshallMessage(read)
		if err != nil {
			continue
		}

		metrics.BatchIncrementCounter(t.listenerTotalMetricName)
		metrics.BatchIncrementCounter(t.receivedMessageCountMetricName)
		metrics.BatchAddCounter(t.receivedByteCountMetricName, uint64(n+4))

		select {
		case t.envelopeChan <- envelope:
		case <-t.stopped:
			return
		}
	}
}

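// The loop above expects each message framed as a little-endian uint32 length
// followed by a payload of that length (hence the uint64(n+4) byte count).
// The helper below is a minimal sketch of the matching sender-side framing;
// frameMessage is an illustrative name, not a function taken from this code.
func frameMessage(payload []byte) []byte {
	framed := make([]byte, 4+len(payload))
	binary.LittleEndian.PutUint32(framed, uint32(len(payload)))
	copy(framed[4:], payload)
	return framed
}
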
func (m *MetricsReporter) CaptureRoutingRequest(b *route.Endpoint, req *http.Request) {
	dropsondeMetrics.BatchIncrementCounter("total_requests")

	componentName, ok := b.Tags["component"]
	if ok && len(componentName) > 0 {
		dropsondeMetrics.BatchIncrementCounter(fmt.Sprintf("requests.%s", componentName))
		if strings.HasPrefix(componentName, "dea-") {
			dropsondeMetrics.BatchIncrementCounter("routed_app_requests")
		}
	}
}

func (m *MessageAggregator) handleHTTPStop(envelope *events.Envelope) *events.Envelope {
	if m.emitMetrics {
		metrics.BatchIncrementCounter("MessageAggregator.httpStopReceived")
	}
	atomic.AddUint64(&m.httpStopReceivedCount, 1)

	m.logger.Debugf("handling HTTP stop message %v", spew.Sprintf("%v", envelope))

	stopEvent := envelope.GetHttpStop()
	requestID := stopEvent.RequestId.String()
	event := eventID{requestID: requestID, peerType: stopEvent.GetPeerType()}

	startEventEntry, ok := m.startEventsByEventID[event]
	if !ok {
		m.logger.Warnf("no matching HTTP start message found for %v", event)
		if m.emitMetrics {
			metrics.BatchIncrementCounter("MessageAggregator.httpUnmatchedStopReceived")
		}
		atomic.AddUint64(&m.httpUnmatchedStopReceivedCount, 1)
		return nil
	}

	if m.emitMetrics {
		metrics.BatchIncrementCounter("MessageAggregator.httpStartStopEmitted")
	}
	atomic.AddUint64(&m.httpStartStopEmittedCount, 1)

	delete(m.startEventsByEventID, event)
	startEvent := startEventEntry.startEvent

	return &events.Envelope{
		Origin:    envelope.Origin,
		Timestamp: stopEvent.Timestamp,
		EventType: events.Envelope_HttpStartStop.Enum(),
		HttpStartStop: &events.HttpStartStop{
			StartTimestamp:  startEvent.Timestamp,
			StopTimestamp:   stopEvent.Timestamp,
			RequestId:       startEvent.RequestId,
			PeerType:        startEvent.PeerType,
			Method:          startEvent.Method,
			Uri:             startEvent.Uri,
			RemoteAddress:   startEvent.RemoteAddress,
			UserAgent:       startEvent.UserAgent,
			StatusCode:      stopEvent.StatusCode,
			ContentLength:   stopEvent.ContentLength,
			ParentRequestId: startEvent.ParentRequestId,
			ApplicationId:   stopEvent.ApplicationId,
			InstanceIndex:   startEvent.InstanceIndex,
			InstanceId:      startEvent.InstanceId,
		},
	}
}

func (m *MetricsReporter) CaptureRoutingResponse(b *route.Endpoint, res *http.Response, t time.Time, d time.Duration) {
	dropsondeMetrics.BatchIncrementCounter(getResponseCounterName(res))
	dropsondeMetrics.BatchIncrementCounter("responses")

	latency := float64(d / time.Millisecond) // integer division: reports whole milliseconds only
	unit := "ms"
	dropsondeMetrics.SendValue("latency", latency, unit)

	componentName, ok := b.Tags["component"]
	if ok && len(componentName) > 0 {
		dropsondeMetrics.SendValue(fmt.Sprintf("latency.%s", componentName), latency, unit)
	}
}

func (d *DopplerForwarder) Write(message *events.Envelope) {
	client, err := d.clientPool.RandomClient()
	if err != nil {
		d.logger.Errord(map[string]interface{}{
			"error": err.Error(),
		}, "DopplerForwarder: can't forward message")
		return
	}

	messageBytes, err := proto.Marshal(message)
	if err != nil {
		d.logger.Errorf("DopplerForwarder: marshal error %v", err)
		metrics.BatchIncrementCounter("dropsondeMarshaller.marshalErrors")
		return
	}

	switch client.Scheme() {
	case "udp":
		signedMessage := signature.SignMessage(messageBytes, d.sharedSecret)
		if _, err := client.Write(signedMessage); err != nil {
			d.logger.Debugd(map[string]interface{}{
				"scheme":  client.Scheme(),
				"address": client.Address(),
				"error":   err.Error(),
			}, "Error writing legacy message")
			return
		}
	case "tls":
		err = binary.Write(client, binary.LittleEndian, uint32(len(messageBytes)))
		if err == nil {
			_, err = client.Write(messageBytes)
		}
		if err != nil {
			client.Close()
			d.logger.Errord(map[string]interface{}{
				"scheme":  client.Scheme(),
				"address": client.Address(),
				"error":   err.Error(),
			}, "DopplerForwarder: streaming error")
			return
		}
	default:
		d.logger.Errorf("DopplerForwarder: unknown protocol, %s for %s", client.Scheme(), client.Address())
		return
	}

	d.incrementMessageCount(message.GetEventType())
	metrics.BatchIncrementCounter("DopplerForwarder.sentMessages")
}

func (w *Wrapper) Write(client Client, message []byte) error {
	sentBytes, err := client.Write(message)
	if err != nil {
		w.logger.Errorf("Error writing to %s client %v\n", w.protocol, err)
		metrics.BatchIncrementCounter(w.protocol + ".sendErrorCount")
		client.Close()
		return err
	}
	metrics.BatchAddCounter(w.protocol+".sentByteCount", uint64(sentBytes))
	metrics.BatchIncrementCounter(w.protocol + ".sentMessageCount")
	return nil
}

func (m *EventMarshaller) incrementMessageCount(eventType events.Envelope_EventType) {
	incrementCount(m.messageCounts[eventType])

	// Lowercase the first rune of the event type name,
	// e.g. events.Envelope_HttpStartStop -> "dropsondeMarshaller.httpStartStopMarshalled".
	modifiedEventName := []rune(eventType.String())
	modifiedEventName[0] = unicode.ToLower(modifiedEventName[0])
	metricName := string(modifiedEventName) + "Marshalled"
	metrics.BatchIncrementCounter("dropsondeMarshaller." + metricName)
}

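// incrementCount is used by several snippets here (this marshaller and the
// unmarshallers below) but its definition is not included. A minimal sketch
// consistent with the atomic counters used elsewhere in these snippets:
func incrementCount(count *uint64) {
	atomic.AddUint64(count, 1)
}
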
func (m *MessageAggregator) Write(envelope *events.Envelope) {
	// TODO: don't call for every message if throughput becomes a problem
	m.cleanupOrphanedHTTPStart()

	if envelope.EventType == nil {
		m.outputWriter.Write(envelope)
		return
	}

	switch envelope.GetEventType() {
	case events.Envelope_HttpStart:
		m.handleHTTPStart(envelope)
	case events.Envelope_HttpStop:
		startStopMessage := m.handleHTTPStop(envelope)
		if startStopMessage != nil {
			m.outputWriter.Write(startStopMessage)
		}
	case events.Envelope_CounterEvent:
		counterEventMessage := m.handleCounter(envelope)
		m.outputWriter.Write(counterEventMessage)
	default:
		atomic.AddUint64(&m.uncategorizedEventCount, 1)
		if m.emitMetrics {
			metrics.BatchIncrementCounter("MessageAggregator.uncategorizedEvents")
		}
		m.logger.Debugf("passing through message %v", spew.Sprintf("%v", envelope))
		m.outputWriter.Write(envelope)
	}
}

func (agentListener *agentListener) Start() {
	connection, err := net.ListenPacket("udp", agentListener.host)
	if err != nil {
		agentListener.Fatalf("Failed to listen on port. %s", err)
	}
	agentListener.Infof("Listening on port %s", agentListener.host)
	agentListener.Lock()
	agentListener.connection = connection
	agentListener.Unlock()

	readBuffer := make([]byte, 65535) // buffer with size = max theoretical UDP size
	defer close(agentListener.dataChannel)
	for {
		readCount, senderAddr, err := connection.ReadFrom(readBuffer)
		if err != nil {
			agentListener.Debugf("Error while reading. %s", err)
			return
		}
		agentListener.Debugf("AgentListener: Read %d bytes from address %s", readCount, senderAddr)

		readData := make([]byte, readCount) // pass on buffer in size only of read data
		copy(readData, readBuffer[:readCount])

		metrics.BatchIncrementCounter(agentListener.contextName + ".receivedMessageCount")
		metrics.BatchAddCounter(agentListener.contextName+".receivedByteCount", uint64(readCount))

		agentListener.dataChannel <- readData
	}
}

func (w *Writer) Write(msgBytes []byte) (int, error) {
	w.msgBufferLock.Lock()
	defer w.msgBufferLock.Unlock()

	prefixedBytes, err := w.prefixMessage(msgBytes)
	if err != nil {
		w.logger.Errorf("Error encoding message length: %v\n", err)
		metrics.BatchIncrementCounter("tls.sendErrorCount")
		return 0, err
	}

	switch {
	case w.msgBuffer.Len()+len(prefixedBytes) > w.msgBuffer.Cap():
		_, err := w.retryWrites(prefixedBytes)
		if err != nil {
			dropped := w.msgBuffer.messages + 1
			atomic.AddUint64(&w.droppedMessages, dropped)
			metrics.BatchAddCounter("MessageBuffer.droppedMessageCount", dropped)
			w.msgBuffer.Reset()
			w.msgBuffer.writeNonMessage(w.droppedLogMessage())
			w.timer.Reset(w.flushDuration)
			return 0, err
		}
		return len(msgBytes), nil
	default:
		if w.msgBuffer.Len() == 0 {
			w.timer.Reset(w.flushDuration)
		}
		_, err := w.msgBuffer.Write(prefixedBytes)
		return len(msgBytes), err
	}
}

func (nr *NetworkReader) Start() {
	connection, err := net.ListenPacket("udp4", nr.host)
	if err != nil {
		nr.logger.Fatalf("Failed to listen on port. %s", err)
	}
	nr.logger.Infof("Listening on port %s", nr.host)
	nr.lock.Lock()
	nr.connection = connection
	nr.lock.Unlock()

	readBuffer := make([]byte, 65535) // buffer with size = max theoretical UDP size
	for {
		readCount, senderAddr, err := connection.ReadFrom(readBuffer)
		if err != nil {
			nr.logger.Debugf("Error while reading. %s", err)
			return
		}
		nr.logger.Debugf("NetworkReader: Read %d bytes from address %s", readCount, senderAddr)

		readData := make([]byte, readCount) // pass on buffer in size only of read data
		copy(readData, readBuffer[:readCount])

		atomic.AddUint64(&nr.receivedMessageCount, 1)
		atomic.AddUint64(&nr.receivedByteCount, uint64(readCount))
		metrics.BatchIncrementCounter(nr.contextName + ".receivedMessageCount")
		metrics.BatchAddCounter(nr.contextName+".receivedByteCount", uint64(readCount))
		nr.writer.Write(readData)
	}
}

func (u *UDPWrapper) Write(client Client, message []byte) error {
	signedMessage := signature.SignMessage(message, u.sharedSecret)

	sentLength, err := client.Write(signedMessage)
	if err != nil {
		u.logger.Errorf("Error writing to UDP client %v\n", err)
		metrics.BatchIncrementCounter("udp.sendErrorCount")
		return err
	}
	metrics.BatchIncrementCounter("udp.sentMessageCount")
	metrics.BatchAddCounter("udp.sentByteCount", uint64(sentLength))

	// The TLS side writes this metric in the batch.Writer. For UDP,
	// it needs to be done here.
	metrics.BatchIncrementCounter("DopplerForwarder.sentMessages")

	return nil
}

func (m *EventMarshaller) Write(envelope *events.Envelope) {
	writer := m.writer()
	if writer == nil {
		m.logger.Warn("EventMarshaller: Write called while byteWriter is nil")
		metrics.BatchIncrementCounter("dropsondeMarshaller.nilByteWriterWrites")
		return
	}

	envelopeBytes, err := proto.Marshal(envelope)
	if err != nil {
		m.logger.Errorf("marshalling error: %v", err)
		metrics.BatchIncrementCounter("dropsondeMarshaller.marshalErrors")
		return
	}

	writer.Write(envelopeBytes)
}

func (u *dropsondeMarshaller) incrementMessageCount(eventType events.Envelope_EventType) {
	metricName := metricNames[eventType]
	if metricName == "" {
		metricName = "dropsondeMarshaller.unknownEventTypeReceived"
	}
	metrics.BatchIncrementCounter(metricName)
}

func (u *EventUnmarshaller) incrementReceiveCount(eventType events.Envelope_EventType) {
	switch eventType {
	case events.Envelope_LogMessage:
		// LogMessage is a special case. `logMessageReceived` used to be broken out by app ID, and
		// `logMessageTotal` was the sum of all of those.
		metrics.BatchIncrementCounter("dropsondeUnmarshaller.logMessageTotal")
		incrementCount(u.receiveCounts[events.Envelope_LogMessage])
	default:
		name := eventType.String()
		modifiedEventName := []rune(name)
		modifiedEventName[0] = unicode.ToLower(modifiedEventName[0])
		metricName := string(modifiedEventName) + "Received"
		metrics.BatchIncrementCounter("dropsondeUnmarshaller." + metricName)
		incrementCount(u.receiveCounts[eventType])
	}
}

func (u *dropsondeUnmarshaller) incrementReceiveCount(eventType events.Envelope_EventType) {
	modifiedEventName := []rune(eventType.String())
	modifiedEventName[0] = unicode.ToLower(modifiedEventName[0])
	metricName := string(modifiedEventName) + "Received"
	metrics.BatchIncrementCounter("dropsondeUnmarshaller." + metricName)
	incrementCount(u.receiveCounts[eventType])
}

func (m *MessageAggregator) cleanupOrphanedHTTPStart() {
	currentTime := time.Now()
	for key, eventEntry := range m.startEventsByEventID {
		if currentTime.Sub(eventEntry.entryTime) > MaxTTL {
			metrics.BatchIncrementCounter("MessageAggregator.httpUnmatchedStartReceived")
			delete(m.startEventsByEventID, key)
		}
	}
}

// Run validates signatures. It consumes signed messages from inputChan,
// verifies the signature, and sends the message (sans signature) to outputChan.
// Invalid messages are dropped and nothing is sent to outputChan. Thus a reader
// of outputChan is guaranteed to receive only messages with a valid signature.
//
// Run blocks on sending to outputChan, so the channel must be drained for the
// function to continue consuming from inputChan.
func (v *Verifier) Run(inputChan <-chan []byte, outputChan chan<- []byte) {
	for signedMessage := range inputChan {
		if len(signedMessage) < SIGNATURE_LENGTH {
			v.logger.Warnf("signatureVerifier: missing signature for message %v", signedMessage)
			metrics.BatchIncrementCounter("signatureVerifier.missingSignatureErrors")
			continue
		}

		signature, message := signedMessage[:SIGNATURE_LENGTH], signedMessage[SIGNATURE_LENGTH:]
		if v.verifyMessage(message, signature) {
			outputChan <- message
			metrics.BatchIncrementCounter("signatureVerifier.validSignatures")
		} else {
			v.logger.Warnf("signatureVerifier: invalid signature for message %v", message)
			metrics.BatchIncrementCounter("signatureVerifier.invalidSignatureErrors")
		}
	}
}

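// SignMessage (used by the UDP senders above) and verifyMessage (used by the
// Verifier) are not shown in these snippets. The pair below is a minimal
// sketch assuming an HMAC-SHA256 scheme with the 32-byte digest prepended to
// the message, in which case SIGNATURE_LENGTH would equal sha256.Size; the
// actual algorithm may differ, and signPayload / verifyPayload are
// illustrative names only (uses crypto/hmac and crypto/sha256).
func signPayload(message, sharedSecret []byte) []byte {
	mac := hmac.New(sha256.New, sharedSecret)
	mac.Write(message)
	return append(mac.Sum(nil), message...)
}

func verifyPayload(message, signature, sharedSecret []byte) bool {
	mac := hmac.New(sha256.New, sharedSecret)
	mac.Write(message)
	return hmac.Equal(signature, mac.Sum(nil))
}
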
func (d *DopplerForwarder) Write(message []byte) {
	client, err := d.clientPool.RandomClient()
	if err != nil {
		d.logger.Errorf("can't forward message: %v", err)
		return
	}
	client.Send(message)
	metrics.BatchIncrementCounter("DopplerForwarder.sentMessages")
}

func (u *dropsondeUnmarshaller) incrementReceiveCount(eventType events.Envelope_EventType) error {
	var err error
	switch eventType {
	case events.Envelope_LogMessage:
		// LogMessage is a special case. `logMessageReceived` used to be broken out by app ID, and
		// `logMessageTotal` was the sum of all of those.
		metrics.BatchIncrementCounter("dropsondeUnmarshaller.logMessageTotal")
	default:
		metricName := metricNames[eventType]
		if metricName == "" {
			metricName = "dropsondeUnmarshaller.unknownEventTypeReceived"
			err = fmt.Errorf("dropsondeUnmarshaller: received unknown event type %#v", eventType)
		}
		metrics.BatchIncrementCounter(metricName)
	}
	return err
}

func (m *MessageAggregator) handleHTTPStart(envelope *events.Envelope) {
	metrics.BatchIncrementCounter("MessageAggregator.httpStartReceived")

	logging.Debugf(m.logger, "handling HTTP start message for appID: %v", envelope.GetHttpStart().GetApplicationId())

	startEvent := envelope.GetHttpStart()
	requestID := startEvent.RequestId.String()
	event := eventID{requestID: requestID, peerType: startEvent.GetPeerType()}
	m.startEventsByEventID[event] = startEventEntry{startEvent: startEvent, entryTime: time.Now()}
}

func (m *MessageAggregator) handleHTTPStart(envelope *events.Envelope) {
	metrics.BatchIncrementCounter("MessageAggregator.httpStartReceived")

	m.logger.Debugf("handling HTTP start message %v", spew.Sprintf("%v", envelope))

	startEvent := envelope.GetHttpStart()
	requestID := startEvent.RequestId.String()
	event := eventID{requestID: requestID, peerType: startEvent.GetPeerType()}
	m.startEventsByEventID[event] = startEventEntry{startEvent: startEvent, entryTime: time.Now()}
}

// Send sends the log message with the envelope timestamp set to now and the
// log message timestamp set to now if none was provided by SetTimestamp.
func (c logChainer) Send() error {
	metrics.BatchIncrementCounter("logSenderTotalMessagesRead")

	c.envelope.Timestamp = proto.Int64(time.Now().UnixNano())
	if c.envelope.LogMessage.Timestamp == nil {
		c.envelope.LogMessage.Timestamp = proto.Int64(time.Now().UnixNano())
	}

	return c.emitter.EmitEnvelope(c.envelope)
}

func (w *Writer) flushBuffer() {
	w.msgBufferLock.Lock()
	defer w.msgBufferLock.Unlock()

	if w.msgBuffer.Len() == 0 {
		return
	}
	if _, err := w.flushWrite(nil); err != nil {
		metrics.BatchIncrementCounter("DopplerForwarder.retryCount")
		w.timer.Reset(w.flushDuration)
	}
}

func (m *EventMarshaller) Write(message *events.Envelope) {
	messageBytes, err := proto.Marshal(message)
	if err != nil {
		m.logger.Errorf("eventMarshaller: marshal error %v for message %v", err, message)
		metrics.BatchIncrementCounter("dropsondeMarshaller.marshalErrors")
		return
	}

	m.logger.Debugf("eventMarshaller: marshalled message %v", spew.Sprintf("%v", message))

	m.incrementMessageCount(message.GetEventType())
	m.outputWriter.Write(messageBytes)
}

func (w *Writer) retryWrites(message []byte) (sent int, err error) {
	for i := 0; i < maxOverflowTries; i++ {
		if i > 0 {
			metrics.BatchIncrementCounter("DopplerForwarder.retryCount")
		}
		sent, err = w.flushWrite(message)
		if err == nil {
			return sent, nil
		}
	}
	return 0, err
}

func (u *LegacyUnmarshaller) unmarshalMessage(message []byte) (*logmessage.LogEnvelope, error) {
	envelope := &logmessage.LogEnvelope{}
	err := proto.Unmarshal(message, envelope)
	if err != nil {
		u.logger.Debugf("legacyUnmarshaller: unmarshal error %v for message %v", err, message)
		metrics.BatchIncrementCounter("legacyUnmarshaller.unmarshalErrors")
		return nil, err
	}

	u.logger.Debugf("legacyUnmarshaller: received message %v", spew.Sprintf("%v", envelope))
	return envelope, nil
}

func (u *EventUnmarshaller) incrementLogMessageReceiveCount(appID string) {
	metrics.BatchIncrementCounter("EventUnmarshaller.logMessageTotal")

	_, ok := u.logMessageReceiveCounts[appID]
	if !ok {
		var count uint64
		u.lock.Lock()
		u.logMessageReceiveCounts[appID] = &count
		u.lock.Unlock()
	}
	incrementCount(u.logMessageReceiveCounts[appID])
	incrementCount(u.receiveCounts[events.Envelope_LogMessage])
}