Example #1
/* validate that this is a DNS packet, make a conntable entry, and output
   to the log channel if there is a match

   we pass the packet by value here because ZeroCopy is enabled for the
   capture, which reuses the capture buffer
*/
func handlePacket(packets chan *packetData, logC chan dnsLogEntry,
	gcInterval time.Duration, gcAge time.Duration, threadNum int,
	stats *statsd.StatsdBuffer) {

	//DNS IDs are stored as uint16s by the gopacket DNS layer
	var conntable = make(map[uint16]dnsMapEntry)

	//setup garbage collection for this map
	go cleanDnsCache(&conntable, gcAge, gcInterval, threadNum, stats)

	//TCP reassembly init
	streamFactory := &dnsStreamFactory{}
	streamPool := tcpassembly.NewStreamPool(streamFactory)
	assembler := tcpassembly.NewAssembler(streamPool)
	ticker := time.Tick(time.Minute)

	for {
		select {
		case packet, more := <-packets:

			//used for clean shutdowns
			if !more {
				return
			}

			err := packet.Parse()

			if err != nil {
				log.Debugf("Error parsing packet: %s", err)
				continue
			}

			srcIP := packet.GetSrcIP()
			dstIP := packet.GetDstIP()

			//All TCP goes to the reassembler first: a single-packet DNS request would parse as DNS,
			//but handling it directly would leave the connection hanging around in memory, because
			//the initial handshake won't parse as DNS, nor will the connection teardown.

			if packet.IsTCPStream() {
				handleDns(&conntable, packet.GetDNSLayer(), logC, srcIP, dstIP)
			} else if packet.HasTCPLayer() {
				assembler.AssembleWithTimestamp(packet.GetIPLayer().NetworkFlow(),
					packet.GetTCPLayer(), *packet.GetTimestamp())
				continue
			} else if packet.HasDNSLayer() {
				handleDns(&conntable, packet.GetDNSLayer(), logC, srcIP, dstIP)
				if stats != nil {
					stats.Incr(strconv.Itoa(threadNum)+".dns_lookups", 1)
				}
			} else {
				//UDP and doesn't parse as DNS?
				log.Debug("Missing a DNS layer?")
			}
		case <-ticker:
			// Every minute, flush connections that haven't seen activity in the past 2 minutes.
			assembler.FlushOlderThan(time.Now().Add(time.Minute * -2))
		}
	}
}
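
Neither dnsMapEntry nor handleDns is shown on this page. Below is a minimal sketch of the query/response matching they imply, keyed on the 16-bit DNS transaction ID; the dnsLogEntry fields, the handleDns body, and the net.IP parameter types are assumptions, and the real project's types are almost certainly richer.

import (
	"net"
	"time"

	"github.com/google/gopacket/layers"
)

//one outstanding query, remembered until the matching response arrives
//or cleanDnsCache expires it (see Example #5)
type dnsMapEntry struct {
	entry    layers.DNS
	inserted time.Time
}

//hypothetical log record; the real dnsLogEntry carries more fields
type dnsLogEntry struct {
	query, response layers.DNS
	srcIP, dstIP    net.IP
}

func handleDns(conntable *map[uint16]dnsMapEntry, dns *layers.DNS,
	logC chan dnsLogEntry, srcIP net.IP, dstIP net.IP) {
	if !dns.QR {
		//a query: remember it, keyed by transaction ID
		(*conntable)[dns.ID] = dnsMapEntry{entry: *dns, inserted: time.Now()}
		return
	}
	//a response: emit a log entry if we saw the query, then drop the entry
	if q, ok := (*conntable)[dns.ID]; ok {
		logC <- dnsLogEntry{query: q.entry, response: *dns, srcIP: srcIP, dstIP: dstIP}
		delete(*conntable, dns.ID)
	}
}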
Example #2
func watchLogStats(stats *statsd.StatsdBuffer, logC chan dnsLogEntry, logs []chan dnsLogEntry) {
	for {
		stats.Gauge("incoming_log_depth", int64(len(logC)))
		for i, logChan := range logs {
			stats.Gauge(strconv.Itoa(i)+".log_depth", int64(len(logChan)))
		}

		time.Sleep(3 * time.Second)
	}
}
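
watchLogStats loops forever, so it is launched as its own goroutine. A usage sketch follows; the channel names, buffer sizes, and startLogWatcher itself are hypothetical, though the 100-slot buffers mirror what doCapture (Example #6) creates.

func startLogWatcher(stats *statsd.StatsdBuffer, numWorkers int) {
	logChan := make(chan dnsLogEntry, 100)
	workerLogChans := make([]chan dnsLogEntry, numWorkers)
	for i := range workerLogChans {
		workerLogChans[i] = make(chan dnsLogEntry, 100)
	}

	//len(ch) on a buffered channel is its current queue depth, so the
	//gauges above effectively report backpressure in the logging pipeline
	go watchLogStats(stats, logChan, workerLogChans)
}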
func metricEvent(s *statsd.StatsdBuffer, c *docker.Client, event *docker.APIEvents) {
	s.Incr(event.Status, 1)
	container, err := c.InspectContainer(event.ID)
	if err != nil {
		log.WithFields(log.Fields{"containerid": event.ID, "error": err}).Warn("could not inspect container")
		return
	}

	s.Incr(fmt.Sprintf("%s.%s", event.Status, shortenImageName(container.Config.Image)), 1)
}
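
shortenImageName is not shown on this page. A plausible sketch follows, assuming the intent is to reduce a full image reference to a stable metric key by stripping the registry, repository path, and tag; the real helper may behave differently.

import "strings"

//shortenImageName reduces e.g. "registry.example.com:5000/team/nginx:1.9"
//to "nginx" so the statsd key stays stable across registries and tags
func shortenImageName(image string) string {
	//drop the tag, if any: "nginx:1.9" -> "nginx" (but don't mistake a
	//registry port for a tag: everything after a tag colon has no slash)
	if i := strings.LastIndex(image, ":"); i != -1 && !strings.Contains(image[i:], "/") {
		image = image[:i]
	}
	//keep only the last path component: "team/nginx" -> "nginx"
	if i := strings.LastIndex(image, "/"); i != -1 {
		image = image[i+1:]
	}
	return image
}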
Example #4
func goStats(cw *cloudwatch.CloudWatch, requests *[]cloudwatch.GetMetricStatisticsRequest, now time.Time, statsCli *statsd.StatsdBuffer) {
	log.WithField("now", now).Debug("tick")
	for _, request := range *requests {
		request.EndTime = now
		request.StartTime = now.Add(time.Minute * -1) // 1 minute ago

		response, err := cw.GetMetricStatistics(&request)
		if err != nil {
			log.WithField("error", err).Warn("could not retrieve metric from cloudwatch")
			continue //the response is not usable after an error
		}

		for _, dp := range response.GetMetricStatisticsResult.Datapoints {
			statsCli.Absolute(fmt.Sprintf("%s.%s", strings.ToLower(request.Dimensions[0].Value), strings.ToLower(request.MetricName)), int64(dp.Sum))
		}
	}
}
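
The requests slice and the ticker that drive goStats live elsewhere. A hedged sketch of both follows; only the fields goStats touches above (MetricName, StartTime, EndTime, Dimensions[0].Value) are certain, so the Namespace, Period, Statistics, and Dimension.Name field names are assumptions about this cloudwatch client, as are the metric names.

func pollCloudwatch(cw *cloudwatch.CloudWatch, statsCli *statsd.StatsdBuffer) {
	//one request per metric to poll; field names beyond what goStats uses
	//are assumed from the CloudWatch GetMetricStatistics API
	requests := []cloudwatch.GetMetricStatisticsRequest{{
		Namespace:  "AWS/ELB",
		MetricName: "RequestCount",
		Period:     60,
		Statistics: []string{"Sum"},
		Dimensions: []cloudwatch.Dimension{{Name: "LoadBalancerName", Value: "my-elb"}},
	}}

	//goStats re-stamps a one-minute window on every tick, so a one-minute
	//ticker yields back-to-back, non-overlapping windows
	for now := range time.Tick(time.Minute) {
		goStats(cw, &requests, now, statsCli)
	}
}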
Example #5
//background task to clear out stale entries in the conntable
//one of these gets spun up for every packet handling thread
//takes a pointer to the conntable to clean, the maximum age of an entry, and how often to run GC
func cleanDnsCache(conntable *map[uint16]dnsMapEntry, maxAge time.Duration,
	interval time.Duration, threadNum int, stats *statsd.StatsdBuffer) {

	for {
		time.Sleep(interval)

		//max_age should be negative, e.g. -1m
		cleanupCutoff := time.Now().Add(maxAge)
		if stats != nil {
			stats.Gauge(strconv.Itoa(threadNum)+".cache_size", int64(len(*conntable)))
		}
		for key, item := range *conntable {
			if item.inserted.Before(cleanupCutoff) {
				log.Debug("conntable GC(" + strconv.Itoa(threadNum) + "): cleanup query ID " + strconv.Itoa(int(key)))
				delete(*conntable, key)
				if stats != nil {
					stats.Incr(strconv.Itoa(threadNum)+".cache_entries_dropped", 1)
				}
			}
		}
	}
}
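
Because the cutoff is computed as time.Now().Add(maxAge), callers pass a negative duration. A usage sketch, matching the gc_age/gc_interval strings that doCapture (Example #6) parses:

conntable := make(map[uint16]dnsMapEntry)

//"-1m" parses to a negative Duration, so Now().Add(maxAge) is one minute ago
maxAge, _ := time.ParseDuration("-1m")
interval, _ := time.ParseDuration("3m")

//stats may be nil: every stats call above is guarded by a nil check
go cleanDnsCache(&conntable, maxAge, interval, 0, nil)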
Example #6
//kick off packet processing threads and start the packet capture loop
func doCapture(handle *pcap.Handle, logChan chan dnsLogEntry,
	config *pdnsConfig, reChan chan tcpDataStruct,
	stats *statsd.StatsdBuffer) {

	gcAgeDur, err := time.ParseDuration(config.gcAge)

	if err != nil {
		log.Fatal("Your gc_age parameter was not parseable.  Use a string like '-1m'")
	}

	gcIntervalDur, err := time.ParseDuration(config.gcInterval)

	if err != nil {
		log.Fatal("Your gc_age parameter was not parseable.  Use a string like '3m'")
	}

	//setup the global channel for reassembled TCP streams
	reassembleChan = reChan

	/* init channels for the packet handlers and kick off handler threads */
	var channels []chan *packetData
	for i := 0; i < config.numprocs; i++ {
		channels = append(channels, make(chan *packetData, 100))
	}

	for i := 0; i < config.numprocs; i++ {
		go handlePacket(channels[i], logChan, gcIntervalDur, gcAgeDur, i, stats)
	}

	// Use the handle as a packet source to process all packets
	packetSource := gopacket.NewPacketSource(handle, handle.LinkType())
	//only decode packet in response to function calls, this moves the
	//packet processing to the processing threads
	packetSource.DecodeOptions.Lazy = true
	//We don't mutate the packet bytes, so there's no need to make a copy.
	//This does mean we must pass the packet itself over the channel, not a
	//pointer to it, as the underlying buffer will be reused.
	packetSource.DecodeOptions.NoCopy = true

	/*
		parse up to the IP layer so we can consistently balance the packets across our
		processing threads

		TODO: in the future maybe pass this on the channel so we don't reparse
				but the profiling I've done doesn't point to this as a problem
	*/

	var ethLayer layers.Ethernet
	var ipLayer layers.IPv4

	parser := gopacket.NewDecodingLayerParser(
		layers.LayerTypeEthernet,
		&ethLayer,
		&ipLayer,
	)

	foundLayerTypes := []gopacket.LayerType{}

CAPTURE:
	for {
		select {
		case reassembledTcp := <-reChan:
			pd := NewTcpData(reassembledTcp)
			//NB: masking with (numprocs-1) balances correctly only when numprocs is a power of two
			channels[int(reassembledTcp.IpLayer.FastHash())&(config.numprocs-1)] <- pd
			if stats != nil {
				stats.Incr("reassembed_tcp", 1)
			}
		case packet := <-packetSource.Packets():
			if packet != nil {
				parser.DecodeLayers(packet.Data(), &foundLayerTypes)
				if foundLayerType(layers.LayerTypeIPv4, foundLayerTypes) {
					pd := NewPacketData(packet)
					channels[int(ipLayer.NetworkFlow().FastHash())&(config.numprocs-1)] <- pd
					if stats != nil {
						stats.Incr("packets", 1)
					}
				}
			} else {
				//if we get here, we're likely reading a pcap and we've finished
				//or, potentially, the physical device we've been reading from has been
				//downed.  Or something else crazy has gone wrong...so we break
				//out of the capture loop entirely.

				log.Debug("packetSource returned nil.")
				break CAPTURE
			}
		}
	}

	gracefulShutdown(channels, reChan, logChan)

}
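
foundLayerType, called in the capture loop above, is not shown on this page; a minimal sketch of what it presumably does:

//foundLayerType reports whether want is among the layer types that
//DecodeLayers filled in on its last call
func foundLayerType(want gopacket.LayerType, found []gopacket.LayerType) bool {
	for _, t := range found {
		if t == want {
			return true
		}
	}
	return false
}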
Example #7
func (stat *Stat) report(statsd *statsd.StatsdBuffer) {
	statsd.Timing(`requesttime.`+stat.path, int64(Round(stat.responseTime)))
}
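
Round is not defined in this snippet (math.Round only arrived in Go 1.10). A plausible half-away-from-zero definition that matches the int64 conversion above; the project's own helper may differ.

import "math"

//Round rounds half away from zero; the caller then converts to int64,
//giving conventional rounding for (positive) response times
func Round(f float64) float64 {
	if f < 0 {
		return math.Ceil(f - 0.5)
	}
	return math.Floor(f + 0.5)
}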