Example no. 1
// Called when a gap of nbytes bytes is found in the stream (due to
// packet loss).
func (http *Http) GapInStream(tcptuple *common.TcpTuple, dir uint8,
	nbytes int, private protos.ProtocolData) (priv protos.ProtocolData, drop bool) {

	defer logp.Recover("GapInStream(http) exception")

	if private == nil {
		return private, false
	}
	httpData, ok := private.(httpPrivateData)
	if !ok {
		return private, false
	}
	stream := httpData.Data[dir]
	if stream == nil || stream.message == nil {
		// nothing to do
		return private, false
	}

	ok, complete := http.messageGap(stream, nbytes)
	logp.Debug("httpdetailed", "messageGap returned ok=%v complete=%v", ok, complete)
	if !ok {
		// on errors, drop stream
		httpData.Data[dir] = nil
		return httpData, true
	}

	if complete {
		// Current message is complete, we need to publish from here
		http.messageComplete(tcptuple, dir, stream)
	}

	// don't drop the stream, we can ignore the gap
	return private, false
}
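Every handler in this listing begins with defer logp.Recover("..."), which turns a panic inside the protocol module into a logged error instead of crashing the whole sniffer. As a rough sketch of what such a deferred recover-and-log helper does in Go (illustrative only, not logp's actual implementation; recoverAndLog and parseSomething are hypothetical names, and the standard library log package stands in for logp):

// recoverAndLog is a hypothetical stand-in for logp.Recover: when deferred,
// it catches a panic raised later in the same function and logs it instead
// of letting the process die.
func recoverAndLog(msg string) {
	if r := recover(); r != nil {
		log.Printf("%s: recovered from panic: %v", msg, r)
	}
}

func parseSomething(payload []byte) {
	defer recoverAndLog("parseSomething exception")

	// A bug such as an out-of-range index panics here...
	_ = payload[42]
	// ...but the deferred helper recovers it, logs it, and parseSomething
	// simply returns without taking the process down.
}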
Example no. 2
func (dns *Dns) ParseUdp(pkt *protos.Packet) {
	defer logp.Recover("Dns ParseUdp")

	logp.Debug("dns", "Parsing packet addressed with %s of length %d.",
		pkt.Tuple.String(), len(pkt.Payload))

	dnsPkt, err := decodeDnsPacket(pkt.Payload)
	if err != nil {
		// This means that malformed requests or responses are being sent or
		// that someone is attempting to use the DNS port for non-DNS traffic. Both
		// are issues that a monitoring system should report.
		logp.Debug("dns", NonDnsPacketMsg+" addresses %s, length %d",
			pkt.Tuple.String(), len(pkt.Payload))
		return
	}

	dnsTuple := DnsTupleFromIpPort(&pkt.Tuple, TransportUdp, dnsPkt.ID)
	dnsMsg := &DnsMessage{
		Ts:           pkt.Ts,
		Tuple:        pkt.Tuple,
		CmdlineTuple: procs.ProcWatcher.FindProcessesTuple(&pkt.Tuple),
		Data:         dnsPkt,
		Length:       len(pkt.Payload),
	}

	if dnsMsg.Data.QR == Query {
		dns.receivedDnsRequest(&dnsTuple, dnsMsg)
	} else /* Response */ {
		dns.receivedDnsResponse(&dnsTuple, dnsMsg)
	}
}
Example no. 3
func FindPidsByCmdlineGrep(prefix string, process string) ([]int, error) {
	defer logp.Recover("FindPidsByCmdlineGrep exception")
	pids := []int{}

	proc, err := os.Open(filepath.Join(prefix, "/proc"))
	if err != nil {
		return pids, fmt.Errorf("Open /proc: %s", err)
	}

	names, err := proc.Readdirnames(0)
	if err != nil {
		return pids, fmt.Errorf("Readdirnames: %s", err)
	}

	for _, name := range names {
		pid, err := strconv.Atoi(name)
		if err != nil {
			continue
		}

		cmdline, err := ioutil.ReadFile(filepath.Join(prefix, "/proc/", name, "cmdline"))
		if err != nil {
			continue
		}

		if strings.Contains(string(cmdline), process) {
			pids = append(pids, pid)
		}
	}

	return pids, nil
}
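A minimal usage sketch for the function above. An empty prefix means the real /proc tree is scanned, while a test could pass a directory containing a fake proc layout instead; the process name "nginx" and the surrounding helper are hypothetical:

// logNginxPids logs every PID whose /proc/<pid>/cmdline mentions "nginx".
func logNginxPids() {
	pids, err := FindPidsByCmdlineGrep("", "nginx")
	if err != nil {
		logp.Err("FindPidsByCmdlineGrep failed: %v", err)
		return
	}
	for _, pid := range pids {
		logp.Debug("procs", "matching process pid=%d", pid)
	}
}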
Example no. 4
func (tcp *Tcp) Process(tcphdr *layers.TCP, pkt *protos.Packet) {

	// This Recover should catch all exceptions in
	// protocol modules.
	defer logp.Recover("FollowTcp exception")

	stream, exists := tcp.streamsMap[pkt.Tuple.Hashable()]
	var original_dir uint8 = TcpDirectionOriginal
	created := false
	if !exists {
		stream, exists = tcp.streamsMap[pkt.Tuple.RevHashable()]
		if !exists {
			protocol := tcp.decideProtocol(&pkt.Tuple)
			if protocol == protos.UnknownProtocol {
				// don't follow
				return
			}
			logp.Debug("tcp", "Stream doesn't exists, creating new")

			// create
			stream = &TcpStream{id: tcp.getId(), tuple: &pkt.Tuple, protocol: protocol, tcp: tcp}
			stream.tcptuple = common.TcpTupleFromIpPort(stream.tuple, stream.id)
			tcp.streamsMap[pkt.Tuple.Hashable()] = stream
			created = true
		} else {
			original_dir = TcpDirectionReverse
		}
	}
	tcp_start_seq := tcphdr.Seq
	tcp_seq := tcp_start_seq + uint32(len(pkt.Payload))

	logp.Debug("tcp", "pkt.start_seq=%v pkt.last_seq=%v stream.last_seq=%v (len=%d)",
		tcp_start_seq, tcp_seq, stream.lastSeq[original_dir], len(pkt.Payload))

	if len(pkt.Payload) > 0 &&
		stream.lastSeq[original_dir] != 0 {

		if TcpSeqBeforeEq(tcp_seq, stream.lastSeq[original_dir]) {

			logp.Debug("tcp", "Ignoring what looks like a retrasmitted segment. pkt.seq=%v len=%v stream.seq=%v",
				tcphdr.Seq, len(pkt.Payload), stream.lastSeq[original_dir])
			return
		}

		if TcpSeqBefore(stream.lastSeq[original_dir], tcp_start_seq) {
			if !created {
				logp.Debug("tcp", "Gap in tcp stream. last_seq: %d, seq: %d", stream.lastSeq[original_dir], tcp_start_seq)
				drop := stream.GapInStream(original_dir,
					int(tcp_start_seq-stream.lastSeq[original_dir]))
				if drop {
					logp.Debug("tcp", "Dropping stream because of gap")
					stream.Expire()
				}
			}
		}
	}
	stream.lastSeq[original_dir] = tcp_seq

	stream.AddPacket(pkt, tcphdr, original_dir)
}
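TcpSeqBefore and TcpSeqBeforeEq are used above but not shown. A common, wraparound-safe way to compare 32-bit TCP sequence numbers is serial-number arithmetic: subtract and look at the sign of the difference interpreted as a signed 32-bit integer. A sketch of how such helpers could look (an assumption about their implementation, not necessarily the exact code behind the names used here):

// tcpSeqBefore reports whether seq1 comes before seq2 in TCP sequence space.
// Interpreting the 32-bit difference as signed handles wraparound at 2^32:
// for example, tcpSeqBefore(0xFFFFFFF0, 0x00000010) is true because the
// difference 0xFFFFFFE0 is negative as an int32.
func tcpSeqBefore(seq1, seq2 uint32) bool {
	return int32(seq1-seq2) < 0
}

// tcpSeqBeforeEq reports whether seq1 comes before or is equal to seq2.
func tcpSeqBeforeEq(seq1, seq2 uint32) bool {
	return int32(seq1-seq2) <= 0
}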
Example no. 5
func (mysql *Mysql) GapInStream(tcptuple *common.TcpTuple, dir uint8,
	nbytes int, private protos.ProtocolData) (priv protos.ProtocolData, drop bool) {

	defer logp.Recover("GapInStream(mysql) exception")

	if private == nil {
		return private, false
	}
	mysqlData, ok := private.(mysqlPrivateData)
	if !ok {
		return private, false
	}
	stream := mysqlData.Data[dir]
	if stream == nil || stream.message == nil {
		// nothing to do
		return private, false
	}

	if mysql.messageGap(stream, nbytes) {
		// we need to publish from here
		mysql.messageComplete(tcptuple, dir, stream)
	}

	// We always drop the TCP stream. Because the protocol is binary and
	// length-based, there are too few cases in which we could recover the
	// stream (maybe for very large blobs; leaving that as a TODO).
	return private, true
}
Example no. 6
// GapInStream is called by the TCP layer when packets are missing from the
// TCP stream.
func (mc *Memcache) GapInStream(
	tcptuple *common.TcpTuple,
	dir uint8, nbytes int,
	private protos.ProtocolData,
) (priv protos.ProtocolData, drop bool) {
	debug("memcache(tcp) stream gap detected")

	defer logp.Recover("GapInStream(memcache) exception")
	if !isMemcacheConnection(private) {
		return private, false
	}

	conn := private.(*tcpConnectionData)
	stream := conn.Streams[dir]
	parser := stream.parser
	msg := parser.message

	if msg != nil {
		if msg.IsRequest {
			msg.AddNotes(NoteRequestPacketLoss)
		} else {
			msg.AddNotes(NoteResponsePacketLoss)
		}
	}

	// If we are about to read length-encoded binary data and the gap falls
	// entirely within the data area, we might be able to continue processing
	// the stream and its transactions.
	inData := parser.state == parseStateDataBinary ||
		parser.state == parseStateIncompleteDataBinary ||
		parser.state == parseStateData ||
		parser.state == parseStateIncompleteData
	if inData {
		if msg == nil {
			logp.WTF("parser message is nil on data load")
			return private, true
		}

		alreadyRead := stream.Buf.Len() - int(msg.bytesLost)
		dataRequired := int(msg.bytes) - alreadyRead
		if nbytes <= dataRequired {
			// All missing bytes fall inside the message's binary data part,
			// so skip that part and resume parsing.
			if msg.isBinary {
				parser.state = parseStateIncompleteDataBinary
			} else {
				parser.state = parseStateIncompleteData
			}
			msg.bytesLost += uint(nbytes)
			return private, false
		}
	}

	// Need to drop the TCP stream, but try to publish all cached transactions first.
	mc.pushAllTCPTrans(conn.connection)
	return private, true
}
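The alreadyRead/dataRequired arithmetic above decides whether the whole gap fits inside the binary data section still pending for the current message; if it does, the parser simply accounts the gap as lost payload and keeps going. A stand-alone sketch of the same check with hypothetical numbers (the parameter names mirror the fields above, but this is illustrative only):

// canSkipGap reports whether a gap of nbytes bytes is fully absorbed by the
// data section of the current message: bufferedLen bytes of it sit in the
// stream buffer, bytesLost bytes were already lost earlier, and msgBytes is
// the total data length announced in the message header.
func canSkipGap(nbytes, bufferedLen, bytesLost, msgBytes int) bool {
	alreadyRead := bufferedLen - bytesLost // data bytes accounted for so far
	dataRequired := msgBytes - alreadyRead // data bytes still expected
	return nbytes <= dataRequired
}

// Example: for a 1024-byte value with 200 bytes buffered and nothing lost yet,
// a 300-byte gap fits within the remaining 824 bytes, so parsing can continue:
// canSkipGap(300, 200, 0, 1024) == true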
Example no. 7
func (mysql *Mysql) Parse(pkt *protos.Packet, tcptuple *common.TcpTuple,
	dir uint8, private protos.ProtocolData) protos.ProtocolData {

	defer logp.Recover("ParseMysql exception")

	priv := mysqlPrivateData{}
	if private != nil {
		var ok bool
		priv, ok = private.(mysqlPrivateData)
		if !ok {
			priv = mysqlPrivateData{}
		}
	}

	if priv.Data[dir] == nil {
		priv.Data[dir] = &MysqlStream{
			tcptuple: tcptuple,
			data:     pkt.Payload,
			message:  &MysqlMessage{Ts: pkt.Ts},
		}
	} else {
		// concatenate bytes
		priv.Data[dir].data = append(priv.Data[dir].data, pkt.Payload...)
		if len(priv.Data[dir].data) > tcp.TCP_MAX_DATA_IN_STREAM {
			logp.Debug("mysql", "Stream data too large, dropping TCP stream")
			priv.Data[dir] = nil
			return priv
		}
	}

	stream := priv.Data[dir]
	for len(stream.data) > 0 {
		if stream.message == nil {
			stream.message = &MysqlMessage{Ts: pkt.Ts}
		}

		ok, complete := mysqlMessageParser(priv.Data[dir])
		//logp.Debug("mysqldetailed", "mysqlMessageParser returned ok=%b complete=%b", ok, complete)
		if !ok {
			// drop this tcp stream. Will retry parsing with the next
			// segment in it
			priv.Data[dir] = nil
			logp.Debug("mysql", "Ignore MySQL message. Drop tcp stream. Try parsing with the next segment")
			return priv
		}

		if complete {
			mysql.messageComplete(tcptuple, dir, stream)
		} else {
			// wait for more data
			break
		}
	}
	return priv
}
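The MySQL parser above follows a per-direction skeleton that several later examples repeat almost verbatim: append the new payload to the direction's stream buffer, guard against unbounded growth, then loop the message parser until it fails (drop the stream), needs more data (break), or completes a message (publish and reset). A protocol-agnostic sketch of that skeleton, with placeholder names (genericStream, parseMessage, publish and maxStreamData are not real Packetbeat identifiers; the parser here is assumed to report how many bytes a completed message consumed):

// genericStream accumulates TCP payload for one direction of a connection.
type genericStream struct {
	data    []byte
	message *genericMessage
}

type genericMessage struct {
	// protocol-specific fields
}

const maxStreamData = 10 * 1024 * 1024 // cap on buffered bytes per direction

// feedStream appends payload and drives the parse loop. It returns nil when
// the stream should be dropped (oversized buffer or parse error).
func feedStream(
	stream *genericStream,
	payload []byte,
	parseMessage func(*genericStream) (ok, complete bool, consumed int),
	publish func(*genericMessage),
) *genericStream {
	stream.data = append(stream.data, payload...)
	if len(stream.data) > maxStreamData {
		return nil // stream data too large, drop it
	}

	for len(stream.data) > 0 {
		if stream.message == nil {
			stream.message = &genericMessage{}
		}
		ok, complete, consumed := parseMessage(stream)
		if !ok {
			return nil // parse error: drop the stream, retry with the next segment
		}
		if !complete {
			break // wait for more data
		}
		publish(stream.message)
		stream.data = stream.data[consumed:] // discard the parsed bytes
		stream.message = nil                 // and start a fresh message
	}
	return stream
}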
Example no. 8
// Parse function is used to process TCP payloads.
func (http *HTTP) Parse(pkt *protos.Packet, tcptuple *common.TcpTuple,
	dir uint8, private protos.ProtocolData) protos.ProtocolData {

	defer logp.Recover("ParseHttp exception")

	detailedf("Payload received: [%s]", pkt.Payload)

	priv := httpConnectionData{}
	if private != nil {
		var ok bool
		priv, ok = private.(httpConnectionData)
		if !ok {
			priv = httpConnectionData{}
		}
	}

	if priv.Streams[dir] == nil {
		priv.Streams[dir] = &stream{
			tcptuple: tcptuple,
			data:     pkt.Payload,
			message:  &message{Ts: pkt.Ts},
		}

	} else {
		// concatenate bytes
		priv.Streams[dir].data = append(priv.Streams[dir].data, pkt.Payload...)
		if len(priv.Streams[dir].data) > tcp.TCP_MAX_DATA_IN_STREAM {
			debugf("Stream data too large, dropping TCP stream")
			priv.Streams[dir] = nil
			return priv
		}
	}
	stream := priv.Streams[dir]
	if stream.message == nil {
		stream.message = &message{Ts: pkt.Ts}
	}

	parser := newParser(&http.parserConfig)
	ok, complete := parser.parse(stream)
	if !ok {
		// drop this tcp stream. Will retry parsing with the next
		// segment in it
		priv.Streams[dir] = nil
		return priv
	}

	if complete {
		// all ok, ship it
		http.messageComplete(tcptuple, dir, stream)
	}

	return priv
}
Example no. 9
func (redis *Redis) Parse(
	pkt *protos.Packet,
	tcptuple *common.TcpTuple,
	dir uint8,
	private protos.ProtocolData,
) protos.ProtocolData {
	defer logp.Recover("ParseRedis exception")

	conn := ensureRedisConnection(private)
	conn = redis.doParse(conn, pkt, tcptuple, dir)
	if conn == nil {
		return nil
	}
	return conn
}
Example no. 10
func (mongodb *Mongodb) Parse(
	pkt *protos.Packet,
	tcptuple *common.TcpTuple,
	dir uint8,
	private protos.ProtocolData,
) protos.ProtocolData {
	defer logp.Recover("ParseMongodb exception")
	debugf("Parse method triggered")

	conn := ensureMongodbConnection(private)
	conn = mongodb.doParse(conn, pkt, tcptuple, dir)
	if conn == nil {
		return nil
	}
	return conn
}
Example no. 11
// Parse is called from TCP layer when payload data is available for parsing.
func (mc *Memcache) Parse(
	pkt *protos.Packet,
	tcptuple *common.TcpTuple,
	dir uint8,
	private protos.ProtocolData,
) protos.ProtocolData {
	defer logp.Recover("ParseMemcache(TCP) exception")

	tcpConn := ensureMemcacheConnection(private)
	debug("memcache connection %p", tcpConn)
	tcpConn = mc.memcacheParseTCP(tcpConn, pkt, tcptuple, dir)
	if tcpConn == nil {
		// explicitly return nil if tcpConn is nil, so the returned ProtocolData
		// interface value is truly nil (and not an interface wrapping a nil pointer)
		return nil
	}
	return tcpConn
}
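The comment above refers to a classic Go pitfall: storing a nil *tcpConnectionData in the protos.ProtocolData interface yields an interface value that is not equal to nil, so callers checking private == nil would be misled. A tiny illustration with generic names (connData, asInterface and typedNilDemo are hypothetical, unrelated to the memcache types):

type connData struct{}

// asInterface wraps a possibly-nil pointer in an empty interface value.
func asInterface(c *connData) interface{} {
	return c
}

func typedNilDemo() {
	var c *connData                    // nil pointer
	var i interface{} = asInterface(c) // interface holding a (*connData)(nil)

	fmt.Println(c == nil) // true
	fmt.Println(i == nil) // false: the interface carries a type, so it is not the nil interface
}

Returning a literal nil, as the wrappers above do, avoids handing such a non-nil-but-empty value back to the TCP layer.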
Example no. 12
// Called when packets are dropped, leaving a gap in the stream.
func (pgsql *Pgsql) GapInStream(tcptuple *common.TcpTuple, dir uint8,
	nbytes int, private protos.ProtocolData) (priv protos.ProtocolData, drop bool) {

	defer logp.Recover("GapInPgsqlStream exception")

	if private == nil {
		return private, false
	}
	pgsqlData, ok := private.(pgsqlPrivateData)
	if !ok {
		return private, false
	}
	if pgsqlData.Data[dir] == nil {
		return pgsqlData, false
	}

	// If enough data was received, send it to the
	// next layer but mark it as incomplete.
	stream := pgsqlData.Data[dir]
	if messageHasEnoughData(stream.message) {
		logp.Debug("pgsql", "Message not complete, but sending to the next layer")
		m := stream.message
		m.toExport = true
		m.end = stream.parseOffset
		if m.IsRequest {
			m.Notes = append(m.Notes, "Packet loss while capturing the request")
		} else {
			m.Notes = append(m.Notes, "Packet loss while capturing the response")
		}

		msg := stream.data[stream.message.start:stream.message.end]
		pgsql.handlePgsql(pgsql, stream.message, tcptuple, dir, msg)

		// and reset message
		stream.PrepareForNewMessage()
	}
	return pgsqlData, true
}
Example no. 13
func (proc *ProcessesWatcher) FindProc(port uint16) (procname string) {
	procname = ""
	defer logp.Recover("FindProc exception")

	p, exists := proc.PortProcMap[port]
	if exists {
		return p.Proc.Name
	}

	now := time.Now()

	if now.Sub(proc.LastMapUpdate) > proc.MaxReadFreq {
		proc.LastMapUpdate = now
		proc.UpdateMap()

		// try again
		p, exists := proc.PortProcMap[port]
		if exists {
			return p.Proc.Name
		}
	}

	return ""
}
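FindProc above refreshes the port-to-process map at most once per MaxReadFreq before retrying a missed lookup, a simple time-based throttle around an expensive scan. A generic sketch of the same idea (throttledCache and its fields are hypothetical names, not the ProcessesWatcher API):

// throttledCache retries a missed lookup after rebuilding its map, but
// rebuilds at most once per minInterval.
type throttledCache struct {
	entries     map[uint16]string
	lastRefresh time.Time
	minInterval time.Duration
	rebuild     func() map[uint16]string // expensive scan, e.g. walking /proc
}

func (c *throttledCache) lookup(port uint16) string {
	if name, ok := c.entries[port]; ok {
		return name
	}
	if now := time.Now(); now.Sub(c.lastRefresh) > c.minInterval {
		c.lastRefresh = now
		c.entries = c.rebuild()
		if name, ok := c.entries[port]; ok { // try again after the refresh
			return name
		}
	}
	return ""
}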
Example no. 14
func (tcp *Tcp) Process(tcphdr *layers.TCP, pkt *protos.Packet) {

	// This Recover should catch all exceptions in
	// protocol modules.
	defer logp.Recover("Process tcp exception")

	stream := tcp.getStream(pkt.Tuple.Hashable())
	var original_dir uint8 = TcpDirectionOriginal
	created := false
	if stream == nil {
		stream = tcp.getStream(pkt.Tuple.RevHashable())
		if stream == nil {
			protocol := tcp.decideProtocol(&pkt.Tuple)
			if protocol == protos.UnknownProtocol {
				// don't follow
				return
			}

			timeout := time.Duration(0)
			mod := tcp.protocols.GetTcp(protocol)
			if mod != nil {
				timeout = mod.ConnectionTimeout()
			}

			logp.Debug("tcp", "Stream doesn't exist, creating new")

			// create
			stream = &TcpStream{id: tcp.getId(), tuple: &pkt.Tuple, protocol: protocol, tcp: tcp}
			stream.tcptuple = common.TcpTupleFromIpPort(stream.tuple, stream.id)
			tcp.streams.PutWithTimeout(pkt.Tuple.Hashable(), stream, timeout)
			created = true
		} else {
			original_dir = TcpDirectionReverse
		}
	}
	tcp_start_seq := tcphdr.Seq
	tcp_seq := tcp_start_seq + uint32(len(pkt.Payload))

	logp.Debug("tcp", "pkt.start_seq=%v pkt.last_seq=%v stream.last_seq=%v (len=%d)",
		tcp_start_seq, tcp_seq, stream.lastSeq[original_dir], len(pkt.Payload))

	if len(pkt.Payload) > 0 &&
		stream.lastSeq[original_dir] != 0 {

		if tcpSeqBeforeEq(tcp_seq, stream.lastSeq[original_dir]) {
			logp.Debug("tcp", "Ignoring what looks like a retransmitted segment. pkt.seq=%v len=%v stream.seq=%v",
				tcphdr.Seq, len(pkt.Payload), stream.lastSeq[original_dir])
			return
		}

		if tcpSeqBefore(stream.lastSeq[original_dir], tcp_start_seq) {
			if !created {
				logp.Debug("tcp", "Gap in tcp stream. last_seq: %d, seq: %d", stream.lastSeq[original_dir], tcp_start_seq)
				drop := stream.gapInStream(original_dir,
					int(tcp_start_seq-stream.lastSeq[original_dir]))
				if drop {
					logp.Debug("tcp", "Dropping stream because of gap")
					tcp.streams.Delete(stream.tuple.Hashable())
				}
			}
		}
	}
	stream.lastSeq[original_dir] = tcp_seq

	stream.addPacket(pkt, tcphdr, original_dir)
}
Example no. 15
func (redis *Redis) Parse(pkt *protos.Packet, tcptuple *common.TcpTuple, dir uint8,
	private protos.ProtocolData) protos.ProtocolData {

	defer logp.Recover("ParseRedis exception")

	priv := redisPrivateData{}
	if private != nil {
		var ok bool
		priv, ok = private.(redisPrivateData)
		if !ok {
			priv = redisPrivateData{}
		}
	}

	if priv.Data[dir] == nil {
		priv.Data[dir] = &RedisStream{
			tcptuple: tcptuple,
			data:     pkt.Payload,
			message:  &RedisMessage{Ts: pkt.Ts},
		}
	} else {
		// concatenate bytes
		priv.Data[dir].data = append(priv.Data[dir].data, pkt.Payload...)
		if len(priv.Data[dir].data) > tcp.TCP_MAX_DATA_IN_STREAM {
			logp.Debug("redis", "Stream data too large, dropping TCP stream")
			priv.Data[dir] = nil
			return priv
		}
	}

	stream := priv.Data[dir]
	for len(stream.data) > 0 {
		if stream.message == nil {
			stream.message = &RedisMessage{Ts: pkt.Ts}
		}

		ok, complete := redisMessageParser(priv.Data[dir])

		if !ok {
			// drop this tcp stream. Will retry parsing with the next
			// segment in it
			priv.Data[dir] = nil
			logp.Debug("redis", "Ignore Redis message. Drop tcp stream. Try parsing with the next segment")
			return priv
		}

		if complete {

			if stream.message.IsRequest {
				logp.Debug("redis", "REDIS request message: %s", stream.message.Message)
			} else {
				logp.Debug("redis", "REDIS response message: %s", stream.message.Message)
			}

			// all ok, go to next level
			redis.handleRedis(stream.message, tcptuple, dir)

			// and reset message
			stream.PrepareForNewMessage()
		} else {
			// wait for more data
			break
		}
	}

	return priv
}
Example no. 16
func (mc *Memcache) ParseUdp(pkt *protos.Packet) {
	defer logp.Recover("ParseMemcache(UDP) exception")

	buffer := streambuf.NewFixed(pkt.Payload)
	header, err := parseUdpHeader(buffer)
	if err != nil {
		debug("parsing memcache udp header failed")
		return
	}

	debug("new udp datagram requestId=%v, seqNumber=%v, numDatagrams=%v",
		header.requestId, header.seqNumber, header.numDatagrams)

	// find connection object based on ips and ports (forward->reverse connection)
	connection, dir := mc.getUdpConnection(&pkt.Tuple)
	debug("udp connection: %p", connection)

	// get udp transaction combining forward/reverse direction 'streams'
	// for current requestId
	trans := connection.udpTransactionForId(header.requestId)
	debug("udp transaction (id=%v): %p", header.requestId, trans)

	// Clean up old transactions. We do the cleanup after potentially adding a
	// new transaction to the connection object, so the connection object will
	// not be cleaned up accidentally (not harmful, but we'd rather reuse it).
	expTrans := mc.udpExpTrans.steal()
	for expTrans != nil {
		tmp := expTrans.next
		expTrans.connection.killTransaction(expTrans)
		expTrans = tmp
	}

	// get UDP transaction stream combining datagram packets in transaction
	udpMsg := trans.udpMessageForDir(&header, dir)
	if udpMsg.numDatagrams != header.numDatagrams {
		logp.Warn("number of datagram mismatches in stream")
		connection.killTransaction(trans)
		return
	}

	// try to combine datagrams into complete memcached message
	payload := udpMsg.addDatagram(&header, buffer.Bytes())
	done := false
	if payload != nil {
		// parse memcached message
		msg, err := parseUdp(&mc.config, pkt.Ts, payload)
		if err != nil {
			logp.Warn("failed to parse memcached(UDP) message: %s", err)
			connection.killTransaction(trans)
			return
		}

		// apply the memcached message to the transaction
		done, err = mc.onUdpMessage(trans, &pkt.Tuple, dir, msg)
		if err != nil {
			logp.Warn("error processing memcache message: %s", err)
			connection.killTransaction(trans)
			done = true
		}
	}
	if !done {
		trans.timer = time.AfterFunc(mc.udpConfig.transTimeout, func() {
			debug("transaction timeout -> forward")
			mc.onUdpTrans(trans)
			mc.udpExpTrans.push(trans)
		})
	}
}
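The UDP path above arms a time.AfterFunc timer so that a transaction that never completes is still published after transTimeout. A reduced sketch of that arm-and-cancel pattern (armTransactionTimeout and publish are placeholder names; the real module keeps the *time.Timer on the transaction itself):

// armTransactionTimeout schedules publish to run once timeout elapses. The
// returned stop function (the timer's Stop method) should be called when the
// transaction completes normally; if it returns false, the timer already
// fired and publish may be running concurrently.
func armTransactionTimeout(timeout time.Duration, publish func()) (stop func() bool) {
	t := time.AfterFunc(timeout, func() {
		// Runs on the timer goroutine after the deadline passes.
		publish()
	})
	return t.Stop
}

// Usage, mirroring the code above:
//   stop := armTransactionTimeout(mc.udpConfig.transTimeout, func() { mc.onUdpTrans(trans) })
//   ... later, if the transaction completes in time: stop()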
Example no. 17
func (mongodb *Mongodb) Parse(pkt *protos.Packet, tcptuple *common.TcpTuple, dir uint8,
	private protos.ProtocolData) protos.ProtocolData {

	logp.Debug("mongodb", "Parse method triggered")

	defer logp.Recover("ParseMongodb exception")

	// Either fetch or initialize current data struct for this parser
	priv := mongodbPrivateData{}
	if private != nil {
		var ok bool
		priv, ok = private.(mongodbPrivateData)
		if !ok {
			priv = mongodbPrivateData{}
		}
	}

	if priv.Data[dir] == nil {
		priv.Data[dir] = &MongodbStream{
			tcptuple: tcptuple,
			data:     pkt.Payload,
			message:  &MongodbMessage{Ts: pkt.Ts},
		}
	} else {
		// concatenate bytes
		priv.Data[dir].data = append(priv.Data[dir].data, pkt.Payload...)
		if len(priv.Data[dir].data) > tcp.TCP_MAX_DATA_IN_STREAM {
			logp.Debug("mongodb", "Stream data too large, dropping TCP stream")
			priv.Data[dir] = nil
			return priv
		}
	}

	stream := priv.Data[dir]
	for len(stream.data) > 0 {
		if stream.message == nil {
			stream.message = &MongodbMessage{Ts: pkt.Ts}
		}

		ok, complete := mongodbMessageParser(priv.Data[dir])

		if !ok {
			// drop this tcp stream. Will retry parsing with the next
			// segment in it
			priv.Data[dir] = nil
			logp.Debug("mongodb", "Ignore Mongodb message. Drop tcp stream. Try parsing with the next segment")
			return priv
		}

		if complete {

			logp.Debug("mongodb", "MongoDB message complete")

			// all ok, go to next level
			mongodb.handleMongodb(stream.message, tcptuple, dir)

			// and reset message
			stream.PrepareForNewMessage()
		} else {
			// wait for more data
			logp.Debug("mongodb", "MongoDB wait for more data before parsing message")
			break
		}
	}

	return priv
}
Example no. 18
func (pgsql *Pgsql) Parse(pkt *protos.Packet, tcptuple *common.TcpTuple,
	dir uint8, private protos.ProtocolData) protos.ProtocolData {

	defer logp.Recover("ParsePgsql exception")

	priv := pgsqlPrivateData{}
	if private != nil {
		var ok bool
		priv, ok = private.(pgsqlPrivateData)
		if !ok {
			priv = pgsqlPrivateData{}
		}
	}

	if priv.Data[dir] == nil {
		priv.Data[dir] = &PgsqlStream{
			tcptuple: tcptuple,
			data:     pkt.Payload,
			message:  &PgsqlMessage{Ts: pkt.Ts},
		}
		logp.Debug("pgsqldetailed", "New stream created")
	} else {
		// concatenate bytes
		priv.Data[dir].data = append(priv.Data[dir].data, pkt.Payload...)
		logp.Debug("pgsqldetailed", "Len data: %d cap data: %d", len(priv.Data[dir].data), cap(priv.Data[dir].data))
		if len(priv.Data[dir].data) > tcp.TCP_MAX_DATA_IN_STREAM {
			logp.Debug("pgsql", "Stream data too large, dropping TCP stream")
			priv.Data[dir] = nil
			return priv
		}
	}

	stream := priv.Data[dir]

	if priv.Data[1-dir] != nil && priv.Data[1-dir].seenSSLRequest {
		stream.expectSSLResponse = true
	}

	for len(stream.data) > 0 {

		if stream.message == nil {
			stream.message = &PgsqlMessage{Ts: pkt.Ts}
		}

		ok, complete := pgsql.pgsqlMessageParser(priv.Data[dir])
		//logp.Debug("pgsqldetailed", "MessageParser returned ok=%v complete=%v", ok, complete)
		if !ok {
			// drop this tcp stream. Will retry parsing with the next
			// segment in it
			priv.Data[dir] = nil
			logp.Debug("pgsql", "Ignore Postgresql message. Drop tcp stream. Try parsing with the next segment")
			return priv
		}

		if complete {
			// all ok, ship it
			msg := stream.data[stream.message.start:stream.message.end]

			if stream.message.isSSLRequest {
				// SSL request
				stream.seenSSLRequest = true
			} else if stream.message.isSSLResponse {
				// SSL request answered
				stream.expectSSLResponse = false
				priv.Data[1-dir].seenSSLRequest = false
			} else {
				if stream.message.toExport {
					pgsql.handlePgsql(pgsql, stream.message, tcptuple, dir, msg)
				}
			}

			// and reset message
			stream.PrepareForNewMessage()

		} else {
			// wait for more data
			break
		}
	}
	return priv
}
Example no. 19
func (openFlow *OpenFlow) Parse(pkt *protos.Packet, tcptuple *common.TcpTuple, dir uint8,
	private protos.ProtocolData) protos.ProtocolData {

	defer logp.Recover("ParseOpenFlow exception")

	priv := OpenFlowPrivateData{}
	if private != nil {
		var ok bool
		priv, ok = private.(OpenFlowPrivateData)
		if !ok {
			priv = OpenFlowPrivateData{}
		}
	}

	if priv.Data[dir] == nil {
		priv.Data[dir] = &OpenFlowStream{
			tcptuple: tcptuple,
			data:     pkt.Payload,
			message:  &OpenFlowMessage{Ts: pkt.Ts},
		}
	} else {
		// concatenate bytes
		priv.Data[dir].data = append(priv.Data[dir].data, pkt.Payload...)
		if len(priv.Data[dir].data) > tcp.TCP_MAX_DATA_IN_STREAM {
			logp.Debug("openflow", "Stream data too large, dropping TCP stream")
			priv.Data[dir] = nil
			return priv
		}
	}

	stream := priv.Data[dir]
	for len(stream.data) > 0 {
		if stream.message == nil {
			stream.message = &OpenFlowMessage{Ts: pkt.Ts}
		}

		ok, complete := openFlowMessageParser(priv.Data[dir])

		if !ok {
			// drop this tcp stream. Will retry parsing with the next
			// segment in it
			priv.Data[dir] = nil
			logp.Debug("openflow", "Ignore OpenFlow message. Drop tcp stream. Try parsing with the next segment")
			return priv
		}

		if complete {

			logp.Err("openflow", "OpenFlow message type: ", stream.message.messageType)

			// all ok, go to next level
			openFlow.handleOpenFlow(stream.message, tcptuple, dir)

			// and reset message
			stream.PrepareForNewMessage()
		} else {
			// wait for more data
			break
		}
	}

	return priv
}
Example no. 20
func (thrift *Thrift) Parse(pkt *protos.Packet, tcptuple *common.TcpTuple, dir uint8,
	private protos.ProtocolData) protos.ProtocolData {

	defer logp.Recover("ParseThrift exception")

	priv := thriftPrivateData{}
	if private != nil {
		var ok bool
		priv, ok = private.(thriftPrivateData)
		if !ok {
			priv = thriftPrivateData{}
		}
	}

	stream := priv.Data[dir]

	if stream == nil {
		stream = &ThriftStream{
			tcptuple: tcptuple,
			data:     pkt.Payload,
			message:  &ThriftMessage{Ts: pkt.Ts},
		}
		priv.Data[dir] = stream
	} else {
		if stream.skipInput {
			// stream currently suspended in this direction
			return priv
		}
		// concatenate bytes
		stream.data = append(stream.data, pkt.Payload...)
		if len(stream.data) > tcp.TCP_MAX_DATA_IN_STREAM {
			logp.Debug("thrift", "Stream data too large, dropping TCP stream")
			priv.Data[dir] = nil
			return priv
		}
	}

	for len(stream.data) > 0 {
		if stream.message == nil {
			stream.message = &ThriftMessage{Ts: pkt.Ts}
		}

		ok, complete := thrift.messageParser(priv.Data[dir])
		logp.Debug("thriftdetailed", "messageParser returned %v %v", ok, complete)

		if !ok {
			// drop this tcp stream. Will retry parsing with the next
			// segment in it
			priv.Data[dir] = nil
			logp.Debug("thrift", "Ignore Thrift message. Drop tcp stream. Try parsing with the next segment")
			return priv
		}

		if complete {
			thrift.messageComplete(tcptuple, dir, stream, &priv)
		} else {
			// wait for more data
			break
		}
	}

	logp.Debug("thriftdetailed", "Out")

	return priv
}