Code example #1
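// BytesPerMinutePipeline builds the stages that map raw traces into per-minute
// byte counts, reduce those into per-hour totals, and copy the per-hour totals
// to bytesPerHourPostgresStore. The appended TraceKeyRangesPipeline stages
// presumably record which trace key ranges have already been processed, since
// the mapper reads traces through a RangeExcludingReader keyed on that store.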
func BytesPerMinutePipeline(levelDbManager store.Manager, bytesPerHourPostgresStore store.Writer) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	mappedStore := levelDbManager.ReadingWriter("bytesperminute-mapped")
	bytesPerMinuteStore := levelDbManager.ReadingWriter("bytesperminute")
	bytesPerHourStore := levelDbManager.ReadingWriter("bytesperhour")
	traceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperminute-trace-key-ranges")
	consolidatedTraceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperminute-consolidated-trace-key-ranges")
	return append([]transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "BytesPerMinuteMapper",
			Reader:      store.NewRangeExcludingReader(tracesStore, traceKeyRangesStore),
			Transformer: transformer.MakeDoTransformer(bytesPerMinuteMapper(transformer.NewNonce())),
			Writer:      mappedStore,
		},
		transformer.PipelineStage{
			Name:        "BytesPerMinuteReducer",
			Reader:      mappedStore,
			Transformer: transformer.TransformFunc(bytesPerMinuteReducer),
			Writer:      bytesPerMinuteStore,
		},
		transformer.PipelineStage{
			Name:        "BytesPerHourReducer",
			Reader:      bytesPerMinuteStore,
			Transformer: transformer.TransformFunc(bytesPerHourReducer),
			Writer:      bytesPerHourStore,
		},
		transformer.PipelineStage{
			Name:   "BytesPerHourPostgres",
			Reader: bytesPerHourStore,
			Writer: bytesPerHourPostgresStore,
		},
	}, TraceKeyRangesPipeline(store.NewRangeExcludingReader(tracesStore, traceKeyRangesStore), traceKeyRangesStore, consolidatedTraceKeyRangesStore)...)
}
Code example #2
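// BytesPerDevicePipeline splits each new trace into address, flow, and packet
// tables, joins flow IDs with MAC addresses, joins that mapping with packet
// sizes, and reduces the result to bytes per device per session and then to
// bytes per device overall before copying it to bytesPerDevicePostgresStore.
// Traces are read only within the consistent ranges produced by the
// availability pipeline, excluding trace key ranges processed on earlier runs.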
func BytesPerDevicePipeline(levelDbManager store.Manager, bytesPerDevicePostgresStore store.Writer) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	availabilityIntervalsStore := levelDbManager.Seeker("consistent-ranges")
	sessionsStore := levelDbManager.ReadingDeleter("bytesperdevice-session")
	addressTableStore := levelDbManager.SeekingWriter("bytesperdevice-address-table")
	flowTableStore := levelDbManager.SeekingWriter("bytesperdevice-flow-table")
	packetsStore := levelDbManager.SeekingWriter("bytesperdevice-packets")
	flowIdToMacStore := levelDbManager.SeekingWriter("bytesperdevice-flow-id-to-mac")
	flowIdToMacsStore := levelDbManager.SeekingWriter("bytesperdevice-flow-id-to-macs")
	bytesPerDeviceUnreducedStore := levelDbManager.SeekingWriter("bytesperdevice-unreduced")
	bytesPerDeviceSessionStore := levelDbManager.ReadingWriter("bytesperdevice-reduced-sessions")
	bytesPerDeviceStore := levelDbManager.ReadingWriter("bytesperdevice")
	traceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperdevice-trace-key-ranges")
	consolidatedTraceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperdevice-consolidated-trace-key-ranges")
	newTracesStore := store.NewRangeExcludingReader(store.NewRangeIncludingReader(tracesStore, availabilityIntervalsStore), traceKeyRangesStore)
	return append([]transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "BytesPerDeviceMapper",
			Reader:      newTracesStore,
			Transformer: transformer.MakeMultipleOutputsDoFunc(bytesPerDeviceMapper, 3),
			Writer:      store.NewMuxingWriter(addressTableStore, flowTableStore, packetsStore),
		},
		SessionPipelineStage(newTracesStore, sessionsStore),
		transformer.PipelineStage{
			Name:        "JoinMacAndFlowId",
			Reader:      store.NewPrefixIncludingReader(store.NewDemuxingSeeker(addressTableStore, flowTableStore), sessionsStore),
			Transformer: transformer.TransformFunc(joinMacAndFlowId),
			Writer:      flowIdToMacStore,
		},
		transformer.PipelineStage{
			Name:        "FlattenMacAddresses",
			Reader:      store.NewPrefixIncludingReader(flowIdToMacStore, sessionsStore),
			Transformer: transformer.TransformFunc(flattenMacAddresses),
			Writer:      flowIdToMacsStore,
		},
		transformer.PipelineStage{
			Name:        "JoinMacAndSizes",
			Reader:      store.NewPrefixIncludingReader(store.NewDemuxingSeeker(flowIdToMacsStore, packetsStore), sessionsStore),
			Transformer: transformer.TransformFunc(joinMacAndSizes),
			Writer:      bytesPerDeviceUnreducedStore,
		},
		transformer.PipelineStage{
			Name:        "ReduceBytesPerDeviceSession",
			Reader:      store.NewPrefixIncludingReader(bytesPerDeviceUnreducedStore, sessionsStore),
			Transformer: transformer.TransformFunc(reduceBytesPerDeviceSession),
			Writer:      bytesPerDeviceSessionStore,
		},
		transformer.PipelineStage{
			Name:        "ReduceBytesPerDevice",
			Reader:      bytesPerDeviceSessionStore,
			Transformer: transformer.TransformFunc(reduceBytesPerDevice),
			Writer:      bytesPerDeviceStore,
		},
		transformer.PipelineStage{
			Name:   "BytesPerDevicePostgres",
			Reader: bytesPerDeviceStore,
			Writer: bytesPerDevicePostgresStore,
		},
	}, TraceKeyRangesPipeline(newTracesStore, traceKeyRangesStore, consolidatedTraceKeyRangesStore)...)
}
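All of these constructors return a transformer.Pipeline assembled from transformer.PipelineStage values, so independent pipelines can be concatenated before being handed to whatever driver runs the stages. A minimal sketch of that composition, assuming the stages may simply run back to back (CombinedPipeline and its argument names are illustrative, not part of the original project):
func CombinedPipeline(levelDbManager store.Manager, bytesPerHourPostgresStore, bytesPerDevicePostgresStore store.Writer) transformer.Pipeline {
	// Each constructor keeps its own trace-key-range bookkeeping stores
	// ("bytesperminute-*" vs. "bytesperdevice-*"), so their stages should not
	// interfere with one another when appended into a single pipeline.
	pipeline := BytesPerMinutePipeline(levelDbManager, bytesPerHourPostgresStore)
	return append(pipeline, BytesPerDevicePipeline(levelDbManager, bytesPerDevicePostgresStore)...)
}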
Code example #3
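// AvailabilityPipeline derives availability intervals from the traces store,
// consolidates them, reduces them per node, and writes the per-node result as
// JSON (tagged with the supplied timestamp) to jsonWriter. It also regenerates
// the "availability-done" and "consistent-ranges" stores that the other
// pipelines use to restrict which traces they read.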
func AvailabilityPipeline(levelDbManager store.Manager, jsonWriter io.Writer, timestamp int64) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	intervalsStore := levelDbManager.ReadingWriter("availability-intervals")
	consolidatedStore := levelDbManager.ReadingDeleter("availability-consolidated")
	nodesStore := levelDbManager.ReadingDeleter("availability-nodes")
	excludeRangesStore := levelDbManager.ReadingDeleter("availability-done")
	consistentRangesStore := levelDbManager.ReadingDeleter("consistent-ranges")
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "AvailabilityIntervals",
			Reader:      store.NewRangeExcludingReader(tracesStore, excludeRangesStore),
			Transformer: transformer.TransformFunc(availabilityIntervals),
			Writer:      intervalsStore,
		},
		transformer.PipelineStage{
			Name:        "ConsolidateAvailabilityIntervals",
			Reader:      intervalsStore,
			Transformer: transformer.TransformFunc(consolidateAvailabilityIntervals),
			Writer:      store.NewTruncatingWriter(consolidatedStore),
		},
		transformer.PipelineStage{
			Name:        "AvailabilityReducer",
			Reader:      consolidatedStore,
			Transformer: transformer.TransformFunc(availabilityReducer),
			Writer:      store.NewTruncatingWriter(nodesStore),
		},
		transformer.PipelineStage{
			Name:   "AvailabilityJson",
			Reader: nodesStore,
			Writer: &availabilityJsonStore{writer: jsonWriter, timestamp: timestamp},
		},
		transformer.PipelineStage{
			Name:        "GenerateExcludedRanges",
			Reader:      consolidatedStore,
			Transformer: transformer.MakeMapFunc(generateExcludedRanges),
			Writer:      store.NewTruncatingWriter(excludeRangesStore),
		},
		transformer.PipelineStage{
			Name:        "GenerateConsistentRanges",
			Reader:      excludeRangesStore,
			Transformer: transformer.MakeDoFunc(generateConsistentRanges),
			Writer:      store.NewTruncatingWriter(consistentRangesStore),
		},
	}
}
Code example #4
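// AggregateStatisticsPipeline maps each trace to per-trace aggregates, reduces
// them first by session and then into node-level aggregates, and writes the
// node-level aggregates as JSON to jsonWriter. As in the other pipelines,
// TraceKeyRangesPipeline tracks the trace key ranges that have been consumed.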
func AggregateStatisticsPipeline(levelDbManager store.Manager, jsonWriter io.Writer) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	availabilityIntervalsStore := levelDbManager.Seeker("consistent-ranges")
	traceAggregatesStore := levelDbManager.SeekingWriter("statistics-trace-aggregates")
	sessionAggregatesStore := levelDbManager.ReadingWriter("statistics-session-aggregates")
	nodeAggregatesStore := levelDbManager.ReadingWriter("statistics-node-aggregates")
	sessionsStore := levelDbManager.ReadingDeleter("statistics-sessions")
	traceKeyRangesStore := levelDbManager.ReadingDeleter("statistics-trace-key-ranges")
	consolidatedTraceKeyRangesStore := levelDbManager.ReadingDeleter("statistics-consolidated-trace-key-ranges")
	newTracesStore := store.NewRangeExcludingReader(store.NewRangeIncludingReader(tracesStore, availabilityIntervalsStore), traceKeyRangesStore)
	return append([]transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "AggregateStatisticsMapper",
			Reader:      store.NewRangeExcludingReader(tracesStore, traceKeyRangesStore),
			Transformer: transformer.MakeMapFunc(aggregateStatisticsMapper),
			Writer:      traceAggregatesStore,
		},
		SessionPipelineStage(newTracesStore, sessionsStore),
		transformer.PipelineStage{
			Name:        "AggregateStatisticsReduceBySession",
			Reader:      store.NewPrefixIncludingReader(traceAggregatesStore, sessionsStore),
			Transformer: transformer.TransformFunc(aggregateStatisticsReduceBySession),
			Writer:      sessionAggregatesStore,
		},
		transformer.PipelineStage{
			Name:        "AggregateStatisticsReducer",
			Reader:      sessionAggregatesStore,
			Transformer: transformer.TransformFunc(aggregateStatisticsReducer),
			Writer:      nodeAggregatesStore,
		},
		transformer.PipelineStage{
			Name:   "AggregateStatisticsJson",
			Reader: nodeAggregatesStore,
			Writer: &aggregateStatisticsJsonStore{writer: jsonWriter},
		},
	}, TraceKeyRangesPipeline(newTracesStore, traceKeyRangesStore, consolidatedTraceKeyRangesStore)...)
}
Code example #5
File: summary.go  Project: sburnett/bismark-tools
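// SummarizePipeline summarizes the records in the stats store: it computes
// size summaries overall, by hour, and by day, derives interarrival times from
// received timestamps and summarizes them, aggregates sizes per day across
// experiments, and writes each summary out as a CSV file through csvManager.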
func SummarizePipeline(levelDbManager store.Manager, csvManager store.Manager) transformer.Pipeline {
	statsStore := levelDbManager.Reader("stats")
	statsWithHourStore := levelDbManager.ReadingDeleter("stats-with-hour")
	statsWithDayStore := levelDbManager.ReadingDeleter("stats-with-day")
	statsWithReceivedTimestampStore := levelDbManager.ReadingDeleter("stats-with-received-timestamp")
	interarrivalTimesStore := levelDbManager.ReadingDeleter("interarrival-times")
	sizeSummaryStore := levelDbManager.ReadingWriter("size-summary")
	sizeSummaryByHourStore := levelDbManager.ReadingWriter("size-summary-by-hour")
	sizeSummaryByDayStore := levelDbManager.ReadingWriter("size-summary-by-day")
	interarrivalTimesSummaryStore := levelDbManager.ReadingWriter("interarrival-times-summary")
	sizePerDayStore := levelDbManager.ReadingWriter("sizes-by-day")

	sizeSummaryWriter := makeSummaryCsvWriter(csvManager, "size-summary.csv")
	sizeSummaryByHourWriter := makeSummaryByTimestampCsvWriter(csvManager, "size-summary-by-hour.csv")
	sizeSummaryByDayWriter := makeSummaryByTimestampCsvWriter(csvManager, "size-summary-by-day.csv")
	interarrivalTimesSummaryWriter := makeSummaryCsvWriter(csvManager, "interarrival-times-summary.csv")
	sizesPerDayWriter := csvManager.Writer("sizes-per-day.csv", []string{"experiment", "node", "timestamp"}, []string{"count"}, new(string), new(string), new(int64), new(int64))

	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "SummarizeSizes",
			Reader:      statsStore,
			Transformer: transformer.TransformFunc(summarizeSizes),
			Writer:      sizeSummaryStore,
		},
		transformer.PipelineStage{
			Name:        "RekeyStatsByHour",
			Reader:      statsStore,
			Transformer: transformer.MakeMapFunc(rekeyStatsByHour),
			Writer:      store.NewTruncatingWriter(statsWithHourStore),
		},
		transformer.PipelineStage{
			Name:        "SummarizeSizesByHour",
			Reader:      statsWithHourStore,
			Transformer: transformer.TransformFunc(summarizeSizesByTimestamp),
			Writer:      sizeSummaryByHourStore,
		},
		transformer.PipelineStage{
			Name:        "RekeyStatsByDay",
			Reader:      statsStore,
			Transformer: transformer.MakeMapFunc(rekeyStatsByDay),
			Writer:      store.NewTruncatingWriter(statsWithDayStore),
		},
		transformer.PipelineStage{
			Name:        "SummarizeSizesByDay",
			Reader:      statsWithDayStore,
			Transformer: transformer.TransformFunc(summarizeSizesByTimestamp),
			Writer:      sizeSummaryByDayStore,
		},
		transformer.PipelineStage{
			Name:        "RekeyStatsByReceivedTimestamp",
			Reader:      statsStore,
			Transformer: transformer.MakeMapFunc(rekeyStatsByReceviedTimestamp),
			Writer:      store.NewTruncatingWriter(statsWithReceivedTimestampStore),
		},
		transformer.PipelineStage{
			Name:        "ComputeInterarrivalTimes",
			Reader:      statsWithReceivedTimestampStore,
			Transformer: transformer.TransformFunc(computeInterarrivalTimes),
			Writer:      store.NewTruncatingWriter(interarrivalTimesStore),
		},
		transformer.PipelineStage{
			Name:        "SummarizeInterarrival",
			Reader:      interarrivalTimesStore,
			Transformer: transformer.TransformFunc(summarizeInterarrivalTimes),
			Writer:      interarrivalTimesSummaryStore,
		},
		transformer.PipelineStage{
			Name:        "SummarizeSizesPerDay",
			Reader:      statsStore,
			Transformer: transformer.TransformFunc(summarizeSizesPerDay),
			Writer:      sizePerDayStore,
		},
		transformer.PipelineStage{
			Name:        "AggregateExperimentsPerDay",
			Reader:      sizePerDayStore,
			Transformer: transformer.TransformFunc(aggregateSizesPerDay),
			Writer:      sizePerDayStore,
		},
		transformer.PipelineStage{
			Name:   "WriteSizesSummary",
			Reader: sizeSummaryStore,
			Writer: sizeSummaryWriter,
		},
		transformer.PipelineStage{
			Name:   "WriteSizesSummaryByHour",
			Reader: sizeSummaryByHourStore,
			Writer: sizeSummaryByHourWriter,
		},
		transformer.PipelineStage{
			Name:   "WriteSizesSummaryByDay",
			Reader: sizeSummaryByDayStore,
			Writer: sizeSummaryByDayWriter,
		},
		transformer.PipelineStage{
			Name:   "WriteInterarrivalTimesSummary",
			Reader: interarrivalTimesSummaryStore,
			Writer: interarrivalTimesSummaryWriter,
		},
		transformer.PipelineStage{
			Name:   "WriteSizePerDaySummary",
			Reader: sizePerDayStore,
			Writer: sizesPerDayWriter,
		},
	}
}
Code example #6
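// BytesPerDomainPipeline attributes traffic to DNS domains. It extracts
// address, A-record, CNAME-record, flow, and byte-count tables from each new
// trace, joins the DNS records with MAC addresses and a domain whitelist,
// joins the whitelisted domains with flows and their sizes, and flattens the
// result into bytes per domain per device and per timestamp before copying it
// to bytesPerDomainPostgresStore. The excludeOldSessions helper wraps each
// reader in a PrefixIncludingReader over the sessions store, which presumably
// limits every join to records from sessions that are still current.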
func BytesPerDomainPipeline(levelDbManager store.Manager, bytesPerDomainPostgresStore store.Writer) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	availabilityIntervalsStore := levelDbManager.Seeker("consistent-ranges")
	traceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperdomain-trace-key-ranges")
	consolidatedTraceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperdomain-consolidated-trace-key-ranges")
	addressIdTableStore := levelDbManager.SeekingWriter("bytesperdomain-address-id-table")
	aRecordTableStore := levelDbManager.SeekingWriter("bytesperdomain-a-record-table")
	cnameRecordTableStore := levelDbManager.SeekingWriter("bytesperdomain-cname-record-table")
	flowIpsTableStore := levelDbManager.SeekingWriter("bytesperdomain-flow-ips-table")
	addressIpTableStore := levelDbManager.SeekingWriter("bytesperdomain-address-ip-table")
	bytesPerTimestampShardedStore := levelDbManager.SeekingWriter("bytesperdomain-bytes-per-timestamp-sharded")
	whitelistStore := levelDbManager.SeekingWriter("bytesperdomain-whitelist")
	aRecordsWithMacStore := levelDbManager.SeekingWriter("bytesperdomain-a-records-with-mac")
	cnameRecordsWithMacStore := levelDbManager.SeekingWriter("bytesperdomain-cname-records-with-mac")
	allDnsMappingsStore := levelDbManager.SeekingWriter("bytesperdomain-all-dns-mappings")
	allWhitelistedMappingsStore := levelDbManager.SeekingWriter("bytesperdomain-all-whitelisted-mappings")
	flowMacsTableStore := levelDbManager.SeekingWriter("bytesperdomain-flow-macs-table")
	flowDomainsTableStore := levelDbManager.SeekingWriter("bytesperdomain-flow-domains-table")
	flowDomainsGroupedTableStore := levelDbManager.SeekingWriter("bytesperdomain-flow-domains-grouped-table")
	bytesPerDomainShardedStore := levelDbManager.ReadingWriter("bytesperdomain-bytes-per-domain-sharded")
	bytesPerDomainPerDeviceStore := levelDbManager.ReadingWriter("bytesperdomain-bytes-per-domain-per-device")
	bytesPerDomainStore := levelDbManager.ReadingWriter("bytesperdomain-bytes-per-domain")
	sessionsStore := levelDbManager.ReadingDeleter("bytesperdomain-sessions")
	excludeOldSessions := func(stor store.Seeker) store.Seeker {
		return store.NewPrefixIncludingReader(stor, sessionsStore)
	}
	newTracesStore := store.NewRangeExcludingReader(store.NewRangeIncludingReader(tracesStore, availabilityIntervalsStore), traceKeyRangesStore)
	return append([]transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "BytesPerDomainMapper",
			Reader:      newTracesStore,
			Transformer: transformer.MakeMultipleOutputsDoFunc(bytesPerDomainMapper, 7),
			Writer:      store.NewMuxingWriter(addressIdTableStore, aRecordTableStore, cnameRecordTableStore, flowIpsTableStore, addressIpTableStore, bytesPerTimestampShardedStore, whitelistStore),
		},
		SessionPipelineStage(newTracesStore, sessionsStore),
		transformer.PipelineStage{
			Name:        "JoinAAddressIdsWithMacAddresses",
			Reader:      excludeOldSessions(store.NewDemuxingSeeker(addressIdTableStore, aRecordTableStore)),
			Transformer: transformer.TransformFunc(joinAddressIdsWithMacAddresses),
			Writer:      aRecordsWithMacStore,
		},
		transformer.PipelineStage{
			Name:        "JoinCnameAddressIdsWithMacAddresses",
			Reader:      excludeOldSessions(store.NewDemuxingSeeker(addressIdTableStore, cnameRecordTableStore)),
			Transformer: transformer.TransformFunc(joinAddressIdsWithMacAddresses),
			Writer:      cnameRecordsWithMacStore,
		},
		transformer.PipelineStage{
			Name:        "JoinARecordsWithCnameRecords",
			Reader:      excludeOldSessions(store.NewDemuxingSeeker(aRecordsWithMacStore, cnameRecordsWithMacStore)),
			Transformer: transformer.TransformFunc(joinARecordsWithCnameRecords),
			Writer:      allDnsMappingsStore,
		},
		transformer.PipelineStage{
			Name:        "EmitARecords",
			Reader:      excludeOldSessions(aRecordsWithMacStore),
			Transformer: transformer.MakeDoFunc(emitARecords),
			Writer:      allDnsMappingsStore,
		},
		transformer.PipelineStage{
			Name:        "JoinDomainsWithWhitelist",
			Reader:      excludeOldSessions(store.NewDemuxingSeeker(whitelistStore, allDnsMappingsStore)),
			Transformer: transformer.TransformFunc(joinDomainsWithWhitelist),
			Writer:      allWhitelistedMappingsStore,
		},
		transformer.PipelineStage{
			Name:        "JoinMacWithFlowId",
			Reader:      excludeOldSessions(store.NewDemuxingSeeker(addressIpTableStore, flowIpsTableStore)),
			Transformer: transformer.TransformFunc(joinMacWithFlowId),
			Writer:      flowMacsTableStore,
		},
		transformer.PipelineStage{
			Name:        "JoinWhitelistedDomainsWithFlows",
			Reader:      excludeOldSessions(store.NewDemuxingSeeker(allWhitelistedMappingsStore, flowMacsTableStore)),
			Transformer: transformer.TransformFunc(joinWhitelistedDomainsWithFlows),
			Writer:      flowDomainsTableStore,
		},
		transformer.PipelineStage{
			Name:        "GroupDomainsAndMacAddresses",
			Reader:      excludeOldSessions(flowDomainsTableStore),
			Transformer: transformer.TransformFunc(groupDomainsAndMacAddresses),
			Writer:      flowDomainsGroupedTableStore,
		},
		transformer.PipelineStage{
			Name:        "JoinDomainsWithSizes",
			Reader:      excludeOldSessions(store.NewDemuxingSeeker(flowDomainsGroupedTableStore, bytesPerTimestampShardedStore)),
			Transformer: transformer.TransformFunc(joinDomainsWithSizes),
			Writer:      bytesPerDomainShardedStore,
		},
		transformer.PipelineStage{
			Name:        "FlattenIntoBytesPerDevice",
			Reader:      bytesPerDomainShardedStore,
			Transformer: transformer.TransformFunc(flattenIntoBytesPerDevice),
			Writer:      bytesPerDomainPerDeviceStore,
		},
		transformer.PipelineStage{
			Name:        "FlattenIntoBytesPerTimestamp",
			Reader:      bytesPerDomainShardedStore,
			Transformer: transformer.TransformFunc(flattenIntoBytesPerTimestamp),
			Writer:      bytesPerDomainStore,
		},
		transformer.PipelineStage{
			Name:   "BytesPerDomainPostgresStore",
			Reader: bytesPerDomainStore,
			Writer: bytesPerDomainPostgresStore,
		},
	}, TraceKeyRangesPipeline(newTracesStore, traceKeyRangesStore, consolidatedTraceKeyRangesStore)...)
}