Code example #1
File: summary.go Project: sburnett/bismark-tools
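// makeSummaryByTimestampCsvWriter builds a store.Writer keyed by
// (experiment, node, timestamp) whose value columns are a count followed by
// percentile columns (0th, 1st, 5th, 10th, 25th, 50th, 75th, 90th, 95th,
// 99th, 100th).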
func makeSummaryByTimestampCsvWriter(manager store.Manager, name string) store.Writer {
	keyNames := []string{
		"experiment",
		"node",
		"timestamp",
	}
	valueNames := []string{
		"count",
	}
	for _, i := range []int{0, 1, 5, 10, 25, 50, 75, 90, 95, 99, 100} {
		valueNames = append(valueNames, humanize.Ordinal(i))
	}
	arguments := []interface{}{
		name,
		keyNames,
		valueNames,
		new(string), // experiment
		new(string), // node
		new(int64),  // timestamp
		new(int64),  // count
	}
	for range []int{0, 1, 5, 10, 25, 50, 75, 90, 95, 99, 100} {
		arguments = append(arguments, new(int64))
	}
	return manager.Writer(arguments...)
}
Code example #2
File: packages.go Project: sburnett/bismark-tools
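// PackagesPipeline extracts installed packages from opkg_list-installed
// logs, detects package version changes, and writes the changes to both CSV
// and SQLite.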
func PackagesPipeline(levelDbManager, csvManager, sqliteManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	installedPackagesStore := levelDbManager.ReadingWriter("installed-packages")
	versionChangesStore := levelDbManager.ReadingWriter("version-changes")
	var node, packageName string
	var timestamp int64
	var version string
	csvStore := csvManager.Writer("packages.csv", []string{"node", "package", "timestamp"}, []string{"version"}, &node, &packageName, &timestamp, &version)
	sqliteStore := sqliteManager.Writer("packages", []string{"node", "package", "timestamp"}, []string{"version"}, &node, &packageName, &timestamp, &version)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "OpkgListInstalled",
			Reader:      ReadOnlySomeLogs(logsStore, "opkg_list-installed"),
			Transformer: transformer.MakeDoFunc(extractInstalledPackages),
			Writer:      installedPackagesStore,
		},
		transformer.PipelineStage{
			Name:        "DetectVersionChanges",
			Reader:      installedPackagesStore,
			Transformer: transformer.TransformFunc(detectChangedPackageVersions),
			Writer:      versionChangesStore,
		},
		transformer.PipelineStage{
			Name:   "WriteVersionChangesSqlite",
			Reader: versionChangesStore,
			Writer: sqliteStore,
		},
		transformer.PipelineStage{
			Name:   "WriteVersionChangesCsv",
			Reader: versionChangesStore,
			Writer: csvStore,
		},
	}
}
Code example #3
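// FilterNodesPipeline copies records for a single node from the traces
// store into a per-node filtered-<nodeId> store.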
func FilterNodesPipeline(nodeId string, levelDbManager store.Manager) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	filteredStore := levelDbManager.Writer(fmt.Sprintf("filtered-%s", nodeId))
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:   "FilterNode",
			Reader: FilterNodes(tracesStore, nodeId),
			Writer: filteredStore,
		},
	}
}
Code example #4
File: csv.go Project: sburnett/bismark-tools
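// CsvPipeline dumps the stats store to stats.csv, one row per
// (experiment, node, filename).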
func CsvPipeline(levelDbManager, csvManager store.Manager) transformer.Pipeline {
	var experiment, node, filename string
	var receivedTimestamp, creationTimestamp, size int64
	csvStore := csvManager.Writer("stats.csv", []string{"experiment", "node", "filename"}, []string{"received_timestamp", "creation_timestamp", "size"}, &experiment, &node, &filename, &receivedTimestamp, &creationTimestamp, &size)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:   "WriteStatsCsv",
			Reader: levelDbManager.Reader("stats"),
			Writer: csvStore,
		},
	}
}
Code example #5
File: reboots.go Project: sburnett/bismark-tools
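// RebootsPipeline infers boot timestamps from uptime records and writes
// them to CSV and SQLite.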
func RebootsPipeline(levelDbManager, csvManager, sqliteManager store.Manager) transformer.Pipeline {
	uptimeStore := levelDbManager.Seeker("uptime")
	rebootsStore := levelDbManager.ReadingWriter("reboots")
	var node string
	var timestamp int64
	rebootsCsvStore := csvManager.Writer("reboots.csv", []string{"node", "boot_timestamp"}, []string{}, &node, &timestamp)
	rebootsSqliteStore := sqliteManager.Writer("reboots", []string{"node", "boot_timestamp"}, []string{}, &node, &timestamp)

	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "InferReboots",
			Reader:      uptimeStore,
			Transformer: transformer.TransformFunc(inferReboots),
			Writer:      rebootsStore,
		},
		transformer.PipelineStage{
			Name:   "WriteRebootsCsv",
			Reader: rebootsStore,
			Writer: rebootsCsvStore,
		},
		transformer.PipelineStage{
			Name:   "WriteRebootsSqlite",
			Reader: rebootsStore,
			Writer: rebootsSqliteStore,
		},
	}
}
Code example #6
File: uptime.go Project: sburnett/bismark-tools
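// UptimePipeline extracts uptime readings from uptime logs and writes them
// to CSV and SQLite.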
func UptimePipeline(levelDbManager, csvManager, sqliteManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	uptimeStore := levelDbManager.ReadingWriter("uptime")
	var node string
	var timestamp, uptime int64
	csvStore := csvManager.Writer("uptime.csv", []string{"node", "timestamp"}, []string{"uptime"}, &node, &timestamp, &uptime)
	sqliteStore := sqliteManager.Writer("uptime", []string{"node", "timestamp"}, []string{"uptime"}, &node, &timestamp, &uptime)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "Uptime",
			Reader:      ReadOnlySomeLogs(logsStore, "uptime"),
			Transformer: transformer.MakeMapFunc(extractUptime),
			Writer:      uptimeStore,
		},
		transformer.PipelineStage{
			Name:   "WriteUptimeCsv",
			Reader: uptimeStore,
			Writer: csvStore,
		},
		transformer.PipelineStage{
			Name:   "WriteUptimeSqlite",
			Reader: uptimeStore,
			Writer: sqliteStore,
		},
	}
}
Code example #7
File: memory.go Project: sburnett/bismark-tools
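// MemoryUsagePipeline extracts used and free memory from top logs and
// writes the readings to CSV and SQLite.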
func MemoryUsagePipeline(levelDbManager, csvManager, sqliteManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	memoryUsageStore := levelDbManager.ReadingWriter("memory")
	var node string
	var timestamp, used, free int64
	csvStore := csvManager.Writer("memory.csv", []string{"node", "timestamp"}, []string{"used", "free"}, &node, &timestamp, &used, &free)
	sqliteStore := sqliteManager.Writer("memory", []string{"node", "timestamp"}, []string{"used", "free"}, &node, &timestamp, &used, &free)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "Memory",
			Reader:      ReadOnlySomeLogs(logsStore, "top"),
			Transformer: transformer.MakeDoFunc(extractMemoryUsage),
			Writer:      memoryUsageStore,
		},
		transformer.PipelineStage{
			Name:   "WriteMemoryUsageCsv",
			Reader: memoryUsageStore,
			Writer: csvStore,
		},
		transformer.PipelineStage{
			Name:   "WriteMemoryUsageSqlite",
			Reader: memoryUsageStore,
			Writer: sqliteStore,
		},
	}
}
Code example #8
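// DevicesCountPipeline extracts Ethernet and wireless device counts from
// swconfig_ports and iw_station_count logs.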
func DevicesCountPipeline(levelDbManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	devicesCountStore := levelDbManager.Writer("devices-count")
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "ExtractEthernetCount",
			Reader:      ReadOnlySomeLogs(logsStore, "swconfig_ports"),
			Transformer: transformer.MakeDoFunc(extractEthernetCount),
			Writer:      devicesCountStore,
		},
		transformer.PipelineStage{
			Name:        "ExtractWirelessCount",
			Reader:      ReadOnlySomeLogs(logsStore, "iw_station_count"),
			Transformer: transformer.MakeDoFunc(extractWirelessCount),
			Writer:      devicesCountStore,
		},
	}
}
Code example #9
File: timescsv.go Project: sburnett/bismark-tools
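// TimesCsvPipeline writes one CSV file per (experiment, node) pair under
// csvRoot, each row pairing a creation timestamp with its received
// timestamp.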
func TimesCsvPipeline(levelDbManager store.Manager, csvRoot string) transformer.Pipeline {
	writeTimesCsv := func(inputChan, outputChan chan *store.Record) {
		var currentHandle *os.File
		var currentExperiment, currentNode string
		for record := range inputChan {
			var statsKey StatsKey
			lex.DecodeOrDie(record.Key, &statsKey)
			var statsValue StatsValue
			lex.DecodeOrDie(record.Value, &statsValue)

			// Records arrive in key order, so a change of (experiment, node) marks the start of a new output file.
			if currentExperiment != statsKey.Experiment || currentNode != statsKey.Node {
				if currentHandle != nil {
					currentHandle.Close()
				}
				currentExperiment = statsKey.Experiment
				currentNode = statsKey.Node

				csvName := fmt.Sprintf("%s_%s.csv", currentExperiment, currentNode)
				newHandle, err := os.Create(filepath.Join(csvRoot, csvName))
				if err != nil {
					panic(err)
				}
				currentHandle = newHandle
			}

			if _, err := fmt.Fprintf(currentHandle, "%d,%d\n", statsValue.CreationTimestamp, statsValue.ReceivedTimestamp); err != nil {
				panic(err)
			}
		}
		if currentHandle != nil {
			currentHandle.Close()
		}
	}

	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "WriteTimesCsv",
			Reader:      levelDbManager.Reader("stats"),
			Transformer: transformer.TransformFunc(writeTimesCsv),
		},
	}
}
Code example #10
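// runFilterSessionsPipeline runs FilterSessionsPipeline over the traces
// store and prints the key of every record that survives the filter.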
func runFilterSessionsPipeline(startSecs, endSecs int64, levelDbManager store.Manager) {

	transformer.RunPipeline(FilterSessionsPipeline(startSecs, endSecs, levelDbManager, "test"))

	filteredStore := levelDbManager.Reader("test")
	filteredStore.BeginReading()
	for {
		record, err := filteredStore.ReadRecord()
		if err != nil {
			panic(err)
		}
		if record == nil {
			break
		}
		var traceKey TraceKey
		lex.DecodeOrDie(record.Key, &traceKey)
		fmt.Printf("%s %d %d\n", traceKey.NodeId, traceKey.SessionId, traceKey.SequenceNumber)
	}
	filteredStore.EndReading()
}
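
The transformer.RunPipeline call above is the entry point that drives every pipeline in this listing. As a minimal sketch (the wrapper below is hypothetical, not taken from the project), running the uptime pipeline of code example #6 would look like:

// runUptimePipeline is a hypothetical driver in the style of
// runFilterSessionsPipeline above; it assumes the caller has already
// constructed the three store.Manager values elsewhere.
func runUptimePipeline(levelDbManager, csvManager, sqliteManager store.Manager) {
	transformer.RunPipeline(UptimePipeline(levelDbManager, csvManager, sqliteManager))
}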
Code example #11
File: index.go Project: sburnett/bismark-tools
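// IndexTarballsPipeline scans the full and by-date health tarball trees,
// indexes their contents into the logs store, and records which tarballs
// have already been indexed so later runs skip them.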
func IndexTarballsPipeline(tarballsPath string, levelDbManager store.Manager) transformer.Pipeline {
	allTarballsPattern := filepath.Join(tarballsPath, "all", "health", "*", "*", "health_*.tar.gz")
	dailyTarballsPattern := filepath.Join(tarballsPath, "by-date", "*", "health", "*", "health_*.tar.gz")
	tarnamesStore := levelDbManager.ReadingWriter("tarnames")
	tarnamesIndexedStore := levelDbManager.ReadingWriter("tarnames-indexed")
	logsStore := levelDbManager.Writer("logs")
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:   "ScanLogTarballs",
			Reader: store.NewGlobReader(allTarballsPattern),
			Writer: tarnamesStore,
		},
		transformer.PipelineStage{
			Name:   "ScanDailyLogTarballs",
			Reader: store.NewGlobReader(dailyTarballsPattern),
			Writer: tarnamesStore,
		},
		transformer.PipelineStage{
			Name:        "ReadLogTarballs",
			Reader:      store.NewDemuxingReader(tarnamesStore, tarnamesIndexedStore),
			Transformer: transformer.MakeMultipleOutputsGroupDoFunc(IndexTarballs, 2),
			Writer:      store.NewMuxingWriter(logsStore, tarnamesIndexedStore),
		},
	}
}
Code example #12
File: summary.go Project: sburnett/bismark-tools
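// makeSummaryCsvWriter builds a store.Writer keyed by (experiment, node)
// whose value columns are a count followed by every 5th percentile from the
// 0th to the 100th.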
func makeSummaryCsvWriter(manager store.Manager, name string) store.Writer {
	keyNames := []string{
		"experiment",
		"node",
	}
	valueNames := []string{
		"count",
	}
	for i := 0; i <= 100; i += 5 {
		valueNames = append(valueNames, humanize.Ordinal(i))
	}
	arguments := []interface{}{
		name,
		keyNames,
		valueNames,
		new(string), // experiment
		new(string), // node
		new(int64),  // count
	}
	for i := 0; i <= 100; i += 5 {
		arguments = append(arguments, new(int64))
	}
	return manager.Writer(arguments...)
}
Code example #13
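// FilterSessionsPipeline keeps traces whose sessions fall between
// sessionStartTime and sessionEndTime; the bounds are given in seconds and
// converted to microseconds here.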
func FilterSessionsPipeline(sessionStartTime, sessionEndTime int64, levelDbManager store.Manager, outputName string) transformer.Pipeline {
	tracesStore := levelDbManager.Reader("traces")
	traceKeyRangesStore := levelDbManager.Reader("availability-done")
	filteredStore := levelDbManager.Writer(outputName)
	parameters := filterSessions{
		SessionStartTime: sessionStartTime * 1000000,
		SessionEndTime:   sessionEndTime * 1000000,
	}
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "FilterSessions",
			Reader:      store.NewDemuxingReader(traceKeyRangesStore, tracesStore),
			Transformer: parameters,
			Writer:      filteredStore,
		},
	}
}
Code example #14
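// IndexTarballsPipeline scans trace tarballs, indexes their contents into
// the traces store, and tracks already-indexed tarballs so later runs skip
// them.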
func IndexTarballsPipeline(tarballsPath string, levelDbManager store.Manager) transformer.Pipeline {
	tarballsPattern := filepath.Join(tarballsPath, "*", "*", "*.tar.gz")
	tarnamesStore := levelDbManager.ReadingWriter("tarnames")
	tarnamesIndexedStore := levelDbManager.ReadingWriter("tarnames-indexed")
	tracesStore := levelDbManager.Writer("traces")
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:   "ScanTraceTarballs",
			Reader: store.NewGlobReader(tarballsPattern),
			Writer: tarnamesStore,
		},
		transformer.PipelineStage{
			Name:        "IndexTraces",
			Transformer: transformer.MakeMultipleOutputsGroupDoFunc(IndexTarballs, 2),
			Reader:      store.NewDemuxingReader(tarnamesStore, tarnamesIndexedStore),
			Writer:      store.NewMuxingWriter(tracesStore, tarnamesIndexedStore),
		},
	}
}
Code example #15
File: disjoint.go Project: sburnett/bismark-tools
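// DisjointPackagesPipeline scans logs for disjoint-packages errors and
// writes the offending (filename, node, timestamp) triples to
// not-disjoint.csv.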
func DisjointPackagesPipeline(levelDbManager, csvManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	disjointPackagesStore := levelDbManager.ReadingWriter("disjoint-packages")
	var filename, node string
	var timestamp int64
	csvStore := csvManager.Writer("not-disjoint.csv", []string{"filename", "node", "timestamp"}, []string{}, &filename, &node, &timestamp)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "DisjointPackages",
			Reader:      logsStore,
			Transformer: transformer.MakeDoFunc(detectDisjointPackagesError),
			Writer:      disjointPackagesStore,
		},
		transformer.PipelineStage{
			Name:   "WriteDisjointPackagesCsv",
			Reader: disjointPackagesStore,
			Writer: csvStore,
		},
	}
}
Code example #16
File: iproute.go Project: sburnett/bismark-tools
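// IpRoutePipeline extracts each node's default gateway from iproute logs
// and writes it to SQLite.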
func IpRoutePipeline(levelDbManager, sqliteManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	defaultRoutesStore := levelDbManager.ReadingWriter("default-routes")
	var node string
	var timestamp int64
	var gateway string
	sqliteStore := sqliteManager.Writer("defaultroutes", []string{"node", "timestamp"}, []string{"gateway"}, &node, &timestamp, &gateway)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "ExtractDefaultRoute",
			Reader:      ReadOnlySomeLogs(logsStore, "iproute"),
			Transformer: transformer.MakeDoFunc(extractDefaultRoute),
			Writer:      defaultRoutesStore,
		},
		transformer.PipelineStage{
			Name:   "WriteDefaultRoutesSqlite",
			Reader: defaultRoutesStore,
			Writer: sqliteStore,
		},
	}
}
Code example #17
File: filesystem.go Project: sburnett/bismark-tools
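// FilesystemUsagePipeline extracts used and free space per mount point from
// df logs and writes the readings to CSV.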
func FilesystemUsagePipeline(levelDbManager, csvManager store.Manager) transformer.Pipeline {
	logsStore := levelDbManager.Seeker("logs")
	filesystemUsageStore := levelDbManager.ReadingWriter("filesystem")
	var mount, node string
	var timestamp, used, free int64
	csvStore := csvManager.Writer("filesystem.csv", []string{"mount", "node", "timestamp"}, []string{"used", "free"}, &mount, &node, &timestamp, &used, &free)

	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "Filesystem",
			Reader:      ReadOnlySomeLogs(logsStore, "df"),
			Transformer: transformer.MakeDoFunc(extractFilesystemUsage),
			Writer:      filesystemUsageStore,
		},
		transformer.PipelineStage{
			Name:   "WriteFilesystemUsageCsv",
			Reader: filesystemUsageStore,
			Writer: csvStore,
		},
	}
}
Code example #18
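// BytesPerDomainPipeline joins DNS A and CNAME records with flow data to
// attribute traffic to domains, then flattens the results into bytes per
// domain per device and per timestamp, writing the final table to the
// supplied Postgres writer.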
func BytesPerDomainPipeline(levelDbManager store.Manager, bytesPerDomainPostgresStore store.Writer) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	availabilityIntervalsStore := levelDbManager.Seeker("consistent-ranges")
	traceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperdomain-trace-key-ranges")
	consolidatedTraceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperdomain-consolidated-trace-key-ranges")
	addressIdTableStore := levelDbManager.SeekingWriter("bytesperdomain-address-id-table")
	aRecordTableStore := levelDbManager.SeekingWriter("bytesperdomain-a-record-table")
	cnameRecordTableStore := levelDbManager.SeekingWriter("bytesperdomain-cname-record-table")
	flowIpsTableStore := levelDbManager.SeekingWriter("bytesperdomain-flow-ips-table")
	addressIpTableStore := levelDbManager.SeekingWriter("bytesperdomain-address-ip-table")
	bytesPerTimestampShardedStore := levelDbManager.SeekingWriter("bytesperdomain-bytes-per-timestamp-sharded")
	whitelistStore := levelDbManager.SeekingWriter("bytesperdomain-whitelist")
	aRecordsWithMacStore := levelDbManager.SeekingWriter("bytesperdomain-a-records-with-mac")
	cnameRecordsWithMacStore := levelDbManager.SeekingWriter("bytesperdomain-cname-records-with-mac")
	allDnsMappingsStore := levelDbManager.SeekingWriter("bytesperdomain-all-dns-mappings")
	allWhitelistedMappingsStore := levelDbManager.SeekingWriter("bytesperdomain-all-whitelisted-mappings")
	flowMacsTableStore := levelDbManager.SeekingWriter("bytesperdomain-flow-macs-table")
	flowDomainsTableStore := levelDbManager.SeekingWriter("bytesperdomain-flow-domains-table")
	flowDomainsGroupedTableStore := levelDbManager.SeekingWriter("bytesperdomain-flow-domains-grouped-table")
	bytesPerDomainShardedStore := levelDbManager.ReadingWriter("bytesperdomain-bytes-per-domain-sharded")
	bytesPerDomainPerDeviceStore := levelDbManager.ReadingWriter("bytesperdomain-bytes-per-domain-per-device")
	bytesPerDomainStore := levelDbManager.ReadingWriter("bytesperdomain-bytes-per-domain")
	sessionsStore := levelDbManager.ReadingDeleter("bytesperdomain-sessions")
	excludeOldSessions := func(stor store.Seeker) store.Seeker {
		return store.NewPrefixIncludingReader(stor, sessionsStore)
	}
	newTracesStore := store.NewRangeExcludingReader(store.NewRangeIncludingReader(tracesStore, availabilityIntervalsStore), traceKeyRangesStore)
	return append([]transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "BytesPerDomainMapper",
			Reader:      newTracesStore,
			Transformer: transformer.MakeMultipleOutputsDoFunc(bytesPerDomainMapper, 7),
			Writer:      store.NewMuxingWriter(addressIdTableStore, aRecordTableStore, cnameRecordTableStore, flowIpsTableStore, addressIpTableStore, bytesPerTimestampShardedStore, whitelistStore),
		},
		SessionPipelineStage(newTracesStore, sessionsStore),
		transformer.PipelineStage{
			Name:        "JoinAAddressIdsWithMacAddresses",
			Reader:      excludeOldSessions(store.NewDemuxingSeeker(addressIdTableStore, aRecordTableStore)),
			Transformer: transformer.TransformFunc(joinAddressIdsWithMacAddresses),
			Writer:      aRecordsWithMacStore,
		},
		transformer.PipelineStage{
			Name:        "JoinCnameAddressIdsWithMacAddresses",
			Reader:      excludeOldSessions(store.NewDemuxingSeeker(addressIdTableStore, cnameRecordTableStore)),
			Transformer: transformer.TransformFunc(joinAddressIdsWithMacAddresses),
			Writer:      cnameRecordsWithMacStore,
		},
		transformer.PipelineStage{
			Name:        "JoinARecordsWithCnameRecords",
			Reader:      excludeOldSessions(store.NewDemuxingSeeker(aRecordsWithMacStore, cnameRecordsWithMacStore)),
			Transformer: transformer.TransformFunc(joinARecordsWithCnameRecords),
			Writer:      allDnsMappingsStore,
		},
		transformer.PipelineStage{
			Name:        "EmitARecords",
			Reader:      excludeOldSessions(aRecordsWithMacStore),
			Transformer: transformer.MakeDoFunc(emitARecords),
			Writer:      allDnsMappingsStore,
		},
		transformer.PipelineStage{
			Name:        "JoinDomainsWithWhitelist",
			Reader:      excludeOldSessions(store.NewDemuxingSeeker(whitelistStore, allDnsMappingsStore)),
			Transformer: transformer.TransformFunc(joinDomainsWithWhitelist),
			Writer:      allWhitelistedMappingsStore,
		},
		transformer.PipelineStage{
			Name:        "JoinMacWithFlowId",
			Reader:      excludeOldSessions(store.NewDemuxingSeeker(addressIpTableStore, flowIpsTableStore)),
			Transformer: transformer.TransformFunc(joinMacWithFlowId),
			Writer:      flowMacsTableStore,
		},
		transformer.PipelineStage{
			Name:        "JoinWhitelistedDomainsWithFlows",
			Reader:      excludeOldSessions(store.NewDemuxingSeeker(allWhitelistedMappingsStore, flowMacsTableStore)),
			Transformer: transformer.TransformFunc(joinWhitelistedDomainsWithFlows),
			Writer:      flowDomainsTableStore,
		},
		transformer.PipelineStage{
			Name:        "GroupDomainsAndMacAddresses",
			Reader:      excludeOldSessions(flowDomainsTableStore),
			Transformer: transformer.TransformFunc(groupDomainsAndMacAddresses),
			Writer:      flowDomainsGroupedTableStore,
		},
		transformer.PipelineStage{
			Name:        "JoinDomainsWithSizes",
			Reader:      excludeOldSessions(store.NewDemuxingSeeker(flowDomainsGroupedTableStore, bytesPerTimestampShardedStore)),
			Transformer: transformer.TransformFunc(joinDomainsWithSizes),
			Writer:      bytesPerDomainShardedStore,
		},
		transformer.PipelineStage{
			Name:        "FlattenIntoBytesPerDevice",
			Reader:      bytesPerDomainShardedStore,
			Transformer: transformer.TransformFunc(flattenIntoBytesPerDevice),
			Writer:      bytesPerDomainPerDeviceStore,
		},
		transformer.PipelineStage{
			Name:        "FlattenIntoBytesPerTimestamp",
			Reader:      bytesPerDomainShardedStore,
			Transformer: transformer.TransformFunc(flattenIntoBytesPerTimestamp),
			Writer:      bytesPerDomainStore,
		},
		transformer.PipelineStage{
			Name:   "BytesPerDomainPostgresStore",
			Reader: bytesPerDomainStore,
			Writer: bytesPerDomainPostgresStore,
		},
	}, TraceKeyRangesPipeline(newTracesStore, traceKeyRangesStore, consolidatedTraceKeyRangesStore)...)
}
Code example #19
File: summarize.go Project: sburnett/bismark-tools
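// SummarizeHealthPipeline buckets memory and filesystem usage by day,
// summarizes each bucket, and writes the summaries to CSV.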
func SummarizeHealthPipeline(levelDbManager, csvManager store.Manager) transformer.Pipeline {
	memoryStore := levelDbManager.Reader("memory")
	memoryUsageByDayStore := levelDbManager.ReadingWriter("memory-usage-by-day")
	memoryUsageByDaySummarizedStore := levelDbManager.ReadingWriter("memory-usage-by-day-summarized")
	filesystemStore := levelDbManager.Reader("filesystem")
	filesystemUsageByDayStore := levelDbManager.ReadingWriter("filesystem-usage-by-day")
	filesystemUsageByDaySummarizedStore := levelDbManager.ReadingWriter("filesystem-usage-by-day-summarized")

	var timestamp, usage int64
	var filesystem, node string
	memoryUsageSummaryCsv := csvManager.Writer("memory-usage-summary.csv", []string{"timestamp", "node"}, []string{"usage"}, &timestamp, &node, &usage)
	filesystemUsageSummaryCsv := csvManager.Writer("filesystem-usage-summary.csv", []string{"filesystem", "timestamp", "node"}, []string{"usage"}, &filesystem, &timestamp, &node, &usage)

	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "OrderMemoryUsageByTimestamp",
			Reader:      memoryStore,
			Transformer: transformer.MakeMapFunc(orderRecordsByDay),
			Writer:      memoryUsageByDayStore,
		},
		transformer.PipelineStage{
			Name:        "SummarizeMemoryUsage",
			Reader:      memoryUsageByDayStore,
			Transformer: transformer.TransformFunc(summarizeMemoryUsage),
			Writer:      memoryUsageByDaySummarizedStore,
		},
		transformer.PipelineStage{
			Name:   "WriteMemoryUsageSummaryCsv",
			Reader: memoryUsageByDaySummarizedStore,
			Writer: memoryUsageSummaryCsv,
		},
		transformer.PipelineStage{
			Name:        "OrderFilesystemUsageByTimestamp",
			Reader:      filesystemStore,
			Transformer: transformer.MakeMapFunc(orderFilesystemRecordsByDay),
			Writer:      filesystemUsageByDayStore,
		},
		transformer.PipelineStage{
			Name:        "SummarizeFilesystemUsage",
			Reader:      filesystemUsageByDayStore,
			Transformer: transformer.TransformFunc(summarizeFilesystemUsage),
			Writer:      filesystemUsageByDaySummarizedStore,
		},
		transformer.PipelineStage{
			Name:   "WriteFilesystemUsageSummaryCsv",
			Reader: filesystemUsageByDaySummarizedStore,
			Writer: filesystemUsageSummaryCsv,
		},
	}
}
Code example #20
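// LookupsPerDevicePipeline joins DNS lookups with device MAC addresses to
// count lookups per device, both in total and per hour.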
func LookupsPerDevicePipeline(levelDbManager store.Manager) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	availabilityIntervalsStore := levelDbManager.Seeker("consistent-ranges")
	addressIdStore := levelDbManager.Seeker("bytesperdomain-address-id-table")
	addressIdToDomainStore := levelDbManager.SeekingWriter("lookupsperdevice-address-id-to-domain")
	lookupsPerDeviceSharded := levelDbManager.ReadingWriter("lookupsperdevice-sharded")
	lookupsPerDeviceStore := levelDbManager.Writer("lookupsperdevice-lookups-per-device")
	lookupsPerDevicePerHourStore := levelDbManager.Writer("lookupsperdevice-lookups-per-device-per-hour")
	consistentTracesStore := store.NewRangeIncludingReader(tracesStore, availabilityIntervalsStore)
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "LookupsPerDeviceMapper",
			Reader:      consistentTracesStore,
			Transformer: transformer.MakeDoFunc(lookupsPerDeviceMapper),
			Writer:      addressIdToDomainStore,
		},
		transformer.PipelineStage{
			Name:        "JoinMacWithLookups",
			Reader:      store.NewDemuxingSeeker(addressIdStore, addressIdToDomainStore),
			Transformer: transformer.TransformFunc(joinMacWithLookups),
			Writer:      lookupsPerDeviceSharded,
		},
		transformer.PipelineStage{
			Name:        "FlattenLookupsToNodeAndMac",
			Reader:      lookupsPerDeviceSharded,
			Transformer: transformer.TransformFunc(flattenLookupsToNodeAndMac),
			Writer:      lookupsPerDeviceStore,
		},
		transformer.PipelineStage{
			Name:        "FlattenLookupsToNodeMacAndTimestamp",
			Reader:      lookupsPerDeviceSharded,
			Transformer: transformer.TransformFunc(flattenLookupsToNodeMacAndTimestamp),
			Writer:      lookupsPerDevicePerHourStore,
		},
	}
}
Code example #21
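// AvailabilityPipeline computes per-node availability intervals from the
// traces store, writes them as JSON, and regenerates the excluded
// (availability-done) and consistent (consistent-ranges) key ranges
// consumed by the other trace pipelines.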
func AvailabilityPipeline(levelDbManager store.Manager, jsonWriter io.Writer, timestamp int64) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	intervalsStore := levelDbManager.ReadingWriter("availability-intervals")
	consolidatedStore := levelDbManager.ReadingDeleter("availability-consolidated")
	nodesStore := levelDbManager.ReadingDeleter("availability-nodes")
	excludeRangesStore := levelDbManager.ReadingDeleter("availability-done")
	consistentRangesStore := levelDbManager.ReadingDeleter("consistent-ranges")
	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "AvailabilityIntervals",
			Reader:      store.NewRangeExcludingReader(tracesStore, excludeRangesStore),
			Transformer: transformer.TransformFunc(availabilityIntervals),
			Writer:      intervalsStore,
		},
		transformer.PipelineStage{
			Name:        "ConsolidateAvailabilityIntervals",
			Reader:      intervalsStore,
			Transformer: transformer.TransformFunc(consolidateAvailabilityIntervals),
			Writer:      store.NewTruncatingWriter(consolidatedStore),
		},
		transformer.PipelineStage{
			Name:        "AvailabilityReducer",
			Reader:      consolidatedStore,
			Transformer: transformer.TransformFunc(availabilityReducer),
			Writer:      store.NewTruncatingWriter(nodesStore),
		},
		transformer.PipelineStage{
			Name:   "AvailabilityJson",
			Reader: nodesStore,
			Writer: &availabilityJsonStore{writer: jsonWriter, timestamp: timestamp},
		},
		transformer.PipelineStage{
			Name:        "GenerateExcludedRanges",
			Reader:      consolidatedStore,
			Transformer: transformer.MakeMapFunc(generateExcludedRanges),
			Writer:      store.NewTruncatingWriter(excludeRangesStore),
		},
		transformer.PipelineStage{
			Name:        "GenerateConsistentRanges",
			Reader:      excludeRangesStore,
			Transformer: transformer.MakeDoFunc(generateConsistentRanges),
			Writer:      store.NewTruncatingWriter(consistentRangesStore),
		},
	}
}
Code example #22
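// BytesPerDevicePipeline joins flow IDs with MAC addresses and packet sizes
// to compute bytes transferred per device, writing the result to the
// supplied Postgres writer.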
func BytesPerDevicePipeline(levelDbManager store.Manager, bytesPerDevicePostgresStore store.Writer) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	availabilityIntervalsStore := levelDbManager.Seeker("consistent-ranges")
	sessionsStore := levelDbManager.ReadingDeleter("bytesperdevice-session")
	addressTableStore := levelDbManager.SeekingWriter("bytesperdevice-address-table")
	flowTableStore := levelDbManager.SeekingWriter("bytesperdevice-flow-table")
	packetsStore := levelDbManager.SeekingWriter("bytesperdevice-packets")
	flowIdToMacStore := levelDbManager.SeekingWriter("bytesperdevice-flow-id-to-mac")
	flowIdToMacsStore := levelDbManager.SeekingWriter("bytesperdevice-flow-id-to-macs")
	bytesPerDeviceUnreducedStore := levelDbManager.SeekingWriter("bytesperdevice-unreduced")
	bytesPerDeviceSessionStore := levelDbManager.ReadingWriter("bytesperdevice-reduced-sessions")
	bytesPerDeviceStore := levelDbManager.ReadingWriter("bytesperdevice")
	traceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperdevice-trace-key-ranges")
	consolidatedTraceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperdevice-consolidated-trace-key-ranges")
	newTracesStore := store.NewRangeExcludingReader(store.NewRangeIncludingReader(tracesStore, availabilityIntervalsStore), traceKeyRangesStore)
	return append([]transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "BytesPerDeviceMapper",
			Reader:      newTracesStore,
			Transformer: transformer.MakeMultipleOutputsDoFunc(bytesPerDeviceMapper, 3),
			Writer:      store.NewMuxingWriter(addressTableStore, flowTableStore, packetsStore),
		},
		SessionPipelineStage(newTracesStore, sessionsStore),
		transformer.PipelineStage{
			Name:        "JoinMacAndFlowId",
			Reader:      store.NewPrefixIncludingReader(store.NewDemuxingSeeker(addressTableStore, flowTableStore), sessionsStore),
			Transformer: transformer.TransformFunc(joinMacAndFlowId),
			Writer:      flowIdToMacStore,
		},
		transformer.PipelineStage{
			Name:        "FlattenMacAddresses",
			Reader:      store.NewPrefixIncludingReader(flowIdToMacStore, sessionsStore),
			Transformer: transformer.TransformFunc(flattenMacAddresses),
			Writer:      flowIdToMacsStore,
		},
		transformer.PipelineStage{
			Name:        "JoinMacAndSizes",
			Reader:      store.NewPrefixIncludingReader(store.NewDemuxingSeeker(flowIdToMacsStore, packetsStore), sessionsStore),
			Transformer: transformer.TransformFunc(joinMacAndSizes),
			Writer:      bytesPerDeviceUnreducedStore,
		},
		transformer.PipelineStage{
			Name:        "ReduceBytesPerDeviceSession",
			Reader:      store.NewPrefixIncludingReader(bytesPerDeviceUnreducedStore, sessionsStore),
			Transformer: transformer.TransformFunc(reduceBytesPerDeviceSession),
			Writer:      bytesPerDeviceSessionStore,
		},
		transformer.PipelineStage{
			Name:        "ReduceBytesPerDevice",
			Reader:      bytesPerDeviceSessionStore,
			Transformer: transformer.TransformFunc(reduceBytesPerDevice),
			Writer:      bytesPerDeviceStore,
		},
		transformer.PipelineStage{
			Name:   "BytesPerDevicePostgres",
			Reader: bytesPerDeviceStore,
			Writer: bytesPerDevicePostgresStore,
		},
	}, TraceKeyRangesPipeline(newTracesStore, traceKeyRangesStore, consolidatedTraceKeyRangesStore)...)
}
Code example #23
File: summary.go Project: sburnett/bismark-tools
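// SummarizePipeline summarizes file sizes (overall, by hour, and by day)
// and interarrival times, writing each summary to CSV with the writers
// built by makeSummaryCsvWriter and makeSummaryByTimestampCsvWriter (code
// examples #12 and #1).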
func SummarizePipeline(levelDbManager store.Manager, csvManager store.Manager) transformer.Pipeline {
	statsStore := levelDbManager.Reader("stats")
	statsWithHourStore := levelDbManager.ReadingDeleter("stats-with-hour")
	statsWithDayStore := levelDbManager.ReadingDeleter("stats-with-day")
	statsWithReceivedTimestampStore := levelDbManager.ReadingDeleter("stats-with-received-timestamp")
	interarrivalTimesStore := levelDbManager.ReadingDeleter("interarrival-times")
	sizeSummaryStore := levelDbManager.ReadingWriter("size-summary")
	sizeSummaryByHourStore := levelDbManager.ReadingWriter("size-summary-by-hour")
	sizeSummaryByDayStore := levelDbManager.ReadingWriter("size-summary-by-day")
	interarrivalTimesSummaryStore := levelDbManager.ReadingWriter("interarrival-times-summary")
	sizePerDayStore := levelDbManager.ReadingWriter("sizes-by-day")

	sizeSummaryWriter := makeSummaryCsvWriter(csvManager, "size-summary.csv")
	sizeSummaryByHourWriter := makeSummaryByTimestampCsvWriter(csvManager, "size-summary-by-hour.csv")
	sizeSummaryByDayWriter := makeSummaryByTimestampCsvWriter(csvManager, "size-summary-by-day.csv")
	interarrivalTimesSummaryWriter := makeSummaryCsvWriter(csvManager, "interarrival-times-summary.csv")
	sizesPerDayWriter := csvManager.Writer("sizes-per-day.csv", []string{"experiment", "node", "timestamp"}, []string{"count"}, new(string), new(string), new(int64), new(int64))

	return []transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "SummarizeSizes",
			Reader:      statsStore,
			Transformer: transformer.TransformFunc(summarizeSizes),
			Writer:      sizeSummaryStore,
		},
		transformer.PipelineStage{
			Name:        "RekeyStatsByHour",
			Reader:      statsStore,
			Transformer: transformer.MakeMapFunc(rekeyStatsByHour),
			Writer:      store.NewTruncatingWriter(statsWithHourStore),
		},
		transformer.PipelineStage{
			Name:        "SummarizeSizesByHour",
			Reader:      statsWithHourStore,
			Transformer: transformer.TransformFunc(summarizeSizesByTimestamp),
			Writer:      sizeSummaryByHourStore,
		},
		transformer.PipelineStage{
			Name:        "RekeyStatsByDay",
			Reader:      statsStore,
			Transformer: transformer.MakeMapFunc(rekeyStatsByDay),
			Writer:      store.NewTruncatingWriter(statsWithDayStore),
		},
		transformer.PipelineStage{
			Name:        "SummarizeSizesByDay",
			Reader:      statsWithDayStore,
			Transformer: transformer.TransformFunc(summarizeSizesByTimestamp),
			Writer:      sizeSummaryByDayStore,
		},
		transformer.PipelineStage{
			Name:        "RekeyStatsByReceivedTimestamp",
			Reader:      statsStore,
			Transformer: transformer.MakeMapFunc(rekeyStatsByReceviedTimestamp),
			Writer:      store.NewTruncatingWriter(statsWithReceivedTimestampStore),
		},
		transformer.PipelineStage{
			Name:        "ComputeInterarrivalTimes",
			Reader:      statsWithReceivedTimestampStore,
			Transformer: transformer.TransformFunc(computeInterarrivalTimes),
			Writer:      store.NewTruncatingWriter(interarrivalTimesStore),
		},
		transformer.PipelineStage{
			Name:        "SummarizeInterarrival",
			Reader:      interarrivalTimesStore,
			Transformer: transformer.TransformFunc(summarizeInterarrivalTimes),
			Writer:      interarrivalTimesSummaryStore,
		},
		transformer.PipelineStage{
			Name:        "SummarizeSizesPerDay",
			Reader:      statsStore,
			Transformer: transformer.TransformFunc(summarizeSizesPerDay),
			Writer:      sizePerDayStore,
		},
		transformer.PipelineStage{
			Name:        "AggregateExperimentsPerDay",
			Reader:      sizePerDayStore,
			Transformer: transformer.TransformFunc(aggregateSizesPerDay),
			Writer:      sizePerDayStore,
		},
		transformer.PipelineStage{
			Name:   "WriteSizesSummary",
			Reader: sizeSummaryStore,
			Writer: sizeSummaryWriter,
		},
		transformer.PipelineStage{
			Name:   "WriteSizesSummaryByHour",
			Reader: sizeSummaryByHourStore,
			Writer: sizeSummaryByHourWriter,
		},
		transformer.PipelineStage{
			Name:   "WriteSizesSummaryByDay",
			Reader: sizeSummaryByDayStore,
			Writer: sizeSummaryByDayWriter,
		},
		transformer.PipelineStage{
			Name:   "WriteInterarrivalTimesSummary",
			Reader: interarrivalTimesSummaryStore,
			Writer: interarrivalTimesSummaryWriter,
		},
		transformer.PipelineStage{
			Name:   "WriteSizePerDaySummary",
			Reader: sizePerDayStore,
			Writer: sizesPerDayWriter,
		},
	}
}
Code example #24
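// BytesPerMinutePipeline maps traces to per-minute byte counts, reduces
// them into per-minute and per-hour totals, and writes the hourly totals to
// the supplied Postgres writer.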
func BytesPerMinutePipeline(levelDbManager store.Manager, bytesPerHourPostgresStore store.Writer) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	mappedStore := levelDbManager.ReadingWriter("bytesperminute-mapped")
	bytesPerMinuteStore := levelDbManager.ReadingWriter("bytesperminute")
	bytesPerHourStore := levelDbManager.ReadingWriter("bytesperhour")
	traceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperminute-trace-key-ranges")
	consolidatedTraceKeyRangesStore := levelDbManager.ReadingDeleter("bytesperminute-consolidated-trace-key-ranges")
	return append([]transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "BytesPerMinuteMapper",
			Reader:      store.NewRangeExcludingReader(tracesStore, traceKeyRangesStore),
			Transformer: transformer.MakeDoTransformer(bytesPerMinuteMapper(transformer.NewNonce())),
			Writer:      mappedStore,
		},
		transformer.PipelineStage{
			Name:        "BytesPerMinuteReducer",
			Reader:      mappedStore,
			Transformer: transformer.TransformFunc(bytesPerMinuteReducer),
			Writer:      bytesPerMinuteStore,
		},
		transformer.PipelineStage{
			Name:        "BytesPerHourReducer",
			Reader:      bytesPerMinuteStore,
			Transformer: transformer.TransformFunc(bytesPerHourReducer),
			Writer:      bytesPerHourStore,
		},
		transformer.PipelineStage{
			Name:   "BytesPerHourPostgres",
			Reader: bytesPerHourStore,
			Writer: bytesPerHourPostgresStore,
		},
	}, TraceKeyRangesPipeline(store.NewRangeExcludingReader(tracesStore, traceKeyRangesStore), traceKeyRangesStore, consolidatedTraceKeyRangesStore)...)
}
Code example #25
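// AggregateStatisticsPipeline aggregates per-trace statistics first by
// session and then by node, writing the node-level aggregates as JSON.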
func AggregateStatisticsPipeline(levelDbManager store.Manager, jsonWriter io.Writer) transformer.Pipeline {
	tracesStore := levelDbManager.Seeker("traces")
	availabilityIntervalsStore := levelDbManager.Seeker("consistent-ranges")
	traceAggregatesStore := levelDbManager.SeekingWriter("statistics-trace-aggregates")
	sessionAggregatesStore := levelDbManager.ReadingWriter("statistics-session-aggregates")
	nodeAggregatesStore := levelDbManager.ReadingWriter("statistics-node-aggregates")
	sessionsStore := levelDbManager.ReadingDeleter("statistics-sessions")
	traceKeyRangesStore := levelDbManager.ReadingDeleter("statistics-trace-key-ranges")
	consolidatedTraceKeyRangesStore := levelDbManager.ReadingDeleter("statistics-consolidated-trace-key-ranges")
	newTracesStore := store.NewRangeExcludingReader(store.NewRangeIncludingReader(tracesStore, availabilityIntervalsStore), traceKeyRangesStore)
	return append([]transformer.PipelineStage{
		transformer.PipelineStage{
			Name:        "AggregateStatisticsMapper",
			Reader:      store.NewRangeExcludingReader(tracesStore, traceKeyRangesStore),
			Transformer: transformer.MakeMapFunc(aggregateStatisticsMapper),
			Writer:      traceAggregatesStore,
		},
		SessionPipelineStage(newTracesStore, sessionsStore),
		transformer.PipelineStage{
			Name:        "AggregateStatisticsReduceBySession",
			Reader:      store.NewPrefixIncludingReader(traceAggregatesStore, sessionsStore),
			Transformer: transformer.TransformFunc(aggregateStatisticsReduceBySession),
			Writer:      sessionAggregatesStore,
		},
		transformer.PipelineStage{
			Name:        "AggregateStatisticsReducer",
			Reader:      sessionAggregatesStore,
			Transformer: transformer.TransformFunc(aggregateStatisticsReducer),
			Writer:      nodeAggregatesStore,
		},
		transformer.PipelineStage{
			Name:   "AggregateStatisticsJson",
			Reader: nodeAggregatesStore,
			Writer: &aggregateStatisticsJsonStore{writer: jsonWriter},
		},
	}, TraceKeyRangesPipeline(newTracesStore, traceKeyRangesStore, consolidatedTraceKeyRangesStore)...)
}