// runConsolidateTraceKeyRanges runs TraceKeyRangesPipeline over two successive batches of records and prints each consolidated range as a pair of trace keys.
func runConsolidateTraceKeyRanges(records, moreRecords []*store.Record) {
	traces := store.SliceStore{}
	traces.BeginWriting()
	for _, record := range records {
		traces.WriteRecord(record)
	}
	traces.EndWriting()

	rangesStore := store.SliceStore{}
	consolidatedStore := store.SliceStore{}
	transformer.RunPipeline(TraceKeyRangesPipeline(&traces, &rangesStore, &consolidatedStore))

	moreTraces := store.SliceStore{}
	moreTraces.BeginWriting()
	for _, record := range moreRecords {
		moreTraces.WriteRecord(record)
	}
	moreTraces.EndWriting()

	transformer.RunPipeline(TraceKeyRangesPipeline(&moreTraces, &rangesStore, &consolidatedStore))

	rangesStore.BeginReading()
	for {
		record, err := rangesStore.ReadRecord()
		if err != nil {
			panic(err)
		}
		if record == nil {
			break
		}
		fmt.Printf("%s %s\n", formatTraceKey(record.Key), formatTraceKey(record.Value))
	}
	rangesStore.EndReading()
}
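Every helper in this file fills a store with the same BeginWriting/WriteRecord/EndWriting sequence before running a pipeline. A minimal sketch of a helper that could factor that out; fillSliceStore is a hypothetical name, not part of the original code:

func fillSliceStore(records []*store.Record) *store.SliceStore {
	// Hypothetical helper mirroring the write loops used throughout this file.
	s := &store.SliceStore{}
	s.BeginWriting()
	for _, record := range records {
		s.WriteRecord(record)
	}
	s.EndWriting()
	return s
}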
// runAvailabilityPipelineAugmented runs AvailabilityPipeline over an initial set of traces, augments the traces store with a second set, reruns the pipeline, and prints only the second run's output.
func runAvailabilityPipelineAugmented(startTimestamp int64, timestamps map[string]int64, moreTimestamps map[string]int64) {
	levelDbManager := store.NewSliceManager()

	tracesStore := levelDbManager.Writer("traces")
	tracesStore.BeginWriting()
	for encodedKey, timestamp := range timestamps {
		trace := Trace{
			TraceCreationTimestamp: proto.Int64(timestamp),
		}
		encodedTrace, err := proto.Marshal(&trace)
		if err != nil {
			panic(fmt.Errorf("Error encoding protocol buffer: %v", err))
		}
		tracesStore.WriteRecord(&store.Record{Key: []byte(encodedKey), Value: encodedTrace})
	}
	tracesStore.EndWriting()

	writer := bytes.NewBuffer([]byte{})
	transformer.RunPipeline(AvailabilityPipeline(levelDbManager, writer, startTimestamp))

	tracesStore.BeginWriting()
	for encodedKey, timestamp := range moreTimestamps {
		trace := Trace{
			TraceCreationTimestamp: proto.Int64(timestamp),
		}
		encodedTrace, err := proto.Marshal(&trace)
		if err != nil {
			panic(fmt.Errorf("Error encoding protocol buffer: %v", err))
		}
		tracesStore.WriteRecord(&store.Record{Key: []byte(encodedKey), Value: encodedTrace})
	}
	tracesStore.EndWriting()

	anotherWriter := bytes.NewBuffer([]byte{})
	transformer.RunPipeline(AvailabilityPipeline(levelDbManager, anotherWriter, startTimestamp))
	fmt.Printf("%s", anotherWriter.Bytes())
}
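The loop that wraps each timestamp in a Trace protocol buffer appears several times in this file. A sketch of a helper that could factor it out; traceRecords is a hypothetical name:

func traceRecords(timestamps map[string]int64) []*store.Record {
	// Hypothetical helper; the body mirrors the encoding loops above.
	records := make([]*store.Record, 0, len(timestamps))
	for encodedKey, timestamp := range timestamps {
		trace := Trace{TraceCreationTimestamp: proto.Int64(timestamp)}
		encodedTrace, err := proto.Marshal(&trace)
		if err != nil {
			panic(fmt.Errorf("Error encoding protocol buffer: %v", err))
		}
		records = append(records, &store.Record{Key: []byte(encodedKey), Value: encodedTrace})
	}
	return records
}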
// runSessions runs SessionPipelineStage over the given trace records and prints each resulting session key.
func runSessions(records []*store.Record) {
	traces := store.SliceStore{}
	traces.BeginWriting()
	for _, record := range records {
		traces.WriteRecord(record)
	}
	traces.EndWriting()

	sessionsStore := store.SliceStore{}
	transformer.RunPipeline([]transformer.PipelineStage{
		SessionPipelineStage(&traces, &sessionsStore),
	})

	sessionsStore.BeginReading()
	for {
		record, err := sessionsStore.ReadRecord()
		if err != nil {
			panic(err)
		}
		if record == nil {
			break
		}
		fmt.Printf("%s\n", formatSessionKey(record.Key))
	}
	sessionsStore.EndReading()
}
// runAggregateStatisticsPipeline loads the consistent ranges, runs AggregateStatisticsPipeline once per traces map, and prints the output of the final run.
func runAggregateStatisticsPipeline(consistentRanges []*store.Record, allTraces ...map[string]Trace) {
	levelDbManager := store.NewSliceManager()

	availabilityIntervalsStore := levelDbManager.Writer("consistent-ranges")
	availabilityIntervalsStore.BeginWriting()
	for _, record := range consistentRanges {
		availabilityIntervalsStore.WriteRecord(record)
	}
	availabilityIntervalsStore.EndWriting()

	var writer *bytes.Buffer // holds the output of the most recent pipeline run

	tracesStore := levelDbManager.Writer("traces")
	for _, traces := range allTraces {
		tracesStore.BeginWriting()
		for encodedKey, trace := range traces {
			encodedTrace, err := proto.Marshal(&trace)
			if err != nil {
				panic(fmt.Errorf("Error encoding protocol buffer: %v", err))
			}
			tracesStore.WriteRecord(&store.Record{Key: []byte(encodedKey), Value: encodedTrace})
		}
		tracesStore.EndWriting()

		writer = bytes.NewBuffer([]byte{})

		transformer.RunPipeline(AggregateStatisticsPipeline(levelDbManager, writer))
	}
	if writer != nil { // guard against an empty allTraces, which leaves writer nil
		fmt.Printf("%s", writer.Bytes())
	}
}
Example #5
func main() {
	pipelineFuncs := map[string]transformer.PipelineThunk{
		"index":    pipelineIndex,
		"disjoint": pipelineDisjointPackages,
	}
	name, pipeline := transformer.ParsePipelineChoice(pipelineFuncs)

	go cube.Run(fmt.Sprintf("bismark_experiments_manager_logs_pipeline_%s", name))

	transformer.RunPipeline(pipeline)
}
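ParsePipelineChoice selects one of the named thunks from the command line and returns its name along with the constructed pipeline, which RunPipeline then executes while cube.Run reports metrics in the background. A sketch of what one thunk might look like, assuming transformer.PipelineThunk is a no-argument constructor returning the []transformer.PipelineStage that RunPipeline consumes elsewhere in these examples; the body reuses SessionPipelineStage from above and is illustrative only:

func pipelineSessionsSketch() []transformer.PipelineStage {
	// Hypothetical thunk; the PipelineThunk signature is inferred from how
	// ParsePipelineChoice and RunPipeline are used in these examples.
	traces := store.SliceStore{}
	sessions := store.SliceStore{}
	return []transformer.PipelineStage{
		SessionPipelineStage(&traces, &sessions),
	}
}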
Example #6
func main() {
	pipelineFuncs := map[string]transformer.PipelineThunk{
		"csv":       pipelineCsv,
		"stats":     pipelineStats,
		"summarize": pipelineSummarize,
		"timescsv":  pipelineTimesCsv,
	}
	name, pipeline := transformer.ParsePipelineChoice(pipelineFuncs)

	go cube.Run(fmt.Sprintf("bismark_uploads_stats_%s", name))

	transformer.RunPipeline(pipeline)
}
// runBytesPerDevicePipeline runs BytesPerDevicePipeline once per traces map, then prints the per-device byte counts from the bytesperdevice store.
func runBytesPerDevicePipeline(consistentRanges []*store.Record, allTraces ...map[string]Trace) {
	levelDbManager := store.NewSliceManager()

	availabilityIntervalsStore := levelDbManager.Writer("consistent-ranges")
	availabilityIntervalsStore.BeginWriting()
	for _, record := range consistentRanges {
		availabilityIntervalsStore.WriteRecord(record)
	}
	availabilityIntervalsStore.EndWriting()

	bytesPerDevicePostgresStore := store.SliceStore{}

	tracesStore := levelDbManager.Writer("traces")
	for _, traces := range allTraces {
		tracesStore.BeginWriting()
		for encodedKey, trace := range traces {
			encodedTrace, err := proto.Marshal(&trace)
			if err != nil {
				panic(fmt.Errorf("Error encoding protocol buffer: %v", err))
			}
			tracesStore.WriteRecord(&store.Record{Key: []byte(encodedKey), Value: encodedTrace})
		}
		tracesStore.EndWriting()

		transformer.RunPipeline(BytesPerDevicePipeline(levelDbManager, &bytesPerDevicePostgresStore))
	}

	bytesPerDeviceStore := levelDbManager.Reader("bytesperdevice")
	bytesPerDeviceStore.BeginReading()
	for {
		record, err := bytesPerDeviceStore.ReadRecord()
		if err != nil {
			panic(err)
		}
		if record == nil {
			break
		}
		var nodeId, macAddress string
		var timestamp, count int64
		lex.DecodeOrDie(record.Key, &nodeId, &macAddress, &timestamp)
		lex.DecodeOrDie(record.Value, &count)
		fmt.Printf("%s,%s,%d: %d\n", nodeId, macAddress, timestamp, count)
	}
	bytesPerDeviceStore.EndReading()
}
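The keys and values decoded above are lexicographic tuple encodings. A round-trip sketch, assuming lex.EncodeOrDie is variadic in the same way the lex.DecodeOrDie calls in this file are; the literal values are made up:

func lexRoundTripSketch() {
	// Assumption: lex.EncodeOrDie accepts the same mixed argument list that
	// lex.DecodeOrDie unpacks.
	key := lex.EncodeOrDie("node1", "00:11:22:33:44:55", int64(3600))
	var nodeId, macAddress string
	var timestamp int64
	lex.DecodeOrDie(key, &nodeId, &macAddress, &timestamp)
	fmt.Printf("%s,%s,%d\n", nodeId, macAddress, timestamp) // node1,00:11:22:33:44:55,3600
}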
Example #8
// runFilesystemUsagePipeline loads the given logs into a store, runs FilesystemUsagePipeline, and prints the resulting filesystem.csv to stdout.
func runFilesystemUsagePipeline(logs map[string]string) {
	levelDbManager := store.NewSliceManager()
	logsStore := levelDbManager.Writer("logs")
	logsStore.BeginWriting()
	for encodedKey, content := range logs {
		record := store.Record{
			Key:   []byte(encodedKey),
			Value: lex.EncodeOrDie(content),
		}
		logsStore.WriteRecord(&record)
	}
	logsStore.EndWriting()

	csvManager := store.NewCsvStdoutManager()

	transformer.RunPipeline(FilesystemUsagePipeline(levelDbManager, csvManager))
	csvManager.PrintToStdout("filesystem.csv")
}
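A hypothetical invocation of the helper above; the key format FilesystemUsagePipeline expects is not shown in this file, so the map key below is illustrative only:

func exampleFilesystemUsage() {
	// Hypothetical input; the real key encoding depends on FilesystemUsagePipeline.
	runFilesystemUsagePipeline(map[string]string{
		"node1": "/dev/root 7936 4864 3072 61% /",
	})
}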
Example #9
func main() {
	pipelineFuncs := map[string]transformer.PipelineThunk{
		"devicescount": pipelineDevicesCount,
		"filesystem":   pipelineFilesystem,
		"index":        pipelineIndex,
		"iproute":      pipelineIpRoute,
		"memory":       pipelineMemory,
		"packages":     pipelinePackages,
		"reboots":      pipelineReboots,
		"summarize":    pipelineSummarize,
		"uptime":       pipelineUptime,
	}
	name, pipeline := transformer.ParsePipelineChoice(pipelineFuncs)

	go cube.Run(fmt.Sprintf("bismark_health_pipeline_%s", name))

	transformer.RunPipeline(pipeline)
}
Example #10
func main() {
	pipelineFuncs := map[string]transformer.PipelineThunk{
		"availability":     pipelineAvailability,
		"bytesperdevice":   pipelineBytesPerDevice,
		"bytesperdomain":   pipelineBytesPerDomain,
		"bytesperminute":   pipelineBytesPerMinute,
		"filternode":       pipelineFilterNode,
		"filterdates":      pipelineFilterDates,
		"index":            pipelineIndex,
		"lookupsperdevice": pipelineLookupsPerDevice,
		"statistics":       pipelineStatistics,
	}
	name, pipeline := transformer.ParsePipelineChoice(pipelineFuncs)

	go cube.Run(fmt.Sprintf("bismark_passive_pipeline_%s", name))

	transformer.RunPipeline(pipeline)
}
// runFilterSessionsPipeline runs FilterSessionsPipeline over the manager's stores for the given time range and prints the trace key of each record that survives the filter.
func runFilterSessionsPipeline(startSecs, endSecs int64, levelDbManager store.Manager) {
	transformer.RunPipeline(FilterSessionsPipeline(startSecs, endSecs, levelDbManager, "test"))

	filteredStore := levelDbManager.Reader("test")
	filteredStore.BeginReading()
	for {
		record, err := filteredStore.ReadRecord()
		if err != nil {
			panic(err)
		}
		if record == nil {
			break
		}
		var traceKey TraceKey
		lex.DecodeOrDie(record.Key, &traceKey)
		fmt.Printf("%s %d %d\n", traceKey.NodeId, traceKey.SessionId, traceKey.SequenceNumber)
	}
	filteredStore.EndReading()
}
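Unlike the other helpers in this file, runFilterSessionsPipeline takes its store manager as a parameter. A hypothetical call using the slice-backed manager seen elsewhere in these examples; whatever input stores FilterSessionsPipeline reads would need to be populated first:

func exampleFilterSessions() {
	// Hypothetical invocation with a one-day window; the manager's input
	// stores must be filled before FilterSessionsPipeline can filter anything.
	levelDbManager := store.NewSliceManager()
	runFilterSessionsPipeline(0, 86400, levelDbManager)
}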
// runBytesPerMinutePipeline runs BytesPerMinutePipeline once per traces map, then prints the per-node, per-minute byte counts from the bytesperminute store.
func runBytesPerMinutePipeline(allTraces ...map[string]Trace) {
	levelDbManager := store.NewSliceManager()

	bytesPerHourPostgresStore := store.SliceStore{}

	tracesStore := levelDbManager.Writer("traces")
	for _, traces := range allTraces {
		tracesStore.BeginWriting()
		for encodedKey, trace := range traces {
			encodedTrace, err := proto.Marshal(&trace)
			if err != nil {
				panic(fmt.Errorf("Error encoding protocol buffer: %v", err))
			}
			tracesStore.WriteRecord(&store.Record{Key: []byte(encodedKey), Value: encodedTrace})
		}
		tracesStore.EndWriting()

		transformer.RunPipeline(BytesPerMinutePipeline(levelDbManager, &bytesPerHourPostgresStore))
	}

	bytesPerMinuteStore := levelDbManager.Reader("bytesperminute")
	bytesPerMinuteStore.BeginReading()
	for {
		record, err := bytesPerMinuteStore.ReadRecord()
		if err != nil {
			panic(err)
		}
		if record == nil {
			break
		}
		var nodeId string
		var timestamp, count int64
		lex.DecodeOrDie(record.Key, &nodeId, &timestamp)
		lex.DecodeOrDie(record.Value, &count)
		fmt.Printf("%s,%d: %d\n", nodeId, timestamp, count)
	}
	bytesPerMinuteStore.EndReading()
}
// runLookupsPerDevicePipeline runs LookupsPerDevicePipeline over the given traces, consistent ranges, and address-to-MAC table, then prints lookup counts per device and per device per hour.
func runLookupsPerDevicePipeline(traces map[string]Trace, consistentRanges []*store.Record, addressIdToMac map[string]string) {
	levelDbManager := store.NewSliceManager()

	tracesStore := levelDbManager.Writer("traces")
	tracesStore.BeginWriting()
	for encodedKey, trace := range traces {
		encodedTrace, err := proto.Marshal(&trace)
		if err != nil {
			panic(fmt.Errorf("Error encoding protocol buffer: %v", err))
		}
		tracesStore.WriteRecord(&store.Record{Key: []byte(encodedKey), Value: encodedTrace})
	}
	tracesStore.EndWriting()

	availabilityIntervalsStore := levelDbManager.Writer("consistent-ranges")
	availabilityIntervalsStore.BeginWriting()
	for _, record := range consistentRanges {
		availabilityIntervalsStore.WriteRecord(record)
	}
	availabilityIntervalsStore.EndWriting()

	addressIdStore := levelDbManager.Writer("bytesperdomain-address-id-table")
	addressIdStore.BeginWriting()
	for encodedKey, encodedValue := range addressIdToMac {
		addressIdStore.WriteRecord(&store.Record{Key: []byte(encodedKey), Value: []byte(encodedValue)})
	}
	addressIdStore.EndWriting()

	transformer.RunPipeline(LookupsPerDevicePipeline(levelDbManager))

	fmt.Printf("LookupsPerDevice:\n")
	lookupsPerDeviceStore := levelDbManager.Reader("lookupsperdevice-lookups-per-device")
	lookupsPerDeviceStore.BeginReading()
	for {
		record, err := lookupsPerDeviceStore.ReadRecord()
		if err != nil {
			panic(err)
		}
		if record == nil {
			break
		}
		var (
			nodeId, macAddress, domain string
			count                      int64
		)
		lex.DecodeOrDie(record.Key, &nodeId, &macAddress, &domain)
		lex.DecodeOrDie(record.Value, &count)
		fmt.Printf("%s,%s,%s: %d\n", nodeId, macAddress, domain, count)
	}
	lookupsPerDeviceStore.EndReading()

	fmt.Printf("\nLookupsPerDevicePerHour:\n")
	lookupsPerDevicePerHourStore := levelDbManager.Reader("lookupsperdevice-lookups-per-device-per-hour")
	lookupsPerDevicePerHourStore.BeginReading()
	for {
		record, err := lookupsPerDevicePerHourStore.ReadRecord()
		if err != nil {
			panic(err)
		}
		if record == nil {
			break
		}
		var (
			nodeId, macAddress, domain string
			timestamp, count           int64
		)
		lex.DecodeOrDie(record.Key, &nodeId, &macAddress, &domain, &timestamp)
		lex.DecodeOrDie(record.Value, &count)
		fmt.Printf("%s,%s,%s,%d: %d\n", nodeId, macAddress, domain, timestamp, count)
	}
	lookupsPerDevicePerHourStore.EndReading()
}
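The read loop above recurs, with only the decoding step varying, in nearly every helper in this file. A sketch of a generic version; forEachRecord and the recordReader interface are hypothetical, and the method signatures are assumptions based on how the readers are used here:

// recordReader captures the assumed read interface of the stores in this file.
type recordReader interface {
	BeginReading() error
	ReadRecord() (*store.Record, error)
	EndReading() error
}

func forEachRecord(reader recordReader, visit func(*store.Record)) {
	if err := reader.BeginReading(); err != nil {
		panic(err)
	}
	for {
		record, err := reader.ReadRecord()
		if err != nil {
			panic(err)
		}
		if record == nil {
			break
		}
		visit(record)
	}
	if err := reader.EndReading(); err != nil {
		panic(err)
	}
}

With such a helper, each of the print loops above would reduce to a single forEachRecord call plus a per-record formatter.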