// runAggregateStatisticsPipeline seeds a slice-backed store with the given
// consistent ranges, then writes each batch of traces in turn, running the
// aggregate statistics pipeline after every batch. Only the output of the
// final pipeline run is printed to stdout.
func runAggregateStatisticsPipeline(consistentRanges []*store.Record, allTraces ...map[string]Trace) {
	levelDbManager := store.NewSliceManager()

	availabilityIntervalsStore := levelDbManager.Writer("consistent-ranges")
	availabilityIntervalsStore.BeginWriting()
	for _, record := range consistentRanges {
		availabilityIntervalsStore.WriteRecord(record)
	}
	availabilityIntervalsStore.EndWriting()

	// Initialize eagerly so the final Printf is safe even when allTraces is
	// empty. Previously this was a nil *bytes.Buffer, and writer.Bytes()
	// below would panic with a nil pointer dereference for zero batches.
	writer := bytes.NewBuffer([]byte{})

	tracesStore := levelDbManager.Writer("traces")
	for _, traces := range allTraces {
		tracesStore.BeginWriting()
		for encodedKey, trace := range traces {
			encodedTrace, err := proto.Marshal(&trace)
			if err != nil {
				panic(fmt.Errorf("Error encoding protocol buffer: %v", err))
			}
			tracesStore.WriteRecord(&store.Record{Key: []byte(encodedKey), Value: encodedTrace})
		}
		tracesStore.EndWriting()

		// Fresh buffer per batch: only the last run's output survives.
		writer = bytes.NewBuffer([]byte{})

		transformer.RunPipeline(AggregateStatisticsPipeline(levelDbManager, writer))
	}
	fmt.Printf("%s", writer.Bytes())
}
// ExampleFilterSessions seeds availability ranges and session traces, runs
// the filter-sessions pipeline over the window [80, 120] seconds, and checks
// which sessions survive via the expected Output block.
func ExampleFilterSessions() {
	usecs := int64(1000000)

	levelDbManager := store.NewSliceManager()

	traceKeyRangesStore := levelDbManager.Writer("availability-done")
	traceKeyRangesStore.BeginWriting()
	traceKeyRangesStore.WriteRecord(makeRangeRecord("node", 30*usecs, 0, 2))
	traceKeyRangesStore.WriteRecord(makeRangeRecord("node", 31*usecs, 0, 1))
	traceKeyRangesStore.WriteRecord(makeRangeRecord("node", 100*usecs, 0, 10))
	traceKeyRangesStore.WriteRecord(makeRangeRecord("node", 200*usecs, 2, 8))
	// Fix: this previously called BeginWriting a second time instead of
	// closing the writer, leaving the range store stuck in writing mode.
	traceKeyRangesStore.EndWriting()

	tracesStore := levelDbManager.Writer("traces")
	tracesStore.BeginWriting()
	tracesStore.WriteRecord(makeSessionRecord("node", 30*usecs, 1))
	tracesStore.WriteRecord(makeSessionRecord("node", 31*usecs, 3))
	tracesStore.WriteRecord(makeSessionRecord("node", 100*usecs, 2))
	tracesStore.WriteRecord(makeSessionRecord("node", 200*usecs, 3))
	tracesStore.EndWriting()

	runFilterSessionsPipeline(80, 120, levelDbManager)

	// Output:
	// node 30000000 1
	// node 100000000 2
}
// runAvailabilityPipelineAugmented runs the availability pipeline twice: once
// after loading the initial timestamps, and again after augmenting the store
// with moreTimestamps. Only the second run's output is printed, so callers
// can observe how availability changes once the extra traces are present.
func runAvailabilityPipelineAugmented(startTimestamp int64, timestamps map[string]int64, moreTimestamps map[string]int64) {
	levelDbManager := store.NewSliceManager()

	tracesStore := levelDbManager.Writer("traces")

	// writeTraces marshals one Trace per (key, creation timestamp) pair into
	// the traces store; both loading phases share this logic.
	writeTraces := func(timestampsByKey map[string]int64) {
		tracesStore.BeginWriting()
		for encodedKey, timestamp := range timestampsByKey {
			trace := Trace{
				TraceCreationTimestamp: proto.Int64(timestamp),
			}
			encodedTrace, err := proto.Marshal(&trace)
			if err != nil {
				panic(fmt.Errorf("Error encoding protocol buffer: %v", err))
			}
			tracesStore.WriteRecord(&store.Record{Key: []byte(encodedKey), Value: encodedTrace})
		}
		tracesStore.EndWriting()
	}

	writeTraces(timestamps)

	writer := bytes.NewBuffer([]byte{})
	transformer.RunPipeline(AvailabilityPipeline(levelDbManager, writer, startTimestamp))

	writeTraces(moreTimestamps)

	// NOTE: a second slice of marshaled records used to be built here from
	// moreTimestamps but was never used anywhere; that dead work is removed.
	anotherWriter := bytes.NewBuffer([]byte{})
	transformer.RunPipeline(AvailabilityPipeline(levelDbManager, anotherWriter, startTimestamp))
	fmt.Printf("%s", anotherWriter.Bytes())
}
// runBytesPerDevicePipeline loads the consistent ranges and successive trace
// batches into a slice-backed store, runs the bytes-per-device pipeline after
// each batch, then dumps the accumulated per-device byte counts to stdout.
func runBytesPerDevicePipeline(consistentRanges []*store.Record, allTraces ...map[string]Trace) {
	manager := store.NewSliceManager()

	rangesWriter := manager.Writer("consistent-ranges")
	rangesWriter.BeginWriting()
	for _, rangeRecord := range consistentRanges {
		rangesWriter.WriteRecord(rangeRecord)
	}
	rangesWriter.EndWriting()

	postgresStore := store.SliceStore{}

	tracesWriter := manager.Writer("traces")
	for _, traceBatch := range allTraces {
		tracesWriter.BeginWriting()
		for key, trace := range traceBatch {
			value, err := proto.Marshal(&trace)
			if err != nil {
				panic(fmt.Errorf("Error encoding protocol buffer: %v", err))
			}
			tracesWriter.WriteRecord(&store.Record{Key: []byte(key), Value: value})
		}
		tracesWriter.EndWriting()

		transformer.RunPipeline(BytesPerDevicePipeline(manager, &postgresStore))
	}

	// Print every (node, MAC, timestamp) -> byte-count record the pipeline
	// produced.
	reader := manager.Reader("bytesperdevice")
	reader.BeginReading()
	for {
		record, err := reader.ReadRecord()
		if err != nil {
			panic(err)
		}
		if record == nil {
			break
		}
		var (
			nodeId, macAddress string
			timestamp, count   int64
		)
		lex.DecodeOrDie(record.Key, &nodeId, &macAddress, &timestamp)
		lex.DecodeOrDie(record.Value, &count)
		fmt.Printf("%s,%s,%d: %d\n", nodeId, macAddress, timestamp, count)
	}
	reader.EndReading()
}
// runFilesystemUsagePipeline loads the given log contents into a slice-backed
// store, runs the filesystem usage pipeline, and prints the resulting CSV to
// stdout.
func runFilesystemUsagePipeline(logs map[string]string) {
	manager := store.NewSliceManager()

	logsWriter := manager.Writer("logs")
	logsWriter.BeginWriting()
	for key, content := range logs {
		logsWriter.WriteRecord(&store.Record{
			Key:   []byte(key),
			Value: lex.EncodeOrDie(content),
		})
	}
	logsWriter.EndWriting()

	csvManager := store.NewCsvStdoutManager()
	transformer.RunPipeline(FilesystemUsagePipeline(manager, csvManager))
	csvManager.PrintToStdout("filesystem.csv")
}
// runBytesPerMinutePipeline writes each batch of traces into a slice-backed
// store, runs the bytes-per-minute pipeline after every batch, then prints
// the resulting per-node, per-minute byte counts to stdout.
func runBytesPerMinutePipeline(allTraces ...map[string]Trace) {
	manager := store.NewSliceManager()

	postgresStore := store.SliceStore{}

	tracesWriter := manager.Writer("traces")
	for _, traceBatch := range allTraces {
		tracesWriter.BeginWriting()
		for key, trace := range traceBatch {
			value, err := proto.Marshal(&trace)
			if err != nil {
				panic(fmt.Errorf("Error encoding protocol buffer: %v", err))
			}
			tracesWriter.WriteRecord(&store.Record{Key: []byte(key), Value: value})
		}
		tracesWriter.EndWriting()

		transformer.RunPipeline(BytesPerMinutePipeline(manager, &postgresStore))
	}

	// Print every (node, timestamp) -> byte-count record the pipeline
	// produced.
	reader := manager.Reader("bytesperminute")
	reader.BeginReading()
	for {
		record, err := reader.ReadRecord()
		if err != nil {
			panic(err)
		}
		if record == nil {
			break
		}
		var (
			nodeId           string
			timestamp, count int64
		)
		lex.DecodeOrDie(record.Key, &nodeId, &timestamp)
		lex.DecodeOrDie(record.Value, &count)
		fmt.Printf("%s,%d: %d\n", nodeId, timestamp, count)
	}
	reader.EndReading()
}
// runLookupsPerDevicePipeline seeds a slice-backed store with traces,
// consistent availability ranges, and an address-ID-to-MAC table, runs the
// lookups-per-device pipeline, and prints both the per-device and the
// per-device-per-hour lookup counts to stdout.
func runLookupsPerDevicePipeline(traces map[string]Trace, consistentRanges []*store.Record, addressIdToMac map[string]string) {
	manager := store.NewSliceManager()

	tracesWriter := manager.Writer("traces")
	tracesWriter.BeginWriting()
	for key, trace := range traces {
		value, err := proto.Marshal(&trace)
		if err != nil {
			panic(fmt.Errorf("Error encoding protocol buffer: %v", err))
		}
		tracesWriter.WriteRecord(&store.Record{Key: []byte(key), Value: value})
	}
	tracesWriter.EndWriting()

	rangesWriter := manager.Writer("consistent-ranges")
	rangesWriter.BeginWriting()
	for _, rangeRecord := range consistentRanges {
		rangesWriter.WriteRecord(rangeRecord)
	}
	rangesWriter.EndWriting()

	addressIdWriter := manager.Writer("bytesperdomain-address-id-table")
	addressIdWriter.BeginWriting()
	for key, value := range addressIdToMac {
		addressIdWriter.WriteRecord(&store.Record{Key: []byte(key), Value: []byte(value)})
	}
	addressIdWriter.EndWriting()

	transformer.RunPipeline(LookupsPerDevicePipeline(manager))

	// Dump total lookups keyed by (node, MAC, domain).
	fmt.Printf("LookupsPerDevice:\n")
	perDeviceReader := manager.Reader("lookupsperdevice-lookups-per-device")
	perDeviceReader.BeginReading()
	for {
		record, err := perDeviceReader.ReadRecord()
		if err != nil {
			panic(err)
		}
		if record == nil {
			break
		}
		var nodeId, macAddress, domain string
		var count int64
		lex.DecodeOrDie(record.Key, &nodeId, &macAddress, &domain)
		lex.DecodeOrDie(record.Value, &count)
		fmt.Printf("%s,%s,%s: %d\n", nodeId, macAddress, domain, count)
	}
	perDeviceReader.EndReading()

	// Dump hourly lookups keyed by (node, MAC, domain, timestamp).
	fmt.Printf("\nLookupsPerDevicePerHour:\n")
	perHourReader := manager.Reader("lookupsperdevice-lookups-per-device-per-hour")
	perHourReader.BeginReading()
	for {
		record, err := perHourReader.ReadRecord()
		if err != nil {
			panic(err)
		}
		if record == nil {
			break
		}
		var nodeId, macAddress, domain string
		var timestamp, count int64
		lex.DecodeOrDie(record.Key, &nodeId, &macAddress, &domain, &timestamp)
		lex.DecodeOrDie(record.Value, &count)
		fmt.Printf("%s,%s,%s,%d: %d\n", nodeId, macAddress, domain, timestamp, count)
	}
	perHourReader.EndReading()
}