Example 1

mergeSegment re-inserts every row of a timestamp segment into a new DB, converting all non-string dimension values and all metric values to float64 along the way.
// mergeSegment deserializes every row in segment, attaches the segment's
// timestamp, converts the numeric column values to float64, and inserts the
// rows into newDB.
func mergeSegment(newDB, db *gumshoe.DB, segment *timestampSegment) error {
	// NOTE(caleb): Have to do more nasty float conversion in this function. See NOTE(caleb) in migrate.go.
	at := float64(segment.at.Unix())
	rows := make([]gumshoe.UnpackedRow, 0, len(segment.Bytes)/db.RowSize)
	for i := 0; i < len(segment.Bytes); i += db.RowSize {
		row := gumshoe.RowBytes(segment.Bytes[i : i+db.RowSize])
		unpacked := db.DeserializeRow(row)
		unpacked.RowMap[db.TimestampColumn.Name] = at
		for _, dim := range db.Schema.DimensionColumns {
			if dim.String {
				continue
			}
			value := unpacked.RowMap[dim.Name]
			if value == nil {
				continue
			}
			convertValueToFloat64(unpacked.RowMap, dim.Name)
		}
	for _, metric := range db.Schema.MetricColumns {
		convertValueToFloat64(unpacked.RowMap, metric.Name)
	}
		rows = append(rows, unpacked)
	}
	return newDB.InsertUnpacked(rows)
}
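
Example 1 calls a convertValueToFloat64 helper that is not shown here. A minimal sketch of what it might do, assuming gumshoe.RowMap maps column names to untyped (interface{}) values and that deserialized values arrive as fixed-size numeric types (the real helper lives elsewhere in the migration tool):

func convertValueToFloat64(row gumshoe.RowMap, name string) {
	switch v := row[name].(type) {
	case uint8:
		row[name] = float64(v)
	case uint16:
		row[name] = float64(v)
	case uint32:
		row[name] = float64(v)
	case int64:
		row[name] = float64(v)
	case float32:
		row[name] = float64(v)
	}
	// Values that are already float64 (and any other types) are left untouched.
}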
Example 2

migrateSegment is the more general variant: it attaches the segment's timestamp and delegates any further per-row rewriting to a caller-supplied convert callback.
// migrateSegment deserializes every row in segment, attaches the segment's
// timestamp, applies the caller-supplied convert function to each row, and
// inserts the rows into newDB.
func migrateSegment(newDB, oldDB *gumshoe.DB, segment *timestampSegment,
	convert func(gumshoe.UnpackedRow)) error {

	at := uint32(segment.at.Unix())
	rows := make([]gumshoe.UnpackedRow, 0, len(segment.Bytes)/oldDB.RowSize)
	for i := 0; i < len(segment.Bytes); i += oldDB.RowSize {
		row := gumshoe.RowBytes(segment.Bytes[i : i+oldDB.RowSize])
		unpacked := oldDB.DeserializeRow(row)
		// Attach the segment's timestamp to this row.
		unpacked.RowMap[oldDB.TimestampColumn.Name] = at
		convert(unpacked)
		rows = append(rows, unpacked)
	}
	return newDB.InsertUnpacked(rows)
}
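
The convert callback sees each row after the timestamp has been attached and before the batch insert, so it can rewrite the row freely. A hypothetical callback that renames a dimension column during migration (the "country"/"country_code" names are invented for illustration):

if err := migrateSegment(newDB, oldDB, segment, func(row gumshoe.UnpackedRow) {
	// Rename the hypothetical "country" dimension to "country_code".
	row.RowMap["country_code"] = row.RowMap["country"]
	delete(row.RowMap, "country")
}); err != nil {
	return err
}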