// writeWALFiles writes the entries of the shard's WAL segment files as line
// protocol, filtering values to the command's start/end time range. Delete
// entries cannot be represented in an export, so the first one encountered
// triggers a warning on stderr.
func (cmd *Command) writeWALFiles(w io.WriteCloser, files []string, key string) error {
	fmt.Fprintln(w, "# writing wal data")

	// We need to make sure we write in the same order that the WAL received the data.
	sort.Strings(files)

	var once sync.Once
	warn := func() {
		msg := fmt.Sprintf(`WARNING: detected deletes in wal file.
Some series for %q may be brought back by replaying this data.
To resolve, you can either let the shard snapshot prior to exporting the data,
or manually edit the exported file.
`, key)
		fmt.Fprintln(cmd.Stderr, msg)
	}

	// Use a function here to close the files in the defers and not let them
	// accumulate in the loop.
	write := func(f string) error {
		file, err := os.OpenFile(f, os.O_RDONLY, 0600)
		if err != nil {
			return fmt.Errorf("%v", err)
		}
		defer file.Close()

		reader := tsm1.NewWALSegmentReader(file)
		defer reader.Close()

		for reader.Next() {
			entry, err := reader.Read()
			if err != nil {
				n := reader.Count()
				fmt.Fprintf(cmd.Stderr, "file %s corrupt at position %d\n", file.Name(), n)
				break
			}

			switch t := entry.(type) {
			case *tsm1.DeleteWALEntry:
				once.Do(warn)
				continue
			case *tsm1.DeleteRangeWALEntry:
				once.Do(warn)
				continue
			case *tsm1.WriteWALEntry:
				var pairs string

				for key, values := range t.Values {
					measurement, field := tsm1.SeriesAndFieldFromCompositeKey([]byte(key))

					for _, value := range values {
						// Skip values outside the export time range.
						if (value.UnixNano() < cmd.startTime) || (value.UnixNano() > cmd.endTime) {
							continue
						}

						switch value.Value().(type) {
						case float64:
							pairs = field + "=" + fmt.Sprintf("%v", value.Value())
						case int64:
							pairs = field + "=" + fmt.Sprintf("%vi", value.Value())
						case bool:
							pairs = field + "=" + fmt.Sprintf("%v", value.Value())
						case string:
							pairs = field + "=" + fmt.Sprintf("%q", models.EscapeStringField(fmt.Sprintf("%s", value.Value())))
						default:
							pairs = field + "=" + fmt.Sprintf("%v", value.Value())
						}

						fmt.Fprintln(w, string(measurement), pairs, value.UnixNano())
					}
				}
			}
		}
		return nil
	}

	for _, f := range files {
		if err := write(f); err != nil {
			return err
		}
	}
	return nil
}
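// formatFieldPair is a small, self-contained sketch, not part of the original
// exporter, of the field formatting used by the switch statements above and
// below: integers get an "i" suffix, strings are escaped and quoted, and all
// other types use their default formatting. The function name and standalone
// signature are illustrative assumptions; it relies only on fmt and the
// influxdb models package already used here.
func formatFieldPair(field string, v interface{}) string {
	switch v := v.(type) {
	case float64:
		return field + "=" + fmt.Sprintf("%v", v)
	case int64:
		// Line protocol marks integer fields with a trailing "i".
		return field + "=" + fmt.Sprintf("%vi", v)
	case bool:
		return field + "=" + fmt.Sprintf("%v", v)
	case string:
		// String fields must be escaped and wrapped in double quotes.
		return field + "=" + fmt.Sprintf("%q", models.EscapeStringField(v))
	default:
		return field + "=" + fmt.Sprintf("%v", v)
	}
}

// For example, formatFieldPair("value", int64(42)) yields `value=42i`, and a
// full exported line has the shape: <measurement> <field>=<value> <timestamp>.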
// writeFiles is the older cmdExport implementation: it writes the DDL
// (CREATE DATABASE / CREATE RETENTION POLICY statements) followed by the DML
// (the contents of each shard's TSM files) for every database/retention-policy
// path collected in c.files.
func (c *cmdExport) writeFiles() error {
	// Open our output file; optionally layer a gzip writer on top of it.
	out, err := os.Create(c.out)
	if err != nil {
		return err
	}
	defer out.Close()

	var w io.Writer = out
	if c.compress {
		gzw := gzip.NewWriter(out)
		// The gzip writer must be closed before the file so its buffered,
		// compressed data is flushed.
		defer gzw.Close()
		w = gzw
	}

	// Write out all the DDL.
	fmt.Fprintln(w, "# DDL")
	for key := range c.files {
		keys := strings.Split(key, string(byte(os.PathSeparator)))
		fmt.Fprintf(w, "CREATE DATABASE %s\n", keys[0])
		fmt.Fprintf(w, "CREATE RETENTION POLICY %s ON %s DURATION inf REPLICATION 1\n", keys[1], keys[0])
	}

	fmt.Fprintln(w, "# DML")
	for key, files := range c.files {
		keys := strings.Split(key, string(byte(os.PathSeparator)))
		fmt.Fprintf(w, "# CONTEXT-DATABASE:%s\n", keys[0])
		fmt.Fprintf(w, "# CONTEXT-RETENTION-POLICY:%s\n", keys[1])

		for _, f := range files {
			// Use an anonymous function here to close the files in the defers
			// and not let them accumulate in the loop.
			if err := func(f string) error {
				file, err := os.OpenFile(f, os.O_RDONLY, 0600)
				if err != nil {
					return fmt.Errorf("%v", err)
				}
				defer file.Close()

				reader, err := tsm1.NewTSMReader(file)
				if err != nil {
					log.Printf("unable to read %s, skipping\n", f)
					return nil
				}
				defer reader.Close()

				for i := 0; i < reader.KeyCount(); i++ {
					var pairs string
					key, typ := reader.KeyAt(i)
					values, _ := reader.ReadAll(key)
					measurement, field := tsm1.SeriesAndFieldFromCompositeKey(key)

					for _, value := range values {
						switch typ {
						case tsm1.BlockFloat64:
							pairs = field + "=" + fmt.Sprintf("%v", value.Value())
						case tsm1.BlockInteger:
							pairs = field + "=" + fmt.Sprintf("%vi", value.Value())
						case tsm1.BlockBoolean:
							pairs = field + "=" + fmt.Sprintf("%v", value.Value())
						case tsm1.BlockString:
							pairs = field + "=" + fmt.Sprintf("%q", models.EscapeStringField(fmt.Sprintf("%s", value.Value())))
						default:
							pairs = field + "=" + fmt.Sprintf("%v", value.Value())
						}

						fmt.Fprintln(w, measurement, pairs, value.UnixNano())
					}
				}
				return nil
			}(f); err != nil {
				return err
			}
		}
	}
	return nil
}
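// openOutput is a minimal sketch, not part of the original command, of the
// writer layering the export output needs when compression is enabled:
// file -> gzip -> caller. The close order matters: the gzip writer has to be
// closed (which flushes its buffered, compressed data) before the underlying
// file, otherwise the output is truncated. The name openOutput and its
// free-standing signature are illustrative assumptions.
func openOutput(path string, compress bool) (io.Writer, func() error, error) {
	f, err := os.Create(path)
	if err != nil {
		return nil, nil, err
	}
	if !compress {
		// Plain file output: closing the file is all that is needed.
		return f, f.Close, nil
	}
	gzw := gzip.NewWriter(f)
	closeAll := func() error {
		// Flush and close the gzip stream first, then the file beneath it.
		if err := gzw.Close(); err != nil {
			f.Close()
			return err
		}
		return f.Close()
	}
	return gzw, closeAll, nil
}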
// writeTsmFiles writes the data stored in the shard's TSM files as line
// protocol, filtering values to the command's start/end time range.
func (cmd *Command) writeTsmFiles(w io.WriteCloser, files []string) error {
	fmt.Fprintln(w, "# writing tsm data")

	// We need to make sure we write in the same order that the files were written.
	sort.Strings(files)

	// Use a function here to close the files in the defers and not let them
	// accumulate in the loop.
	write := func(f string) error {
		file, err := os.OpenFile(f, os.O_RDONLY, 0600)
		if err != nil {
			return fmt.Errorf("%v", err)
		}
		defer file.Close()

		reader, err := tsm1.NewTSMReader(file)
		if err != nil {
			log.Printf("unable to read %s, skipping\n", f)
			return nil
		}
		defer reader.Close()

		// Skip the file entirely if it holds no data inside the export time range.
		if sgStart, sgEnd := reader.TimeRange(); sgStart > cmd.endTime || sgEnd < cmd.startTime {
			return nil
		}

		for i := 0; i < reader.KeyCount(); i++ {
			var pairs string
			key, typ := reader.KeyAt(i)
			values, _ := reader.ReadAll(string(key))
			measurement, field := tsm1.SeriesAndFieldFromCompositeKey(key)

			for _, value := range values {
				// Skip values outside the export time range.
				if (value.UnixNano() < cmd.startTime) || (value.UnixNano() > cmd.endTime) {
					continue
				}

				switch typ {
				case tsm1.BlockFloat64:
					pairs = field + "=" + fmt.Sprintf("%v", value.Value())
				case tsm1.BlockInteger:
					pairs = field + "=" + fmt.Sprintf("%vi", value.Value())
				case tsm1.BlockBoolean:
					pairs = field + "=" + fmt.Sprintf("%v", value.Value())
				case tsm1.BlockString:
					pairs = field + "=" + fmt.Sprintf("%q", models.EscapeStringField(fmt.Sprintf("%s", value.Value())))
				default:
					pairs = field + "=" + fmt.Sprintf("%v", value.Value())
				}

				fmt.Fprintln(w, string(measurement), pairs, value.UnixNano())
			}
		}
		return nil
	}

	for _, f := range files {
		if err := write(f); err != nil {
			return err
		}
	}
	return nil
}
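// exportShard is a hypothetical driver, not part of the original command,
// showing one plausible way writeTsmFiles and writeWALFiles combine for a
// single shard: export the on-disk TSM data first, then whatever is still
// sitting in the WAL. The name exportShard and the key/tsmDir/walDir
// parameters are assumptions for illustration; a path/filepath import is
// assumed for the globbing.
func (cmd *Command) exportShard(w io.WriteCloser, key, tsmDir, walDir string) error {
	// Collect the shard's TSM files and write them out.
	tsmFiles, err := filepath.Glob(filepath.Join(tsmDir, "*.tsm"))
	if err != nil {
		return err
	}
	if err := cmd.writeTsmFiles(w, tsmFiles); err != nil {
		return err
	}

	// Then write any data that has not yet been snapshotted out of the WAL.
	walFiles, err := filepath.Glob(filepath.Join(walDir, "*.wal"))
	if err != nil {
		return err
	}
	return cmd.writeWALFiles(w, walFiles, key)
}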