func Backup(log *logging.Logger, cfg *ini.File, grace string, reload bool) {
	const POOL = 5

	var db database.DB
	var tx *sql.Tx
	var c = make(chan bool, POOL)
	var wg = new(sync.WaitGroup)
	var dataset, maxdatasets int
	var sections []*ini.Section

	sections = cfg.Sections()
	maxdatasets, _ = cfg.Section("dataset").Key(grace).Int()

	db.Open(log, cfg)
	defer db.Close()

	tx, _ = db.Conn.Begin()
	dataset = database.GetDataset(log, tx, grace)
	tx.Commit()

	if !reload {
		// Advance to the next dataset, wrapping back to 1 once
		// maxdatasets is exceeded.
		if nextds := dataset + 1; nextds > maxdatasets {
			dataset = 1
		} else {
			dataset = nextds
		}
	}
	log.Info("Dataset processed: " + strconv.Itoa(dataset))

	for _, section := range sections {
		if !contains(SECT_RESERVED, section.Name()) {
			if section.Key("type").String() == "file" { // FIXME: useless?
				sect := common.Section{
					Name:       section.Name(),
					Grace:      grace,
					Dataset:    dataset,
					Compressed: section.Key("compress").MustBool(),
				}

				// Count each goroutine as it is spawned: adding
				// len(sections)-len(SECT_RESERVED) up front would
				// deadlock wg.Wait() if any non-reserved section
				// were not of type "file".
				wg.Add(1)
				go fileBackup(log, &sect, cfg, c, wg)
				c <- true
			}
		}
	}
	wg.Wait() // Wait for all the children to die
	close(c)

	tx, _ = db.Conn.Begin()
	database.SetDataset(log, tx, dataset, grace)
	tx.Commit()
}
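The body of fileBackup is not shown here, but for wg.Wait() and close(c) to behave as written, each worker must call wg.Done() when it finishes and drain one token from c, so the buffered channel acts as a counting semaphore capping concurrency at POOL. Below is a minimal, self-contained sketch of that same pattern; the worker function and its fake workload are illustrative stand-ins, not code from this repository.

package main

import (
	"fmt"
	"sync"
)

// worker is a hypothetical stand-in for fileBackup: it signals completion
// via the WaitGroup and frees one slot in the pool by draining the channel.
func worker(id int, c chan bool, wg *sync.WaitGroup) {
	defer wg.Done()
	defer func() { <-c }() // release a slot so the next send can proceed
	fmt.Println("processing section", id)
}

func main() {
	const POOL = 5
	c := make(chan bool, POOL)
	wg := new(sync.WaitGroup)

	for i := 0; i < 20; i++ {
		wg.Add(1)
		go worker(i, c, wg)
		c <- true // blocks once POOL workers are in flight
	}
	wg.Wait()
	close(c)
}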
func Restore(log *logging.Logger, cfg *ini.File, grace string) {
	dataset := cfg.Section("general").Key("dataset").MustInt()

	for _, section := range cfg.Sections() {
		if !contains(SECT_RESERVED, section.Name()) {
			if section.Key("type").String() == "file" {
				sect := common.Section{
					Name:       section.Name(),
					Grace:      grace,
					Dataset:    dataset,
					Compressed: section.Key("compress").MustBool(),
				}

				fileRestore(log, cfg, &sect)
			}
		}
	}
}
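For context, here is how these two entry points might be wired together from a main package. This is a hypothetical sketch, assuming the functions are in scope, that the logger comes from op/go-logging and the configuration from go-ini; the file name "backup.ini" and the grace value "day" are assumptions, not values taken from this repository.

package main

import (
	"github.com/op/go-logging"
	"gopkg.in/ini.v1"
)

func main() {
	// Hypothetical wiring: obtain a logger and load the INI configuration,
	// then run a backup for an assumed "day" grace period without reloading.
	log := logging.MustGetLogger("backup")
	cfg, err := ini.Load("backup.ini")
	if err != nil {
		log.Fatalf("cannot read configuration: %v", err)
	}

	Backup(log, cfg, "day", false)

	// Restore reads its dataset from the [general] section of the
	// configuration rather than from the database.
	Restore(log, cfg, "day")
}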