// Run executes the defined restore operation: a feeder goroutine streams the stored
// file signatures to a pool of MaxWorkers restore workers, progress is aggregated from
// the processed channel, and a completion notification is sent once every worker is done.
func (r *RestoreJob) Run(finished chan<- bool) {
	log.Infof("restoreJob", "running restoreJob: %v", r.ID)
	log.Debugf("restoreJob", "Restore Job: %v", *r)
	r.State = "running"
	r.cancel = make(chan struct{})
	r.Start = time.Now().String()
	r.NotificationQueue = notification.NewQueue(r.Coordinator, r.ID, *keyManager)

	sigsc := make(chan files.Signature)
	processed := make(chan int64)

	// Feed the stored file signatures to the workers, then close the channel.
	go func() {
		for _, sig := range r.Paramiters.FileSignatures {
			sigsc <- sig
		}
		close(sigsc)
	}()

	var wg sync.WaitGroup
	wg.Add(r.MaxWorkers)
	for i := 0; i < r.MaxWorkers; i++ {
		log.Debugf("restoreJob", "Starting Worker: %v", i)
		go func() {
			restoreWorker(r.cancel, sigsc, processed, r.Modifications, *r.To, *r.From, r.NotificationQueue.In, r.ID)
			wg.Done()
		}()
	}

	// Close the progress channel once every worker has returned.
	go func() {
		wg.Wait()
		log.Debugf("restoreJob", "All Workers Done")
		close(processed)
	}()

	r.NotificationQueue.Run()

	for p := range processed {
		atomic.AddInt64(&r.FilesProccessed, p)
	}

	r.State = "finishing notifications"
	f, _ := json.Marshal(&spec.JobUpdateRequest{spec.Complete})
	r.NotificationQueue.Finish(&notification.Notification{
		Endpoint: "/jobs/" + strconv.Itoa(r.ID) + "/complete",
		Payload:  f})

	// Wait for the notification queue to be done
	<-r.NotificationQueue.Finished
	r.State = "finished"
	finished <- true
}
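// exampleRestoreWorker is a minimal sketch of the worker contract assumed by Run above.
// The real restoreWorker is defined elsewhere in this package and also receives the
// modification list, the source/destination engines, and the notification channel; the
// body here is illustrative only. A worker drains the signature channel until it is
// closed, bails out if the cancel channel closes, and reports one count per file.
func exampleRestoreWorker(cancel <-chan struct{}, sigs <-chan files.Signature, processed chan<- int64) {
	for sig := range sigs {
		select {
		case <-cancel:
			return
		default:
		}
		// ... restore sig here (see Restore.Do below) and queue a notification ...
		_ = sig
		processed <- 1
	}
}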
// Run starts the backup job.
// Backups proceed as follows:
// It starts a goroutine to walk the JobPaths and return individual files over a channel.
// It then starts a number of worker goroutines (MaxWorkers) to handle the actual work.
// Each worker is handed the path channel and feeds off of incoming messages until it is closed.
// Each worker is also handed a success channel for keeping track of progress.
// It waits until all workers terminate, then closes the progress channel.
// It then checks for any queued errors and sends a finished message over the finished channel.
func (b *BackupJob) Run(finished chan<- bool) {
	log.Infof("backupJob", "running backupJob: %v", b.ID)
	log.Debugf("backupJob", "Backup Job: %v", *b)
	b.State = "running"
	b.cancel = make(chan struct{})
	b.Start = time.Now().String()
	b.NotificationQueue = notification.NewQueue(b.Coordinator, b.ID, *keyManager)

	paths, errc := buildBackupFileList(b.cancel, b.Paramiters.Paths)
	processed := make(chan int64)

	var wg sync.WaitGroup
	wg.Add(b.MaxWorkers)
	for i := 0; i < b.MaxWorkers; i++ {
		log.Debugf("backupJob", "Starting Worker: %v", i)
		go func() {
			backupWorker(b.cancel, paths, processed, b.Modifications, b.Engines, b.NotificationQueue.In, b.ID)
			wg.Done()
		}()
	}

	// Close the progress channel once every worker has returned.
	go func() {
		wg.Wait()
		log.Debugf("backupJob", "All Workers Done")
		close(processed)
	}()

	b.NotificationQueue.Run()

	for p := range processed {
		atomic.AddInt64(&b.FilesProccessed, p)
	}

	if err := <-errc; err != nil {
		log.Debugf("backupJob", "file list walk error: %v", err)
		//send errors to notifier
		//b.NotificationQueue.In <- map[string]interface{}{"Error": err}
	}

	b.State = "finishing notifications"
	f, _ := json.Marshal(&spec.JobUpdateRequest{spec.Complete})
	b.NotificationQueue.Finish(&notification.Notification{
		Endpoint: "/jobs/" + strconv.Itoa(b.ID) + "/complete",
		Payload:  f})

	// Wait for the notification queue to be done
	<-b.NotificationQueue.Finished
	b.State = "finished"
	finished <- true
}
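// exampleRunBackup is a minimal usage sketch, assuming the *BackupJob has already been
// populated by the caller (ID, MaxWorkers, Paramiters, Engines, Coordinator, and so on
// are not shown here). Run sends exactly one value on finished after the notification
// queue drains, so a single receive is enough to block until the job completes.
func exampleRunBackup(b *BackupJob) {
	finished := make(chan bool)
	go b.Run(finished)
	<-finished
	log.Infof("backupJob", "job %v finished in state %q", b.ID, b.State)
}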
// Do restores the given file signature from the backup engine to the restore engine.
func (r *Restore) Do(fileSig *files.Signature) *spec.JobFile {
	log.Debugf("restoreWorker", "Working on: %v", fileSig.Name)
	jf := &spec.JobFile{}
	jf.Signature = *fileSig

	// Buffer the channels so that late sends from the goroutines below can neither
	// block nor panic after Do has returned.
	errc := make(chan error, len(r.Modifications)+2)
	done := make(chan bool, 1)

	// get the backup engine's Reader
	reader, err := r.From.Retrieve(*fileSig)
	if err != nil {
		jf.State = spec.Errors
		jf.Message = err.Error()
		return jf
	}

	// set up the restore pipe
	pipeR, pipeW := io.Pipe()

	// configure the restore point
	go r.To.Restore(pipeR, *fileSig, errc)

	// set up the decode pipeline
	pipe := modifications.NewPipeline(reader, errc, false, r.Modifications...)

	// copy the data
	go func() {
		if _, err := io.Copy(pipeW, pipe.Tail); err != nil {
			pipeW.CloseWithError(err)
			errc <- err
			return
		}
		// Close the write end so the restore engine sees EOF.
		pipeW.Close()
		done <- true
	}()

	// Wait for an error or for the copy to finish
	select {
	case err := <-errc:
		jf.State = spec.Errors
		jf.Message = err.Error()
	case <-done:
		log.Debugf("restoreWorker", "Restore Done: %v", fileSig.Name)
		jf.State = spec.Complete
	}
	return jf
}
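// exampleRestoreFiles is a hedged sketch of how Do is expected to be driven: one call
// per file signature, inspecting the returned JobFile state. The channel and
// notification plumbing used by the real worker is omitted, and the Restore value is
// assumed to be fully configured (From, To, Modifications).
func exampleRestoreFiles(r *Restore, sigs []files.Signature) (complete, failed int) {
	for i := range sigs {
		jf := r.Do(&sigs[i])
		if jf.State == spec.Errors {
			log.Debugf("restoreWorker", "restore failed for %v: %v", jf.Signature.Name, jf.Message)
			failed++
			continue
		}
		complete++
	}
	return complete, failed
}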
// VerifySignature checks the signed bytes of an incoming request against the supplied
// signature using the coordinator's key.
func VerifySignature(signed []byte, signature string) error {
	log.Debugf("manager", "verify sig: %v", conf.Coordinator)
	key, err := keyManager.KeyForHost(conf.Coordinator.Address)
	if err != nil {
		return errors.New("Cannot find coordinator key")
	}
	return keys.VerifySignature(key, signed, signature)
}
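// exampleVerifyHandler is a hypothetical HTTP handler sketch showing the intended call
// pattern: read the signed body and check it against a signature supplied by the
// coordinator. The "X-Signature" header name and the net/http and io/ioutil wiring are
// illustrative assumptions, not part of this package.
func exampleVerifyHandler(w http.ResponseWriter, req *http.Request) {
	body, err := ioutil.ReadAll(req.Body)
	if err != nil {
		http.Error(w, "could not read request body", http.StatusBadRequest)
		return
	}
	if err := VerifySignature(body, req.Header.Get("X-Signature")); err != nil {
		http.Error(w, "invalid signature", http.StatusUnauthorized)
		return
	}
	w.WriteHeader(http.StatusOK)
}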
// buildBackupFileList builds the file list for the backup.
// It listens for the cancel channel to close to cancel the walk.
// It walks the file tree in jobPaths and sends any found file that isn't excluded on the returned channel.
// If there is an error, it sends the error on the error channel and returns; otherwise it sends nil when the walk completes.
func buildBackupFileList(cancel <-chan struct{}, jobPaths []spec.BackupPath) (<-chan string, <-chan error) {
	paths := make(chan string)
	errc := make(chan error, 1)
	go func() {
		log.Debug("backupJob", "file list routine started")
		defer close(paths)
		for _, jobPath := range jobPaths {
			log.Debugf("backupJob", "Walking filepath: %v", jobPath)
			err := filepath.Walk(jobPath.Path, func(path string, info os.FileInfo, err error) error {
				log.Debugf("backupJob", "Walk Found: %v", path)
				if err != nil {
					return err
				}
				if !info.Mode().IsRegular() {
					return nil
				}
				if shouldExclude(path, jobPath.Excludes) {
					return nil
				}
				select {
				case paths <- path:
				case <-cancel:
					log.Info("backupJob", "Walk Canceled")
					return errors.New("Walk Canceled")
				}
				return nil
			})
			// Stop at the first failed walk; errc is buffered so this send never blocks.
			if err != nil {
				errc <- err
				return
			}
		}
		// Signal a clean walk so the caller's single receive does not block.
		errc <- nil
	}()
	return paths, errc
}
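// exampleWalkPaths is a minimal consumption sketch for buildBackupFileList, assuming
// the caller owns the cancel channel: closing it aborts the walk, paths is ranged over
// until it is closed, and the single walk result is read from errc afterwards.
func exampleWalkPaths(jobPaths []spec.BackupPath) error {
	cancel := make(chan struct{})
	defer close(cancel)
	paths, errc := buildBackupFileList(cancel, jobPaths)
	for p := range paths {
		log.Debugf("backupJob", "would back up: %v", p)
	}
	return <-errc
}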
// SendEmail sends an email through the configured SMTP server.
func SendEmail(conf config.Email, body string, subject string) error {
	headers := make(map[string]string)
	headers["From"] = conf.From
	headers["To"] = conf.To
	headers["Subject"] = conf.Subject + " " + subject

	message := ""
	for k, v := range headers {
		message += fmt.Sprintf("%s: %s\r\n", k, v)
	}
	message += "\r\n" + body

	connURL, err := url.Parse(conf.ServerAddress)
	if err != nil {
		return err
	}
	log.Debugf("emailer", "URL: %+v", *connURL)

	host, _, err := net.SplitHostPort(connURL.Host)
	if err != nil {
		return err
	}

	var a smtp.Auth
	if conf.Authentication {
		if len(conf.User) == 0 && len(conf.Password) == 0 {
			return errors.New("user and password must be set to use smtp authentication")
		}
		a = smtp.PlainAuth("", conf.User, conf.Password, host)
	}

	// TLS config; note that InsecureSkipVerify is set, so the server certificate is not validated.
	tlsconfig := &tls.Config{
		InsecureSkipVerify: true,
		ServerName:         host,
	}

	var conn net.Conn
	if connURL.Scheme == "tls" {
		conn, err = tls.Dial("tcp", connURL.Host, tlsconfig)
		if err != nil {
			log.Debugf("emailer", "error with dial %v", err)
			return err
		}
	} else {
		conn, err = net.Dial("tcp", connURL.Host)
		if err != nil {
			log.Debugf("emailer", "error with dial %v", err)
			return err
		}
	}

	c, err := smtp.NewClient(conn, host)
	if err != nil {
		log.Debugf("emailer", "error with smtp.NewClient %v", err)
		return err
	}
	defer c.Close()

	if ok, _ := c.Extension("STARTTLS"); ok {
		if err = c.StartTLS(tlsconfig); err != nil {
			return err
		}
	}

	// Auth
	if a != nil {
		if err = c.Auth(a); err != nil {
			log.Debugf("emailer", "error with smtp.auth %v", err)
			return err
		}
	}

	// To && From
	if err = c.Mail(conf.From); err != nil {
		return err
	}
	if err = c.Rcpt(conf.To); err != nil {
		return err
	}

	w, err := c.Data()
	if err != nil {
		return err
	}
	_, err = w.Write([]byte(message))
	if err != nil {
		return err
	}
	err = w.Close()
	if err != nil {
		return err
	}

	return c.Quit()
}
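// exampleSendAlert is a usage sketch for SendEmail. The config.Email field names mirror
// those referenced above (ServerAddress, From, To, Subject, Authentication, User,
// Password); the host, credentials, and message text are placeholders. A "tls://"
// scheme dials an implicit-TLS port, anything else dials plain TCP and upgrades via
// STARTTLS when the server offers it.
func exampleSendAlert() error {
	conf := config.Email{
		ServerAddress:  "tls://smtp.example.com:465",
		From:           "backups@example.com",
		To:             "ops@example.com",
		Subject:        "[backup]",
		Authentication: true,
		User:           "backups@example.com",
		Password:       "app-password",
	}
	return SendEmail(conf, "nightly backup finished", "job complete")
}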