Example #1
// SliceIQ is an infiniteQueue - Based on:
// github.com/kylelemons/iq
func (n *Queue) processQ() {

	log.Info("notification", "Process Q started")

recv:
	for {
		// Ensure that pending always has values so the select can
		// multiplex between the receiver and sender properly
		if len(n.Pending) == 0 {
			v, ok := <-n.In
			if !ok {
				// in is closed, flush values
				break
			}

			// We now have something to send
			n.Pending = append(n.Pending, v)
		}

		select {
		// Queue incoming values
		case v, ok := <-n.In:
			if !ok {
				// in is closed, flush values
				break recv
			}
			n.Pending = append(n.Pending, v)

		// Send queued values
		case n.next <- n.Pending[0]:
			n.Pending = n.Pending[1:]

		// stop closed, which means we need to exit without flushing
		case <-n.stop:
			log.Infof("notification", "Process Queue got stop, pending: %v", len(n.Pending))
			return
		}
	}

	// After in is closed, we may still have events to send
	log.Infof("notification", "Flushing queue. length: %v", len(n.Pending))
	for _, v := range n.Pending {
		select {
		case n.next <- v:
		case <-n.stop:
			// stop called; exit without sending the rest of the queue
			return
		}
	}

	// Lastly, we close the next channel to tell the notifier we are done
	close(n.next)
}
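For context, here is a minimal sketch of what the Queue type behind processQ might look like, inferred purely from the fields the method touches (In, next, Pending, stop) and from how the other examples drive it (NewQueue, Run, Finish, Finished). The Notification element type and the channel directions are assumptions for illustration, not the project's actual definitions.

// Notification is a stand-in for the payload the queue carries; the real type
// lives in the notification package (see the Finish call in Example #4).
type Notification struct {
	Endpoint string
	Payload  []byte
}

// Queue decouples a fast producer writing to In from a slower consumer reading
// from next: values pile up in Pending instead of blocking the sender.
type Queue struct {
	In       chan *Notification // producer side; closing it triggers the final flush
	next     chan *Notification // consumer side; closed by processQ once the flush is done
	Pending  []*Notification    // received but not yet delivered
	stop     chan struct{}      // closed to abort without flushing
	Finished chan struct{}      // signalled once everything has been drained
}

// Run starts the multiplexing loop in the background (a sketch; the project's
// actual Run and Finish wiring may differ).
func (n *Queue) Run() {
	go n.processQ()
}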
Example #2
func main() {

	var cPath string

	flag.StringVar(&cPath, "config", "", "Path to the config file")
	flag.Parse()

	conf, err := config.ParseConfig(cPath)
	if err != nil {
		fmt.Println("Error parsing config file:", err)
		os.Exit(1)
	}

	log.Init(conf.Log)
	log.Infof("main", "coordinator starting. Version: %s", version.Version.String())
	log.Debug("main", "config:", *conf)

	err = manager.Init(conf)
	if err != nil {
		log.Fatalf("main", "Error initializing manager: %v", err)
	}

	httpAPI := new(httpapi.Server)

	address := strings.Split(conf.Host.Address, ":")
	if len(address) != 2 {
		log.Fatalf("main", "Invalid host address. Must be in form ip:port")
	}

	httpAPI.Configure(apihandler.Routes)
	httpAPI.Start(address[0], address[1])

}
Example #3
func (s *ScheduledJob) Run() {
	jobID, err := RunBackup(s.Schedule.Backup)
	if err != nil {
		log.Errorf("scheduler", "Could not run scheduled Backup: %v. Error: ", *s, err)
		return
	}
	log.Infof("scheduler", "Scheduled Job started. ID: %v", jobID)
}
Example #4
// Run the defined restore operation
func (r *RestoreJob) Run(finished chan<- bool) {

	log.Infof("restoreJob", "running restoreJob: %v", r.ID)
	log.Debugf("restoreJob", "Restore Job: %v", *r)

	r.State = "running"
	r.cancel = make(chan struct{})
	r.Start = time.Now().String()
	r.NotificationQueue = notification.NewQueue(r.Coordinator, r.ID, *keyManager)

	sigsc := make(chan files.Signature)
	processed := make(chan int64)

	go func() {
		for _, sig := range r.Paramiters.FileSignatures {
			sigsc <- sig
		}
		close(sigsc)
	}()

	var wg sync.WaitGroup
	wg.Add(r.MaxWorkers)
	for i := 0; i < r.MaxWorkers; i++ {
		log.Debugf("restoreJob", "Starting Worker: %v", i)
		go func() {
			restoreWorker(r.cancel, sigsc, processed, r.Modifications, *r.To, *r.From, r.NotificationQueue.In, r.ID)
			wg.Done()
		}()
	}
	go func() {
		wg.Wait()
		log.Debugf("restoreJob", "All Workers Done")
		close(processed)
	}()

	r.NotificationQueue.Run()

	for p := range processed {
		atomic.AddInt64(&r.FilesProccessed, p)
	}

	r.State = "finishing notifications"

	f, _ := json.Marshal(&spec.JobUpdateRequest{spec.Complete})

	r.NotificationQueue.Finish(&notification.Notification{
		Endpoint: "/jobs/" + strconv.Itoa(r.ID) + "/complete",
		Payload:  f})

	//Wait for the notification queue to be done
	<-r.NotificationQueue.Finished

	r.State = "finished"

	finished <- true
}
Example #5
// Run starts the backup job.
// Backups proceed as follows:
// It starts a goroutine to walk the JobPaths and return individual files over a channel.
// It then starts a number of worker goroutines (MaxWorkers) to handle the actual work.
// Each worker is handed the path channel and feeds off of incoming messages until it is closed.
// Each worker is also handed a success channel for keeping track of progress.
// It waits until all workers terminate, then closes the progress channel.
// It then checks for any queued errors and sends a finished message over the finished channel.
func (b *BackupJob) Run(finished chan<- bool) {

	log.Infof("backupJob", "running backupJob: %v", b.ID)
	log.Debugf("backupJob", "Backup Job: %v", *b)

	b.State = "running"
	b.cancel = make(chan struct{})
	b.Start = time.Now().String()
	b.NotificationQueue = notification.NewQueue(b.Coordinator, b.ID, *keyManager)
	paths, errc := buildBackupFileList(b.cancel, b.Paramiters.Paths)

	processed := make(chan int64)
	var wg sync.WaitGroup
	wg.Add(b.MaxWorkers)
	for i := 0; i < b.MaxWorkers; i++ {
		log.Debugf("backupJob", "Starting Worker: %v", i)
		go func() {
			backupWorker(b.cancel, paths, processed, b.Modifications, b.Engines, b.NotificationQueue.In, b.ID)
			wg.Done()
		}()
	}
	go func() {
		wg.Wait()
		log.Debugf("backupJob", "All Workers Done")
		close(processed)
	}()

	b.NotificationQueue.Run()

	for p := range processed {
		atomic.AddInt64(&b.FilesProccessed, p)
	}

	if err := <-errc; err != nil {
		// TODO: send errors to the notifier, e.g.:
		// b.NotificationQueue.In <- map[string]interface{}{"Error": err}
		log.Errorf("backupJob", "Error building backup file list: %v", err)
	}

	b.State = "finishing notifications"

	f, _ := json.Marshal(&spec.JobUpdateRequest{spec.Complete})

	b.NotificationQueue.Finish(&notification.Notification{
		Endpoint: "/jobs/" + strconv.Itoa(b.ID) + "/complete",
		Payload:  f})

	//Wait for the notification queue to be done
	<-b.NotificationQueue.Finished

	b.State = "finished"

	finished <- true
}
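A simplified illustration of the worker loop the doc comment above describes, assuming the path channel carries plain strings; the real backupWorker also receives the modifications, engines, notification channel, and job ID, which are omitted here.

// exampleWorker drains paths until the channel is closed, reports one unit of
// progress per file on processed, and exits early if cancel is closed.
func exampleWorker(cancel <-chan struct{}, paths <-chan string, processed chan<- int64) {
	for path := range paths {
		// ... back the file up here ...
		_ = path

		select {
		case processed <- 1:
		case <-cancel:
			return
		}
	}
}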
Example #6
func initCron() error {
	schedules = cron.New()
	ss, err := gDb.ScheduleList()
	if err != nil {
		log.Errorf("manager", "Could not init schedule list: %v", err)
		return err
	}

	for _, s := range ss {
		schedules.AddJob(s.String(), &ScheduledJob{s})
	}

	schedules.Start()

	log.Infof("scheduler", "Active Schedules: %v", schedules.Entries())

	return nil
}
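The scheduler API used here (cron.New, AddJob, Start, Entries) matches github.com/robfig/cron; assuming that is the library in use, AddJob takes a cron spec string plus any value satisfying its Job interface, which is why ScheduledJob in Example #3 only needs a Run method. Presumably s.String() renders the stored schedule as such a spec string; note that AddJob also reports an error for an invalid spec, which the snippet above ignores.

// The contract the scheduler expects (robfig/cron's Job interface):
// any type with a no-argument Run method can be registered.
type Job interface {
	Run()
}

// Registering a schedule by hand would look roughly like:
//   c := cron.New()
//   c.AddJob("@hourly", &ScheduledJob{s}) // "@hourly" is one of the predefined descriptors
//   c.Start()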
Example #7
func wait(on int, over chan bool) {
	<-over
	log.Infof("manager", "Job Finished: %v", on)
	removeJob(on)
}
Example #8
// Cancel will stop the backup job. Any running workers will finish
func (b *BackupJob) Cancel() {
	log.Infof("backupJob", "Canceling backup job: %v", b.ID)
	b.State = "canceling"
	close(b.cancel)
}