Example #1
/* The request body should contain a form value "dbname" equal to the name of the database,
   e.g. curl www.hostname.com/backup/now -X POST -d "dbname=nameofdatabase"
   The {how} route variable should be either "now" or "enqueue". */
func BackupHandler(w http.ResponseWriter, request *http.Request) {
	vars := mux.Vars(request)
	dbname := request.FormValue("dbname")
	t := tasks.NewTask()
	t.Action = "BackupDatabase"
	t.Data = dbname
	t.Node = globals.MyIP
	t.Role = globals.ServiceRole
	t.TTL = 3600
	t.ClusterService = globals.ClusterService
	t.NodeType = "read"
	if rdpgconsul.IsWriteNode(globals.MyIP) {
		t.NodeType = "write"
	}

	var err error
	if dbname != "rdpg" {
		//Using FindByDatabase to determine if the database actually exists to be backed up.
		inst, err := instances.FindByDatabase(dbname)
		if err != nil {
			log.Error(fmt.Sprintf("admin.BackupHandler() instances.FindByDatabase(%s) Error occurred when searching for database.", dbname))
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte("Error encountered while searching for database"))
			return
		}
		if inst == nil {
			//...then the database doesn't exist on this cluster.
			log.Debug(fmt.Sprintf("admin.BackupHandler() Attempt to initiate backup on non-existent database with name: %s", dbname))
			w.WriteHeader(http.StatusNotFound)
			w.Write([]byte("Database not found"))
			return
		}
	}

	switch vars[`how`] {
	//Immediately calls BackupDatabase() and performs the backup
	case "now":
		err = t.BackupDatabase()
		if err != nil {
			log.Error(fmt.Sprintf(`api.BackupHandler() Task.BackupDatabase() %+v ! %s`, t, err))
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte("Error encountered while trying to perform backup"))
			return
		}
		w.Write([]byte("Backup completed."))
	case "enqueue":
		// Queues up a backup to be performed when a worker thread gets around to it.
		// This call returns after the queuing process is done; not after the backup is done.
		err = t.Enqueue()
		if err != nil {
			log.Error(fmt.Sprintf(`api.BackupHandler() Task.Enqueue() %+v ! %s`, t, err))
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte("Error while trying to queue"))
			return
		}
		w.Write([]byte("Backup successfully queued."))
	default:
		w.WriteHeader(http.StatusNotFound)
	}
}
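
The handler reads the {how} segment through mux.Vars, so it is expected to be mounted on a route such as /backup/{how}. A minimal wiring sketch, assuming gorilla/mux and that BackupHandler is in scope; the route path, listen address, and package layout here are illustrative, not the project's actual setup:

package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	// BackupHandler is assumed to be in scope (in rdpgd it lives in the admin package).
	r := mux.NewRouter()
	// {how} is captured by mux and read inside BackupHandler via mux.Vars(request);
	// valid values are "now" and "enqueue".
	r.HandleFunc("/backup/{how}", BackupHandler).Methods("POST")
	log.Fatal(http.ListenAndServe(":8080", r))
}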
Example #2
//Work - Continuously select tasks from the queue for this server and process them
func Work() {
	err := OpenWorkDB()
	if err != nil {
		log.Error(fmt.Sprintf("tasks.Work() OpenWorkDB() %s", err))
		proc, _ := os.FindProcess(os.Getpid())
		proc.Signal(syscall.SIGTERM)
	}
	defer CloseWorkDB()

	for {
		tasks := []Task{}
		err = WorkLock()
		if err != nil {
			time.Sleep(10 * time.Second)
			continue
		}

		nodeType := `read`
		if rdpgconsul.IsWriteNode(globals.MyIP) {
			nodeType = `write`
		}
		sq := fmt.Sprintf(`SELECT id,cluster_id,node,role,action,data,ttl,node_type,cluster_service FROM tasks.tasks WHERE locked_by IS NULL AND role IN ('all','%s') AND node IN ('*','%s') AND node_type IN ('any','%s') ORDER BY created_at DESC LIMIT 1`, globals.ServiceRole, globals.MyIP, nodeType)

		log.Trace(fmt.Sprintf(`tasks.Work() > %s`, sq))
		err = workDB.Select(&tasks, sq)
		if err != nil {
			WorkUnlock()
			if err == sql.ErrNoRows {
				log.Trace(`tasks.Work() No tasks found.`)
			} else {
				log.Error(fmt.Sprintf(`tasks.Work() Selecting Task ! %s`, err))
			}
			time.Sleep(5 * time.Second)
			continue
		}
		if len(tasks) == 0 {
			WorkUnlock()
			time.Sleep(5 * time.Second)
			continue
		}
		task := tasks[0]
		err = task.Dequeue()
		if err != nil {
			log.Error(fmt.Sprintf(`tasks.Work() Task<%d>#Dequeue() ! %s`, task.ID, err))
			// Release the work lock before retrying so the next iteration can re-acquire it.
			WorkUnlock()
			continue
		}
		WorkUnlock()

		// TODO: Come back and have a cleanup routine for tasks that were locked
		// but never finished past the TTL, perhaps a health check or such.
		err = task.Work()
		if err != nil {
			log.Error(fmt.Sprintf(`tasks.Task<%d>#Work() ! %s`, task.ID, err))

			sq = fmt.Sprintf(`UPDATE tasks.tasks SET locked_by=NULL, processing_at=NULL WHERE id=%d`, task.ID)
			log.Trace(fmt.Sprintf(`tasks#Work() > %s`, sq))
			_, err = workDB.Exec(sq)
			if err != nil {
				log.Error(fmt.Sprintf(`tasks.Work() Updating Task %d processing_at ! %s`, task.ID, err))
			}
			continue
		} else {
			// TODO: (t *Task) Delete()
			sq = fmt.Sprintf(`DELETE FROM tasks.tasks WHERE id=%d`, task.ID)
			log.Trace(fmt.Sprintf(`tasks#Work() > %s`, sq))
			_, err = workDB.Exec(sq)
			if err != nil {
				log.Error(fmt.Sprintf(`tasks.Work() Deleting Task %d ! %s`, task.ID, err))
				continue
			}
			log.Trace(fmt.Sprintf(`tasks.Work() Task Completed! > %+v`, task))
		}
	}
}
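
Since Work() never returns on its own (it loops, sleeping between polls), a caller would normally start it in a dedicated goroutine during service startup. A minimal sketch, assuming the function is exported from the project's tasks package; the import path is assumed:

package main

import (
	"os"
	"os/signal"
	"syscall"

	"github.com/starkandwayne/rdpgd/tasks" // assumed import path
)

func main() {
	// Work() loops forever, so run it in its own goroutine.
	go tasks.Work()

	// Block until the process is told to stop; note that Work() itself sends
	// SIGTERM to the process if it cannot open its work database.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGTERM, syscall.SIGINT)
	<-sig
}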
Example #3
func RemoteCopyHandler(w http.ResponseWriter, request *http.Request) {
	dbname := request.FormValue("dbname")
	filename := request.FormValue("filename")
	//Can't copy to S3 if S3 isn't configured.
	if !rdpgs3.Configured {
		w.WriteHeader(http.StatusGone)
		w.Write([]byte("Remote storage has not been configured"))
		return
	}

	if dbname == "" && filename != "" {
		//A filename without a database doesn't make any sense
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("Cannot specify filename without database"))
		return
	}

	//Select eligible files
	filesToCopy, _, err := backup.Diff(dbname, true)
	if err != nil {
		log.Error(fmt.Sprintf(`api.RemoteCopyHandler() ! utils/backup.Diff("%s", true) erred : %s`, dbname, err.Error()))
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("Error getting file information"))
		return
	}
	//Determine this node type
	nType := "read"
	if rdpgconsul.IsWriteNode(globals.MyIP) {
		nType = "write"
	}

	numFiles := 0
	for _, dbWithBackupsToCopy := range filesToCopy {
		for _, backupToCopy := range dbWithBackupsToCopy.Backups {
			if filename != "" && backupToCopy.Name != filename {
				continue
			}
			//Gather the info necessary for uploading the file.
			fm := tasks.S3FileMetadata{}
			fm.Location = backup.Location(dbWithBackupsToCopy.Database, backupToCopy.Name)
			fm.DBName = dbWithBackupsToCopy.Database
			fm.Node = globals.MyIP
			fm.ClusterID = globals.ClusterID
			//JSONify that info
			fileToCopyParams, err := json.Marshal(fm)
			if err != nil {
				log.Error(fmt.Sprintf("tasks.FindFilesToCopyToS3() > Error attempting t)o marshal some JSON ! %+v %s", fm, err))
				w.WriteHeader(http.StatusInternalServerError)
				w.Write([]byte("An error occurred when marshalling JSON"))
				return
			}
			log.Trace(fmt.Sprintf("api.CopyFileHelper > Attempting to copy %s", fileToCopyParams))
			//Insert the task
			newTask := tasks.Task{
				ClusterID: globals.ClusterID,
				Node:      globals.MyIP,
				Role:      globals.ServiceRole,
				Action:    "CopyFileToS3",
				Data:      string(fileToCopyParams),
				TTL:       3600,
				NodeType:  nType,
			}
			err = newTask.CopyFileToS3()
			if err != nil {
				log.Error(fmt.Sprintf(`api.RemoteCopyHandler() ! task.CopyFileToS3 erred : %s`, err.Error()))
				w.WriteHeader(http.StatusInternalServerError)
				w.Write([]byte("An error occurred when copying files to remote storage"))
				return
			}
			numFiles++
		}
	}

	w.Write([]byte(fmt.Sprintf("%d files were written to S3", numFiles)))
}
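
For a quick local check, the handler can be exercised with net/http/httptest and the two form values it reads. A sketch, assuming the test sits in the same package as RemoteCopyHandler; the package name, URL path, and database name below are made up:

package admin // assumed: same package as RemoteCopyHandler

import (
	"net/http/httptest"
	"net/url"
	"strings"
	"testing"
)

func TestRemoteCopyHandler(t *testing.T) {
	form := url.Values{}
	form.Set("dbname", "d1234567890abcdef") // hypothetical database name
	form.Set("filename", "")                // empty filename copies every eligible backup

	req := httptest.NewRequest("POST", "/remote/copy", strings.NewReader(form.Encode()))
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	rec := httptest.NewRecorder()

	RemoteCopyHandler(rec, req)

	// With remote storage configured the handler reports how many files were
	// written; without it, it answers 410 Gone.
	t.Logf("status=%d body=%s", rec.Code, rec.Body.String())
}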