Example #1
/* BackupListHandler lists all the backup files at the desired location.
   The {where} path variable should be "local" or "remote". The "local" option finds
   all the backup files on the local filesystem. The "remote" option lists all of the
   backup files in remote storage, such as S3. Backups are returned in JSON format.
   The request must be a GET.
   Form values:
     "fmt": "filename" is the only supported value at present. Defaults to "filename"
            if absent or left blank.
     "dbname": the name of the database whose backups to query. If left blank, returns
            the backups for all databases.
     "globals": set to the string "true" if files with the suffix ".globals" should
            appear in the output. */
func BackupListHandler(w http.ResponseWriter, request *http.Request) {
	printFormat := "filename"
	if request.FormValue("fmt") != "" {
		printFormat = request.FormValue("fmt")
	}
	showGlobals := false
	if request.FormValue("globals") == "true" {
		showGlobals = true
	}
	backupList := []backup.DatabaseBackupList{}
	// If dbname wasn't specified or the field is blank, return the backups of
	// all databases.
	dbname := request.FormValue("dbname")
	// Where are we getting the files from?
	vars := mux.Vars(request)
	var err error
	switch vars["where"] {
	case "local":
		backupList, err = backup.LocalListing(dbname, showGlobals)
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte(err.Error()))
			return
		}
	case "remote":
		rdpgs3.ReinitializeS3Credentials()
		if !rdpgs3.Configured {
			w.WriteHeader(http.StatusGone)
			w.Write([]byte("Remote storage has not been configured"))
			return
		}
		backupList, err = backup.RemoteListing(dbname, showGlobals)
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte(err.Error()))
			return
		}
	default:
		w.WriteHeader(http.StatusNotFound)
		return
	}

	switch printFormat {
	case "filename":
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(formatFilename(backupList)))
	//case "timestamp": TODO
	default:
		log.Debug("api.BackupListHandler() Requested unsupported format.")
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("Unsupported Format Requested"))
	}
}
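
// What follows is a minimal usage sketch, not part of the original source: it wires
// BackupListHandler into a gorilla/mux router. The route path "/backups/{where}" and
// the listen address are assumptions; only the {where} path variable name is required,
// since BackupListHandler reads it via mux.Vars(request)["where"].
func serveBackupAPI() error {
	r := mux.NewRouter()
	// {where} must be "local" or "remote"; anything else gets a 404 from the handler.
	r.HandleFunc("/backups/{where}", BackupListHandler).Methods("GET")
	return http.ListenAndServe(":8080", r)
}

// Example request against the hypothetical route above:
//   curl "http://localhost:8080/backups/local?dbname=mydb&globals=true"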
/*EnforceFileRetention - Responsible for removing files which are no longer
needed on the local file system. For example, backup files which have been
successfully created locally and copied to S3 can be deleted to preserve
local disk storage. */
func (t *Task) EnforceFileRetention() (err error) {

	/*
	   If s3 copy is enabled you cannot delete files until they have been copied to s3
	   otherwise keep the most recent backups, say the last 48 hours worth and delete all others
	*/
	//Select eligible files
	var eligibleBackups []backup.DatabaseBackupList
	//Get the list of backups that exist locally...
	var localList []backup.DatabaseBackupList
	//If S3 backups are enabled, then don't delete a local file unless it has been backed up already
	if isS3FileCopyEnabled() {
		//backup.Both returns only the backups that exist both on the local filesystem and in remote storage
		localList, err = backup.Both("", true)
		if err != nil {
			log.Error(fmt.Sprintf("tasks.Task<%d>#EnforceFileRetention() ! utils/backup#Both() : %s", t.ID, err.Error()))
			return err
		}
	} else {
		localList, err = backup.LocalListing("", true)
		if err != nil {
			log.Error(fmt.Sprintf("tasks.Task<%d>#EnforceFileRetention() ! utils/backup.LocalListing() : %s", t.ID, err.Error()))
			return err
		}
	}
	eligibleBackups, err = findExpiredFiles(localList, false)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.EnforceFileRetention() ! tasks.findExpiredFiles() : %s", err.Error()))
		return err
	}
	numFilesToDelete := 0
	for _, v := range eligibleBackups {
		numFilesToDelete += len(v.Backups)
	}

	log.Trace(fmt.Sprintf("tasks.EnforceFileRetention() > Found %d files to delete", numFilesToDelete))

	for _, eligibleDatabase := range eligibleBackups {
		for _, backupToDelete := range eligibleDatabase.Backups {
			fm := S3FileMetadata{
				Location:  backup.Location(eligibleDatabase.Database, backupToDelete.Name),
				DBName:    eligibleDatabase.Database,
				Node:      globals.MyIP,
				ClusterID: globals.ClusterID,
			}
			byteParams, err := json.Marshal(fm)
			if err != nil {
				log.Error(fmt.Sprintf("tasks.EnforceFileRetention() > Error attempting to marshal some JSON ! %+v %s", fm, err))
				return err
			}
			fileToDeleteParams := string(byteParams)
			log.Trace(fmt.Sprintf("tasks.EnforceFileRetention() > Attempting to add %s", fileToDeleteParams))
			newTask := Task{ClusterID: t.ClusterID, Node: t.Node, Role: t.Role, Action: "DeleteFile", Data: fileToDeleteParams, TTL: t.TTL, NodeType: t.NodeType}
			err = newTask.Enqueue()
			if err != nil {
				log.Error(fmt.Sprintf(`tasks.EnforceFileRetention() ! Task.Enqueue() : %s`, err))
			}
		}
	}

	return
}
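
// A minimal sketch (not the original implementation, which is not shown in this
// excerpt) of what findExpiredFiles might do under the retention policy described
// above: keep roughly the most recent retentionWindow of backups per database and
// mark everything older as eligible for deletion. It assumes the file imports
// "strings" and "time", and it omits the bool parameter and error return the real
// function has, since their meaning isn't visible here.
func findExpiredFilesSketch(all []backup.DatabaseBackupList, retentionWindow time.Duration) []backup.DatabaseBackupList {
	cutoff := time.Now().Add(-retentionWindow) // e.g. retentionWindow = 48 * time.Hour
	expired := []backup.DatabaseBackupList{}
	for _, db := range all {
		old := db
		old.Backups = nil
		for _, b := range db.Backups {
			if backupTime(b.Name).Before(cutoff) {
				old.Backups = append(old.Backups, b)
			}
		}
		if len(old.Backups) > 0 {
			expired = append(expired, old)
		}
	}
	return expired
}

// backupTime is a hypothetical helper: it assumes backup file names begin with a
// "20060102150405"-style timestamp followed by an underscore. The real naming
// scheme is not visible in this excerpt.
func backupTime(name string) time.Time {
	t, err := time.Parse("20060102150405", strings.SplitN(name, "_", 2)[0])
	if err != nil {
		return time.Now() // treat unparseable names as fresh so they are never deleted
	}
	return t
}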