Example 1
/* BackupListHandler lists all the backup files at the desired location.
   The {where} route variable should be "local" or "remote". The "local" option finds
   all of the backup files on the local filesystem; the "remote" option lists all of
   the backup files on remote storage, such as S3. Backups are returned in JSON format.
   The request must be a GET.
   Form values:
     "fmt": "filename" is the only supported value at present. Defaults to "filename"
            if absent or blank.
     "dbname": the name of the database whose backups to list. If absent or blank,
            backups for all databases are returned.
     "globals": set to the string "true" if files with the suffix ".globals" should
            appear in the output. */
func BackupListHandler(w http.ResponseWriter, request *http.Request) {
	printFormat := "filename"
	if request.FormValue("fmt") != "" {
		printFormat = request.FormValue("fmt")
	}
	showGlobals := request.FormValue("globals") == "true"
	backupList := []backup.DatabaseBackupList{}
	// If the dbname wasn't specified or if the field is blank, then return the backups of
	// all databases.
	dbname := request.FormValue("dbname")
	// Where are we getting the files from?
	vars := mux.Vars(request)
	var err error
	switch vars["where"] {
	case "local":
		backupList, err = backup.LocalListing(dbname, showGlobals)
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte(err.Error()))
			return
		}
	case "remote":
		rdpgs3.ReinitializeS3Credentials()
		if !rdpgs3.Configured {
			w.WriteHeader(http.StatusGone)
			w.Write([]byte("Remote storage has not been configured"))
			// Without this return, execution would fall through and attempt the
			// remote listing (and write a second response) anyway.
			return
		}
		backupList, err = backup.RemoteListing(dbname, showGlobals)
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte(err.Error()))
			return
		}
	default:
		w.WriteHeader(http.StatusNotFound)
		return
	}

	switch printFormat {
	case "filename":
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(formatFilename(backupList)))
	//case "timestamp": TODO
	default:
		log.Debug("api.BackupListHandler() Requested unsupported format.")
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("Unsupported Format Requested"))
	}
}
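
The handler reads the {where} segment via mux.Vars, so it must be registered on a gorilla/mux route that defines that variable. Below is a minimal wiring sketch, assuming it lives in the same package as BackupListHandler; the "/backups/{where}" path and port are assumptions, not taken from the original.

package main

import (
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// BackupListHandler reads the {where} variable via mux.Vars, so the route
	// must define it; the "/backups" prefix here is hypothetical.
	r.HandleFunc("/backups/{where}", BackupListHandler).Methods("GET")
	http.ListenAndServe(":8080", r)
}

With that route in place, a local listing for one database could be requested with, e.g., GET /backups/local?dbname=rdpg&globals=true (the dbname value is illustrative).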
Example 2
/*EnforceRemoteFileRetention - Responsible for removing files which are no longer
needed on the remote storage. For example, if S3 storage is enabled, files which are
older than the retention cutoff will be deleted in order to preserve space. This function
returns an error if S3 storage is not enabled or not properly configured for this RDPG
deployment.
*/
func (t *Task) EnforceRemoteFileRetention() (err error) {
	//Is S3 even enabled?
	if !isS3FileCopyEnabled() {
		errorMessage := "tasks.EnforceRemoteFileRetention ! S3 Storage is not enabled for this deployment"
		log.Error(errorMessage)
		return errors.New(errorMessage)
	}
	//If S3 is enabled, is all of the necessary information at least filled in?
	if !rdpgs3.Configured {
		errorMessage := "tasks.EnforceRemoteFileRetention ! S3 storage information has missing fields"
		log.Error(errorMessage)
		return errors.New(errorMessage)
	}

	var eligibleBackups []backup.DatabaseBackupList
	remoteList, err := backup.RemoteListing("", true)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.EnforceRemoteFileRetention ! Error in util/backup.RemoteListing(\"\", true) : %s", err))
		return err
	}
	eligibleBackups, err = findExpiredFiles(remoteList, true)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.EnforceRemoteFileRetention() ! tasks.findExpiredFiles() : %s", err.Error()))
		return err
	}

	numFilesToDelete := 0
	for _, v := range eligibleBackups {
		numFilesToDelete += len(v.Backups)
	}

	log.Trace(fmt.Sprintf("tasks.EnforceRemoteFileRetention() > Found %d files to delete", numFilesToDelete))
	if numFilesToDelete == 0 {
		return nil
	}

	var objectsToDelete []*s3.ObjectIdentifier
	for _, eligibleDatabase := range eligibleBackups {
		for _, backupToDelete := range eligibleDatabase.Backups {
			// S3 object keys do not begin with a slash, so strip any leading "/".
			location := strings.TrimPrefix(backup.Location(eligibleDatabase.Database, backupToDelete.Name), "/")
			objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{Key: &location})
		}
	}

	creds := credentials.NewStaticCredentials(rdpgs3.AWSAccessKey, rdpgs3.AWSSecretKey, rdpgs3.Token)
	config := &aws.Config{
		Region:           &rdpgs3.AWSRegion,
		Endpoint:         &rdpgs3.Endpoint,
		S3ForcePathStyle: &rdpgs3.S3ForcePathStyle,
		Credentials:      creds,
	}
	s3client := s3.New(config)

	input := s3.DeleteObjectsInput{Bucket: &rdpgs3.BucketName, Delete: &s3.Delete{Objects: objectsToDelete}}
	_, err = s3client.DeleteObjects(&input)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.EnforceRemoteFileRetention ! Error in s3.DeleteObjects : %s", err))
		return err
	}
	return nil
}
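
One caveat: S3's DeleteObjects API accepts at most 1,000 keys per request, so the single call above would fail once the expired listing grows past that limit. Below is a minimal batching sketch; deleteInBatches is a hypothetical helper, not part of the original code, and it assumes the same *s3.S3 client and objectsToDelete slice built above.

// deleteInBatches issues DeleteObjects calls in chunks of at most 1,000 keys,
// the maximum the S3 API accepts per request. Hypothetical helper, not part
// of the original code.
func deleteInBatches(s3client *s3.S3, bucket string, objects []*s3.ObjectIdentifier) error {
	const maxKeysPerRequest = 1000
	for start := 0; start < len(objects); start += maxKeysPerRequest {
		end := start + maxKeysPerRequest
		if end > len(objects) {
			end = len(objects)
		}
		input := s3.DeleteObjectsInput{
			Bucket: &bucket,
			Delete: &s3.Delete{Objects: objects[start:end]},
		}
		if _, err := s3client.DeleteObjects(&input); err != nil {
			return err
		}
	}
	return nil
}

The DeleteObjects call site in EnforceRemoteFileRetention would then become err = deleteInBatches(s3client, rdpgs3.BucketName, objectsToDelete).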