Example #1
//FindFilesToCopyToS3 - Responsible for finding local backup files, such as database
//backups, that still need to be copied to S3 storage, and enqueuing a CopyFileToS3 task for each one
func (t *Task) FindFilesToCopyToS3() (err error) {
	//If S3 creds/bucket aren't set, just exit since they aren't configured
	if !rdpgs3.Configured {
		log.Error("tasks.FindFilesToCopyToS3() S3 CONFIGURATION MISSING FOR THIS DEPLOYMENT ! S3 Credentials are not configured, skipping attempt to copy until configured")
		return
	}

	//Select eligible files
	//Diff with empty string means get me the diff for ALL THE THINGS
	localDiff, _, err := backup.Diff("", true)
	if err != nil {
		log.Error(fmt.Sprintf(`tasks.Task<%d>#FindFilesToCopyToS3() Failed to load list of files ! %s`, t.ID, err))
		return
	}
	numFilesToCopy := 0
	for _, dbWithBackupsToCopy := range localDiff {
		numFilesToCopy += len(dbWithBackupsToCopy.Backups)
	}

	log.Trace(fmt.Sprintf("tasks.FindFilesToCopyToS3() > Found %d files to copy over %d unique databases", numFilesToCopy, len(localDiff)))

	//Loop and add Tasks CopyFileToS3
	for _, dbWithBackupsToCopy := range localDiff {
		for _, backupToCopy := range dbWithBackupsToCopy.Backups {
			//Gather the info necessary for uploading the file.
			fm := S3FileMetadata{}
			fm.Location = backup.Location(dbWithBackupsToCopy.Database, backupToCopy.Name)
			fm.DBName = dbWithBackupsToCopy.Database
			fm.Node = globals.MyIP
			fm.ClusterID = globals.ClusterID
			//JSONify that info
			fileToCopyParams, err := json.Marshal(fm)
			if err != nil {
				log.Error(fmt.Sprintf("tasks.FindFilesToCopyToS3() > Error attempting to marshal some JSON ! %+v %s", fm, err))
				return err
			}
			log.Trace(fmt.Sprintf("tasks.FindFilesToCopyToS3() > Attempting to add %s", fileToCopyParams))
			//Insert the task
			newTask := Task{ClusterID: t.ClusterID, Node: t.Node, Role: t.Role, Action: "CopyFileToS3", Data: string(fileToCopyParams), TTL: t.TTL, NodeType: t.NodeType}
			err = newTask.Enqueue()
			if err != nil {
				log.Error(fmt.Sprintf(`tasks.FindFilesToCopyToS3() failed to enqueue CopyFileToS3 task ! %s`, err))
			}
		}

	}
	return
}
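Each of these examples marshals an S3FileMetadata value into a task's Data payload, but the type itself is not shown. A minimal sketch of what it presumably looks like, reconstructed only from the four fields used here (the json tags are an assumption):

//S3FileMetadata - hypothetical reconstruction based on the fields used in these examples;
//the json struct tags are an assumption, not the project's actual definition.
type S3FileMetadata struct {
	Location  string `json:"location"`
	DBName    string `json:"dbname"`
	Node      string `json:"node"`
	ClusterID string `json:"cluster_id"`
}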
Example #2
/*EnforceRemoteFileRetention - Responsible for removing files which are no longer
needed on the remote storage.  For example, if S3 storage is enabled, files which are
older than the retention cutoff will be deleted in order to preserve space. This function
will produce an error if S3 Storage is not enabled or properly configured for this RDPG
deployment.
*/
func (t *Task) EnforceRemoteFileRetention() (err error) {
	//Is S3 even enabled?
	if !isS3FileCopyEnabled() {
		errorMessage := "tasks.EnforceRemoteFileRetention ! S3 Storage is not enabled for this deployment"
		log.Error(errorMessage)
		return errors.New(errorMessage)
	}
	//If S3 is enabled, is all of the necessary information at least filled in?
	if !rdpgs3.Configured {
		errorMessage := "tasks.EnforceRemoteFileRetention ! S3 storage information has missing fields"
		log.Error(errorMessage)
		return errors.New(errorMessage)
	}

	var eligibleBackups []backup.DatabaseBackupList
	remoteList, err := backup.RemoteListing("", true)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.EnforceRemoteFileRetention ! Error in util/backup.RemoteListing(\"\", true) : %s", err))
		return err
	}
	eligibleBackups, err = findExpiredFiles(remoteList, true)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.EnforceRemoteFileRetention() ! tasks.findExpiredFiles() : %s", err.Error()))
		return err
	}

	numFilesToDelete := 0
	for _, v := range eligibleBackups {
		numFilesToDelete += len(v.Backups)
	}

	log.Trace(fmt.Sprintf("tasks.EnforceRemoteFileRetention() > Found %d files to delete", numFilesToDelete))
	if numFilesToDelete == 0 {
		return nil
	}

	var objectsToDelete []*s3.ObjectIdentifier
	for _, eligibleDatabase := range eligibleBackups {
		for _, backupToDelete := range eligibleDatabase.Backups {
			location := strings.TrimPrefix(backup.Location(eligibleDatabase.Database, backupToDelete.Name), "/")
			objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{Key: &location})
		}
	}

	creds := credentials.NewStaticCredentials(rdpgs3.AWSAccessKey, rdpgs3.AWSSecretKey, rdpgs3.Token)
	config := &aws.Config{
		Region:           &rdpgs3.AWSRegion,
		Endpoint:         &rdpgs3.Endpoint,
		S3ForcePathStyle: &rdpgs3.S3ForcePathStyle,
		Credentials:      creds,
	}
	s3client := s3.New(config)

	input := s3.DeleteObjectsInput{Bucket: &rdpgs3.BucketName, Delete: &s3.Delete{Objects: objectsToDelete}}
	_, err = s3client.DeleteObjects(&input)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.EnforceRemoteFileRetention ! Error in s3.DeleteObjects : %s", err))
		return err
	}
	return nil
}
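One caveat worth noting about the delete step above: the S3 DeleteObjects API accepts at most 1000 keys per request, so a retention sweep that collects more objects than that would need to issue the call in batches. A minimal sketch of that batching (deleteObjectsBatched is not part of the original code; it reuses the same aws-sdk-go types as the example):

//deleteObjectsBatched - sketch only; chunks deletions into batches of at most 1000 keys,
//the documented per-request limit for S3 DeleteObjects.
func deleteObjectsBatched(s3client *s3.S3, bucket string, objects []*s3.ObjectIdentifier) error {
	const maxKeysPerDelete = 1000
	for start := 0; start < len(objects); start += maxKeysPerDelete {
		end := start + maxKeysPerDelete
		if end > len(objects) {
			end = len(objects)
		}
		input := s3.DeleteObjectsInput{
			Bucket: &bucket,
			Delete: &s3.Delete{Objects: objects[start:end]},
		}
		if _, err := s3client.DeleteObjects(&input); err != nil {
			return err
		}
	}
	return nil
}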
Example #3
/*EnforceFileRetention - Responsible for removing files which are no longer
needed on the local file system.  For example, backup files which have been created
successfully locally and copied to S3 successfully can be deleted to preserve
local disk storage */
func (t *Task) EnforceFileRetention() (err error) {

	/*
	   If s3 copy is enabled you cannot delete files until they have been copied to s3
	   otherwise keep the most recent backups, say the last 48 hours worth and delete all others
	*/
	//Select eligible files
	var eligibleBackups []backup.DatabaseBackupList
	//Get the list of backups that exist locally...
	var localList []backup.DatabaseBackupList
	//If S3 backups are enabled, then don't delete a local file unless it has been backed up already
	if isS3FileCopyEnabled() {
		//backup.Both returns backups that exist both in the local filesystem and in remote storage
		localList, err = backup.Both("", true)
		if err != nil {
			log.Error(fmt.Sprintf("tasks.Task<%d>#EnforceFileRetention() ! utils/backup#Both() : %s", t.ID, err.Error()))
			return err
		}
	} else {
		localList, err = backup.LocalListing("", true)
		if err != nil {
			log.Error(fmt.Sprintf("tasks.Task<%d>#EnforceFileRetention() ! utils/backup.LocalListing() : %s", t.ID, err.Error()))
			return err
		}
	}
	eligibleBackups, err = findExpiredFiles(localList, false)
	if err != nil {
		log.Error(fmt.Sprintf("tasks.EnforceFileRetention() ! tasks.findExpiredFiles() : %s", err.Error()))
		return err
	}
	numFilesToDelete := 0
	for _, v := range eligibleBackups {
		numFilesToDelete += len(v.Backups)
	}

	log.Trace(fmt.Sprintf("tasks.EnforceFileRetention() > Found %d files to delete", numFilesToDelete))

	for _, eligibleDatabase := range eligibleBackups {
		for _, backupToDelete := range eligibleDatabase.Backups {
			fm := S3FileMetadata{
				Location:  backup.Location(eligibleDatabase.Database, backupToDelete.Name),
				DBName:    eligibleDatabase.Database,
				Node:      globals.MyIP,
				ClusterID: globals.ClusterID,
			}
			byteParams, err := json.Marshal(fm)
			if err != nil {
				log.Error(fmt.Sprintf("tasks.EnforceFileRetention() > Error attempting to marshal some JSON ! %+v %s", fm, err))
				return err
			}
			fileToDeleteParams := string(byteParams)
			log.Trace(fmt.Sprintf("tasks.EnforceFileRetention() > Attempting to add %s", fileToDeleteParams))
			newTask := Task{ClusterID: t.ClusterID, Node: t.Node, Role: t.Role, Action: "DeleteFile", Data: fileToDeleteParams, TTL: t.TTL, NodeType: t.NodeType}
			err = newTask.Enqueue()
			if err != nil {
				log.Error(fmt.Sprintf(`tasks.EnforceFileRetention() failed to enqueue DeleteFile task ! %s`, err))
			}
		}
	}

	return
}
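findExpiredFiles is called in Examples 2 and 3 but not shown. Purely as a hypothetical sketch of the retention rule described in the comment at the top of EnforceFileRetention (keep roughly the most recent window, delete the rest), matching only the call sites' signature; the 48-hour window and the idea that a backup's age can be parsed from its file name are assumptions, not the project's actual logic:

//findExpiredFiles - hypothetical sketch; the real implementation is not shown in these examples.
//It filters the given listing down to backups older than a retention cutoff.
func findExpiredFiles(list []backup.DatabaseBackupList, remote bool) (expired []backup.DatabaseBackupList, err error) {
	cutoff := time.Now().Add(-48 * time.Hour) //48h window is an assumption taken from the comment above
	for _, db := range list {
		old := backup.DatabaseBackupList{Database: db.Database}
		for _, b := range db.Backups {
			//Assumption: the backup file name begins with a parsable timestamp.
			created, parseErr := time.Parse("20060102150405", strings.TrimSuffix(b.Name, filepath.Ext(b.Name)))
			if parseErr != nil {
				continue //skip files whose age cannot be determined
			}
			if created.Before(cutoff) {
				old.Backups = append(old.Backups, b)
			}
		}
		if len(old.Backups) > 0 {
			expired = append(expired, old)
		}
	}
	return expired, nil
}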
Example #4
func RemoteCopyHandler(w http.ResponseWriter, request *http.Request) {
	dbname := request.FormValue("dbname")
	filename := request.FormValue("filename")
	//Can't copy to s3 if there's no s3.
	if !rdpgs3.Configured {
		w.WriteHeader(http.StatusGone)
		w.Write([]byte("Remote storage has not been configured"))
		return
	}

	if dbname == "" && filename != "" {
		//A backup for no database doesn't make any sense
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("Cannot specify filename without database"))
		return
	}

	//Select eligible files
	filesToCopy, _, err := backup.Diff(dbname, true)
	if err != nil {
		log.Error(fmt.Sprintf(`api.RemoteCopyHandler() ! utils/backup.Diff("%s", true) erred : %s`, dbname, err.Error()))
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("Error getting file information"))
		return
	}
	//Determine this node type
	nType := "read"
	if rdpgconsul.IsWriteNode(globals.MyIP) {
		nType = "write"
	}

	numFiles := 0
	for _, dbWithBackupsToCopy := range filesToCopy {
		for _, backupToCopy := range dbWithBackupsToCopy.Backups {
			if filename != "" && backupToCopy.Name != filename {
				continue
			}
			//Gather the info necessary for uploading the file.
			fm := tasks.S3FileMetadata{}
			fm.Location = backup.Location(dbWithBackupsToCopy.Database, backupToCopy.Name)
			fm.DBName = dbWithBackupsToCopy.Database
			fm.Node = globals.MyIP
			fm.ClusterID = globals.ClusterID
			//JSONify that info
			fileToCopyParams, err := json.Marshal(fm)
			if err != nil {
				log.Error(fmt.Sprintf("tasks.FindFilesToCopyToS3() > Error attempting t)o marshal some JSON ! %+v %s", fm, err))
				w.WriteHeader(http.StatusInternalServerError)
				w.Write([]byte("An error occurred when marshalling JSON"))
				return
			}
			log.Trace(fmt.Sprintf("api.CopyFileHelper > Attempting to copy %s", fileToCopyParams))
			//Insert the task
			newTask := tasks.Task{
				ClusterID: globals.ClusterID,
				Node:      globals.MyIP,
				Role:      globals.ServiceRole,
				Action:    "CopyFileToS3",
				Data:      string(fileToCopyParams),
				TTL:       3600,
				NodeType:  nType,
			}
			err = newTask.CopyFileToS3()
			if err != nil {
				log.Error(fmt.Sprintf(`api.RemoteCopyHandler ! task.CopyFileToS3 erred : %s`, err.Error()))
				w.WriteHeader(http.StatusInternalServerError)
				w.Write([]byte("An error occurred when copying files to remote storage"))
				return
			}
			numFiles++
		}
	}

	w.Write([]byte(fmt.Sprintf("%d files were written to S3", numFiles)))
}
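Finally, a minimal sketch of exercising RemoteCopyHandler from a Go test with net/http/httptest. The route and database name are illustrative assumptions, and the handler still needs a configured backup/S3 environment behind it, so this only shows the shape of the call:

import (
	"net/http"
	"net/http/httptest"
	"net/url"
	"strings"
	"testing"
)

func TestRemoteCopyHandler(t *testing.T) {
	form := url.Values{"dbname": {"rdpg"}} //database name is illustrative
	req := httptest.NewRequest("POST", "/remote/copy", strings.NewReader(form.Encode())) //route is an assumption
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	rec := httptest.NewRecorder()

	RemoteCopyHandler(rec, req)

	if rec.Code != http.StatusOK {
		t.Fatalf("unexpected status %d: %s", rec.Code, rec.Body.String())
	}
}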