func Restore(snapshotName string) {
	snapshot := common.LoadSnapshot(common.Cfg.BackupSet.SnapshotsDir + "/" + snapshotName)

	log.Info("Restoring bucket %s to snapshot %s.", common.Cfg.BackupSet.MasterBucket, snapshotName)

	common.ConfigureAws(common.Cfg.BackupSet.MasterRegion)

	// Register each worker id as ready and start one download and one upload goroutine per id.
	for i := 0; i < common.RestoreWorkerCount; i++ {
		readyRestoreWorkers <- i
		go downloadWorker()
		go uploadWorker()
	}

	for _, version := range snapshot.Contents {
		wid := <-readyRestoreWorkers
		downloadWorkQueue <- DownloadWork{Wid: wid, Version: version, Retry: 0}
	}

	// Drain one ready token per worker so all dispatched versions are known to be restored.
	for i := common.RestoreWorkerCount; i > 0; i-- {
		log.Info("Wait for %d restore workers to finish.", i)
		wid := <-readyRestoreWorkers
		log.Info("Restore worker [%d] finished.", wid)
	}
	close(downloadWorkQueue)
	close(uploadWorkQueue)

	log.Info("Restored bucket %s to snapshot %s.", common.Cfg.BackupSet.MasterBucket, snapshotName)
}
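The package-level channels and work types used by Restore and its workers are not part of this excerpt. The following is a minimal sketch of plausible declarations, inferred from how they are used; the names, buffer sizes, and field types are assumptions, not the original source.

// Hypothetical declarations for the restore pipeline; reconstructed from usage above.
var (
	readyRestoreWorkers = make(chan int, common.RestoreWorkerCount)          // worker ids that are free to take work
	downloadWorkQueue   = make(chan DownloadWork, common.RestoreWorkerCount) // versions waiting to be fetched
	uploadWorkQueue     = make(chan UploadWork, common.RestoreWorkerCount)   // fetched payloads waiting to be uploaded
)

// DownloadWork describes one object version a download worker should fetch.
type DownloadWork struct {
	Wid     int
	Version common.Version
	Retry   int
}

// UploadWork carries the downloaded bytes of a version to the upload stage.
type UploadWork struct {
	Wid     int
	Version common.Version
	Bytes   []byte
	Retry   int
}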
func dispatchWorkers() {
forloop:
	for {
		select {
		case path := <-workRequests:
			select {
			case wid := <-readySnapshotWorkers:
				go snapshotWorker(wid, path)
			default:
				snapshotWorkQueue = append(snapshotWorkQueue, path)
			}
		case wid := <-doneSnapshotWorkers:
			if len(snapshotWorkQueue) == 0 {
				break forloop
			}
			path := snapshotWorkQueue[0]
			snapshotWorkQueue = snapshotWorkQueue[1:]
			go snapshotWorker(wid, path)
		}
	}

	// One snapshot worker already reported done before the loop exited, so count-1 remain.
	for i := common.SnapshotWorkerCount; i > 1; i-- {
		log.Info("Wait for %d snapshot workers to finish.", i)
		select {
		case wid := <-readySnapshotWorkers:
			log.Info("Snapshot worker [%d] finished, was never executed.", wid)
		case wid := <-doneSnapshotWorkers:
			log.Info("Snapshot worker [%d] finished.", wid)
		}
	}
	log.Info("All snapshot workers finished.")
	close(versionsFunnel)
}
func Snapshot() {

	timestamp := time.Now()
	timestampStr := timestamp.Format("20060102150405-0700MST")
	log.Info("Taking snapshot %s of bucket %s.", timestampStr, common.Cfg.BackupSet.SlaveBucket)

	common.ConfigureAws(common.Cfg.BackupSet.SlaveRegion)
	for wid := 0; wid < common.SnapshotWorkerCount; wid++ {
		readySnapshotWorkers <- wid
	}

	// Seed the dispatcher with the empty prefix, i.e. the root of the slave bucket.
	go func() { workRequests <- "" }()
	go dispatchWorkers()

	for newVersions := range versionsFunnel {
		versions = append(versions, newVersions...)
	}

	log.Info("Dumping snapshot to %s%s.", common.Cfg.BackupSet.SnapshotsDir, timestampStr)
	snapshot := &common.Snapshot{
		File:      common.Cfg.BackupSet.SnapshotsDir + "/" + timestampStr,
		Timestamp: timestamp,
		Contents:  versions,
	}
	if common.Cfg.BackupSet.CompressSnapshots {
		snapshot.File += ".Z"
	}
	bytes, err := json.MarshalIndent(snapshot, "", "    ")
	if err != nil {
		log.Fatal("Could not marshal snapshot %s: %s", timestampStr, err)
	}
	if common.Cfg.BackupSet.CompressSnapshots {
		f, openErr := os.OpenFile(snapshot.File, os.O_WRONLY|os.O_CREATE, 0644)
		if openErr != nil {
			log.Fatal("Could not open file %s: %s", snapshot.File, openErr)
		}
		defer f.Close()

		w := gzip.NewWriter(f)
		if _, writeErr := w.Write(bytes); writeErr != nil {
			log.Fatal("Could not write compressed snapshot file %s: %s", snapshot.File, writeErr)
		}
		w.Close()
	} else {
		if writeErr := ioutil.WriteFile(snapshot.File, bytes, 0644); writeErr != nil {
			log.Fatal("Could not write snapshot file %s: %s", snapshot.File, writeErr)
		}
	}
	log.Info("Snapshot %s of bucket %s is DONE.", timestampStr, common.Cfg.BackupSet.SlaveBucket)
}
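Snapshot, dispatchWorkers, and snapshotWorker share package-level channels and slices that are likewise outside this excerpt. Below is a plausible sketch, assuming buffered channels sized to the worker count; the element types follow their usage, while the buffer sizes are guesses.

// Hypothetical shared state for the snapshot pipeline; inferred from usage, not the original source.
var (
	workRequests         = make(chan string, common.SnapshotWorkerCount)           // discovered prefixes waiting to be explored
	readySnapshotWorkers = make(chan int, common.SnapshotWorkerCount)              // worker ids that are idle
	doneSnapshotWorkers  = make(chan int, common.SnapshotWorkerCount)              // worker ids that finished exploring a prefix
	versionsFunnel       = make(chan []common.Version, common.SnapshotWorkerCount) // per-prefix version lists to collect
	snapshotWorkQueue    []string                                                  // overflow queue when no worker is ready
	versions             []common.Version                                          // all latest versions collected so far
)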
func discriminateSnapshots(snapshots []common.Snapshot) (old []common.Snapshot, recent []common.Snapshot) {
	retentionPeriod := time.Now().AddDate(0, 0, -common.Cfg.BackupSet.RetentionPolicy)
	log.Info("Retention period is from %s up until now.", retentionPeriod)
	for _, snapshot := range snapshots {
		if retentionPeriod.After(snapshot.Timestamp) {
			log.Info("Snapshot '%s' on %s is old.", snapshot.File, snapshot.Timestamp)
			old = append(old, snapshot)
		} else {
			log.Info("Snapshot '%s' on %s is recent.", snapshot.File, snapshot.Timestamp)
			recent = append(recent, snapshot)
		}
	}
	return
}
func LoadSnapshots() (snapshots []Snapshot) {
	log.Info("Loading snapshots")
	files, _ := filepath.Glob(Cfg.BackupSet.SnapshotsDir + "/*")
	for _, file := range files {
		snapshots = append(snapshots, LoadSnapshot(file))
	}
	return
}
func GarbageCollect() {
	log.Info("Garbage collecting obsolete backups.")
	snapshots := common.LoadSnapshots()
	if len(snapshots) <= common.Cfg.BackupSet.MinimumRedundancy {
		log.Fatal("Minimum redundancy is not met. Current snapshot count is %d.", len(snapshots))
	}
	oldSnapshots, recentSnapshots := discriminateSnapshots(snapshots)
	versionsToRemove := discriminateVersions(oldSnapshots, recentSnapshots)
	if ok := removeVersions(versionsToRemove); !ok {
		log.Fatal("There was an unhandled error removing obsolete versions, exiting.")
	}
	removeSnapshots(oldSnapshots)
}
func LoadSnapshot(file string) (snapshot Snapshot) {
	log.Info("Loading snapshot file '%s'.", file)
	var bytes []byte
	var readErr error
	if filepath.Ext(file) == ".Z" {
		f, openErr := os.OpenFile(file, os.O_RDONLY, 0000)
		if openErr != nil {
			log.Fatal("Could not open file %s: %s", file, openErr)
		}
		defer f.Close()

		r, nrErr := gzip.NewReader(f)
		if nrErr != nil {
			log.Fatal("Could not initialize gzip decompressor: %s", nrErr)
		}
		defer r.Close()

		bytes, readErr = ioutil.ReadAll(r)
		if readErr != nil {
			log.Fatal("Could not read compressed snapshot file %s: %s", file, readErr)
		}
	} else {
		bytes, readErr = ioutil.ReadFile(file)
		if readErr != nil {
			log.Fatal("Could not read snapshot file '%s': %s", file, readErr)
		}
	}
	err := json.Unmarshal(bytes, &snapshot)
	if err != nil {
		log.Fatal("Could not parse snapshot file '%s': %s", file, err)
	}
	snapshot.File = file
	if Cfg.LogLevel > 0 {
		pretty, _ := json.MarshalIndent(snapshot, "", "    ")
		log.Debug("Snapshot '%s':\n%s", file, pretty)
	}
	return
}
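The Snapshot and Version types referenced throughout are defined elsewhere in the common package. The sketch below is consistent with the fields accessed in this excerpt; JSON tags and any additional fields are unknown and omitted.

// Hypothetical definitions in package common, reconstructed from field usage only.
type Snapshot struct {
	File      string    // path of the snapshot file on disk
	Timestamp time.Time // when the snapshot was taken
	Contents  []Version // latest version of every key at snapshot time
}

type Version struct {
	Key          string    // S3 object key
	LastModified time.Time // last modification time reported by S3
	Size         int64     // object size in bytes
	VersionId    string    // S3 version id
}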
func snapshotWorker(wid int, path string) {

	log.Info("[%d] Explore path '%s'.", wid, path)

	s3Client := s3.New(nil)
	params := &s3.ListObjectVersionsInput{
		Bucket:    aws.String(common.Cfg.BackupSet.SlaveBucket),
		Delimiter: aws.String("/"),
		// EncodingType:    aws.String("EncodingType"),
		// KeyMarker:       aws.String("KeyMarker"),
		MaxKeys: aws.Int64(common.SnapshotBatchSize),
		Prefix:  aws.String(path),
		// VersionIdMarker: aws.String("VersionIdMarker"),
	}
	var discoveredVersions []common.Version
	buffer := make([]common.Version, common.SnapshotBatchSize)

	for batch := 1; ; batch++ {
		log.Debug("[%d] Request batch %d for path '%s'", wid, batch, path)
		resp, err := s3Client.ListObjectVersions(params)

		if err != nil {
			if awsErr, ok := err.(awserr.Error); ok {
				if reqErr, ok := err.(awserr.RequestFailure); ok {
					// A service error occurred
					log.Error(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
					log.Fatal(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
				} else {
					log.Fatal(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
				}
			} else {
				// This case should never be hit; the SDK should always return an
				// error which satisfies the awserr.Error interface.
				log.Fatal(err.Error())
			}
		}

		for _, cp := range resp.CommonPrefixes {
			discoveredPath := *cp.Prefix
			log.Info("[%d] Discover path '%s'.", wid, discoveredPath)
			workRequests <- discoveredPath
		}

		index := 0
		for _, v := range resp.Versions {
			if v.IsLatest == nil {
				log.Fatal("[%d] IsLatest is nil", wid)
			}
			if *v.IsLatest {
				if v.Key == nil {
					log.Fatal("[%d] Key is nil", wid)
				}
				if v.LastModified == nil {
					log.Fatal("[%d] LastModified is nil", wid)
				}
				if v.Size == nil {
					log.Fatal("[%d] Size is nil", wid)
				}
				if v.VersionId == nil {
					log.Fatal("[%d] VersionId is nil", wid)
				}
				version := common.Version{
					Key:          *v.Key,
					LastModified: *v.LastModified,
					Size:         *v.Size,
					VersionId:    *v.VersionId,
				}
				log.Debug("[%d] Discover latest version: %s", wid, version)
				buffer[index] = version
				index++
			} else {
				log.Debug("[%d] Discover noncurrent latest version for key '%s'.", wid, *v.Key)
			}
		}
		discoveredVersions = append(discoveredVersions, buffer[0:index]...)

		if !*resp.IsTruncated {
			break
		}
		log.Info("[%d] Continue exploring path '%s'.", wid, path)

		if resp.NextVersionIdMarker != nil {
			log.Debug("[%d] NextVersionIdMarker = %+v", wid, resp.NextVersionIdMarker)
		} else {
			log.Debug("[%d] NextVersionIdMarker = nil", wid)
		}
		if resp.NextKeyMarker != nil {
			log.Debug("[%d] NextKeyMarker = %+v", wid, resp.NextKeyMarker)
		} else {
			log.Debug("[%d] NextKeyMarker = nil", wid)
		}
		params.VersionIdMarker = resp.NextVersionIdMarker
		params.KeyMarker = resp.NextKeyMarker
	}

	log.Info("[%d] Registering versions for path '%s'.", wid, path)
	versionsFunnel <- discoveredVersions

	log.Info("[%d] Done exploring path '%s'.", wid, path)
	doneSnapshotWorkers <- wid
}
func uploadWorker() {

	s3Client := s3.New(nil)

	for work := range uploadWorkQueue {

		log.Debug("[%d] Upload version, retry %d: %s", work.Wid, work.Retry, work.Version)
		putParams := &s3.PutObjectInput{
			Bucket: aws.String(common.Cfg.BackupSet.MasterBucket), // Required
			Key:    aws.String(work.Version.Key),                  // Required
			// ACL:                aws.String("ObjectCannedACL"),
			Body: bytes.NewReader(work.Bytes),
			// CacheControl:       aws.String("CacheControl"),
			// ContentDisposition: aws.String("ContentDisposition"),
			// ContentEncoding:    aws.String("ContentEncoding"),
			// ContentLanguage:    aws.String("ContentLanguage"),
			// ContentLength:      aws.Long(1),
			// ContentType:        aws.String("ContentType"),
			// Expires:            aws.Time(time.Now()),
			// GrantFullControl:   aws.String("GrantFullControl"),
			// GrantRead:          aws.String("GrantRead"),
			// GrantReadACP:       aws.String("GrantReadACP"),
			// GrantWriteACP:      aws.String("GrantWriteACP"),
			// Metadata: map[string]*string{
			// 	"Key": aws.String("MetadataValue"), // Required
			//	// More values...
			// },
			// RequestPayer:            aws.String("RequestPayer"),
			// SSECustomerAlgorithm:    aws.String("SSECustomerAlgorithm"),
			// SSECustomerKey:          aws.String("SSECustomerKey"),
			// SSECustomerKeyMD5:       aws.String("SSECustomerKeyMD5"),
			// SSEKMSKeyID:             aws.String("SSEKMSKeyId"),
			// ServerSideEncryption:    aws.String("ServerSideEncryption"),
			// StorageClass:            aws.String("StorageClass"),
			// WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"),
		}
		_, putErr := s3Client.PutObject(putParams)

		if putErr != nil {
			if awsErr, ok := putErr.(awserr.Error); ok {
				log.Error("[%d] Error code '%s', message '%s', origin '%s'", work.Wid, awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
				if reqErr, ok := putErr.(awserr.RequestFailure); ok {
					log.Error("[%d] Service error code '%s', message '%s', status code '%d', request id '%s'", work.Wid, reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
				}
			} else {
				// This case should never be hit; the SDK should always return an
				// error which satisfies the awserr.Error interface.
				log.Error("[%d] Non AWS error: %s", work.Wid, putErr.Error())
			}

			work.Retry++
			if work.Retry == common.MaxRetries {
				log.Fatal("[%d] Error uploading version, retry %d: %s", work.Wid, work.Retry, work.Version)
			}
			log.Error("[%d] Error uploading version, retry %d: %s", work.Wid, work.Retry, work.Version)
			uploadWorkQueue <- work
			continue
		}

		log.Info("[%d] Restored version: %s", work.Wid, work.Version)
		readyRestoreWorkers <- work.Wid
	}
}
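Restore also starts a downloadWorker per worker id, which is not included in this excerpt. The following is a minimal sketch of what it plausibly does, mirroring uploadWorker's retry pattern; reading from the slave bucket and the hand-off via the hypothetical UploadWork struct are assumptions, not the original implementation.

// Hypothetical counterpart to uploadWorker: fetch each snapshotted version and
// hand its bytes to the upload stage. Bucket choice, retries and the UploadWork
// struct are assumptions inferred from how Restore and uploadWorker behave.
func downloadWorker() {

	s3Client := s3.New(nil)

	for work := range downloadWorkQueue {

		log.Debug("[%d] Download version, retry %d: %s", work.Wid, work.Retry, work.Version)
		resp, getErr := s3Client.GetObject(&s3.GetObjectInput{
			Bucket:    aws.String(common.Cfg.BackupSet.SlaveBucket),
			Key:       aws.String(work.Version.Key),
			VersionId: aws.String(work.Version.VersionId),
		})

		if getErr != nil {
			work.Retry++
			if work.Retry == common.MaxRetries {
				log.Fatal("[%d] Error downloading version, retry %d: %s", work.Wid, work.Retry, work.Version)
			}
			log.Error("[%d] Error downloading version, retry %d: %s", work.Wid, work.Retry, work.Version)
			downloadWorkQueue <- work
			continue
		}

		body, readErr := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if readErr != nil {
			log.Fatal("[%d] Could not read body of version %s: %s", work.Wid, work.Version, readErr)
		}

		uploadWorkQueue <- UploadWork{Wid: work.Wid, Version: work.Version, Bytes: body}
	}
}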