func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	parseParams()
	loadConfig()
	log.Init(common.Cfg.Syslog, common.Cfg.LogLevel)
	for i, param := range flag.Args() {
		switch param {
		case "snapshot":
			snapshot.Snapshot()
			return
		case "list-snapshots":
			ls.ListSnapshots()
			return
		case "restore":
			snapshotName := flag.Args()[i+1:]
			if len(snapshotName) == 1 {
				restore.Restore(snapshotName[0])
				return
			} else {
				log.Fatal("Too many or too few parameters for command restore: %s", snapshotName)
			}
		case "gc":
			gc.GarbageCollect()
			return
		default:
			log.Fatal("Found unhandled command '%s'.", param)
		}
	}
	log.Fatal("No command was given.")
}
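parseParams() and loadConfig() are not part of the example above. A minimal sketch of what parseParams might look like, assuming a single -config flag (the flag name and its default path are placeholders; only the use of the standard flag package follows from the flag.Args() call in main):

// Hypothetical -config flag; the name and the default path are assumptions.
var configFile = flag.String("config", "/etc/backup/config.json", "path to the configuration file")

func parseParams() {
	// After Parse(), the remaining positional arguments (the command and its
	// parameters) are available via flag.Args(), as used in main().
	flag.Parse()
}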
Example #2
func Snapshot() {

	timestamp := time.Now()
	timestampStr := timestamp.Format("20060102150405-0700MST")
	log.Info("Taking snapshot %s of bucket %s.", timestampStr, common.Cfg.BackupSet.SlaveBucket)

	common.ConfigureAws(common.Cfg.BackupSet.SlaveRegion)
	for wid := 0; wid < common.SnapshotWorkerCount; wid++ {
		readySnapshotWorkers <- wid
	}

	go func() { workRequests <- "" }()
	go dispatchWorkers()

	for newVersions := range versionsFunnel {
		versions = append(versions, newVersions...)
	}

	log.Info("Dumping snapshot to %s%s.", common.Cfg.BackupSet.SnapshotsDir, timestampStr)
	snapshot := &common.Snapshot{
		File:      common.Cfg.BackupSet.SnapshotsDir + "/" + timestampStr,
		Timestamp: timestamp,
		Contents:  versions,
	}
	if common.Cfg.BackupSet.CompressSnapshots {
		snapshot.File += ".Z"
	}
	bytes, err := json.MarshalIndent(snapshot, "", "    ")
	if err != nil {
		log.Fatal("Could not marshal snapshot %s: %s", timestampStr, err)
	}
	if common.Cfg.BackupSet.CompressSnapshots {
		f, openErr := os.OpenFile(snapshot.File, os.O_WRONLY|os.O_CREATE, 0644)
		if openErr != nil {
			log.Fatal("Could not open file %s: %s", snapshot.File, openErr)
		}
		defer f.Close()

		w := gzip.NewWriter(f)
		if _, writeErr := w.Write(bytes); writeErr != nil {
			log.Fatal("Could not write compressed snapshot file %s: %s", snapshot.File, writeErr)
		}
		if closeErr := w.Close(); closeErr != nil {
			log.Fatal("Could not finalize compressed snapshot file %s: %s", snapshot.File, closeErr)
		}
	} else {
		if writeErr := ioutil.WriteFile(snapshot.File, bytes, 0644); writeErr != nil {
			log.Fatal("Could not write snapshot file %s: %s", snapshot.File, writeErr)
		}
	}
	log.Info("Snapshot %s of bucket %s is DONE.", timestampStr, common.Cfg.BackupSet.SlaveBucket)
}
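Snapshot() relies on channel plumbing that is not shown in the example: workRequests carries S3 prefixes to explore, readySnapshotWorkers and doneSnapshotWorkers carry worker IDs, and versionsFunnel collects the discovered versions. Below is a minimal sketch of how that plumbing and dispatchWorkers() could look; the channel and variable names come from the snippets, while the buffer sizes and the dispatch logic itself are assumptions (workRequests is assumed unbuffered, which would explain why Snapshot() seeds it from a goroutine).

var (
	workRequests         = make(chan string)                          // unbuffered: senders block until the dispatcher takes the path
	readySnapshotWorkers = make(chan int, common.SnapshotWorkerCount) // idle worker IDs
	doneSnapshotWorkers  = make(chan int, common.SnapshotWorkerCount) // finished worker IDs
	versionsFunnel       = make(chan []common.Version)                // results collected by Snapshot()
	versions             []common.Version
)

// dispatchWorkers pairs queued prefixes with idle worker IDs and closes
// versionsFunnel once no work is pending and no worker is running, which
// terminates the collection loop in Snapshot().
func dispatchWorkers() {
	var pending []string
	busy := 0
	for {
		if len(pending) > 0 {
			select {
			case path := <-workRequests:
				pending = append(pending, path)
			case wid := <-readySnapshotWorkers:
				busy++
				go snapshotWorker(wid, pending[0])
				pending = pending[1:]
			case wid := <-doneSnapshotWorkers:
				busy--
				readySnapshotWorkers <- wid
			}
			continue
		}
		select {
		case path := <-workRequests:
			pending = append(pending, path)
		case wid := <-doneSnapshotWorkers:
			busy--
			readySnapshotWorkers <- wid
			if busy == 0 {
				close(versionsFunnel)
				return
			}
		}
	}
}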
Example #3
func GarbageCollect() {
	log.Info("Garbage collecting obsolete backups.")
	snapshots := common.LoadSnapshots()
	if len(snapshots) <= common.Cfg.BackupSet.MinimumRedundancy {
		log.Fatal("Minimum redundancy is not met. Current snapshot count is %d.", len(snapshots))
	}
	oldSnapshots, recentSnapshots := discriminateSnapshots(snapshots)
	versionsToRemove := discriminateVersions(oldSnapshots, recentSnapshots)
	if ok := removeVersions(versionsToRemove); !ok {
		log.Fatal("There was an unhandled error removing obsolete versions, exiting.")
	}
	removeSnapshots(oldSnapshots)
}
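Neither discriminateSnapshots nor discriminateVersions is shown here. As an illustration, a minimal sketch of what discriminateSnapshots might do, assuming the retention policy is simply "keep the newest MinimumRedundancy snapshots" (the field name comes from the check above; the project's actual selection policy may differ):

func discriminateSnapshots(snapshots []common.Snapshot) (old, recent []common.Snapshot) {
	// Sort newest first by snapshot timestamp (uses the standard sort package).
	sort.Slice(snapshots, func(i, j int) bool {
		return snapshots[i].Timestamp.After(snapshots[j].Timestamp)
	})
	keep := common.Cfg.BackupSet.MinimumRedundancy
	if keep > len(snapshots) {
		keep = len(snapshots)
	}
	// The newest 'keep' snapshots stay; everything older becomes a GC candidate.
	return snapshots[keep:], snapshots[:keep]
}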
Example #4
func LoadSnapshot(file string) (snapshot Snapshot) {
	log.Info("Loading snapshot file '%s'.", file)
	var bytes []byte
	var readErr error
	if filepath.Ext(file) == ".Z" {
		f, openErr := os.Open(file)
		if openErr != nil {
			log.Fatal("Could not open file %s: %s", file, openErr)
		}
		defer f.Close()

		r, nrErr := gzip.NewReader(f)
		if nrErr != nil {
			log.Fatal("Could not initialize gzip decompressor: %s", nrErr)
		}
		defer r.Close()

		bytes, readErr = ioutil.ReadAll(r)
		if readErr != nil {
			log.Fatal("Could not read compressed snapshot file %s: %s", file, readErr)
		}
	} else {
		bytes, readErr = ioutil.ReadFile(file)
		if readErr != nil {
			log.Fatal("Could not read snapshot file '%s': %s", file, readErr)
		}
	}
	err := json.Unmarshal(bytes, &snapshot)
	if err != nil {
		log.Fatal("Could not parse snapshot file '%s': %s", file, err)
	}
	snapshot.File = file
	if Cfg.LogLevel > 0 {
		pretty, _ := json.MarshalIndent(snapshot, "", "    ")
		log.Debug("Snapshot '%s':\n%s", file, pretty)
	}
	return
}
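The gc example above calls common.LoadSnapshots(). A minimal sketch of how it might enumerate the snapshots directory and reuse LoadSnapshot for every entry; this is an assumption, the real function may filter or order the files differently:

func LoadSnapshots() (snapshots []Snapshot) {
	entries, err := ioutil.ReadDir(Cfg.BackupSet.SnapshotsDir)
	if err != nil {
		log.Fatal("Could not list snapshots directory '%s': %s", Cfg.BackupSet.SnapshotsDir, err)
	}
	for _, entry := range entries {
		if entry.IsDir() {
			continue // snapshots are plain files, optionally gzip-compressed (.Z)
		}
		snapshots = append(snapshots, LoadSnapshot(Cfg.BackupSet.SnapshotsDir+"/"+entry.Name()))
	}
	return
}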
Example #5
func snapshotWorker(wid int, path string) {

	log.Info("[%d] Explore path '%s'.", wid, path)

	s3Client := s3.New(nil)
	params := &s3.ListObjectVersionsInput{
		Bucket:    aws.String(common.Cfg.BackupSet.SlaveBucket),
		Delimiter: aws.String("/"),
		// EncodingType:    aws.String("EncodingType"),
		// KeyMarker:       aws.String("KeyMarker"),
		MaxKeys: aws.Int64(common.SnapshotBatchSize),
		Prefix:  aws.String(path),
		// VersionIdMarker: aws.String("VersionIdMarker"),
	}
	var discoveredVersions []common.Version
	buffer := make([]common.Version, common.SnapshotBatchSize)

	for batch := 1; ; batch++ {
		log.Debug("[%d] Request batch %d for path '%s'", wid, batch, path)
		resp, err := s3Client.ListObjectVersions(params)

		if err != nil {
			if awsErr, ok := err.(awserr.Error); ok {
				if reqErr, ok := err.(awserr.RequestFailure); ok {
					// A service error occurred
					log.Error(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
					log.Fatal(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
				} else {
					log.Fatal(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
				}
			} else {
				// This case should never be hit; the SDK should always return an
				// error that satisfies the awserr.Error interface.
				log.Fatal(err.Error())
			}
		}

		for _, cp := range resp.CommonPrefixes {
			discoveredPath := *cp.Prefix
			log.Info("[%d] Discover path '%s'.", wid, discoveredPath)
			workRequests <- discoveredPath
		}

		index := 0
		for _, v := range resp.Versions {
			if v.IsLatest == nil {
				log.Fatal("[%d] IsLatest is nil", wid)
			}
			if *v.IsLatest {
				if v.Key == nil {
					log.Fatal("[%d] Key is nil", wid)
				}
				if v.LastModified == nil {
					log.Fatal("[%d] LastModified is nil", wid)
				}
				if v.Size == nil {
					log.Fatal("[%d] Size is nil", wid)
				}
				if v.VersionId == nil {
					log.Fatal("[%d] VersionId is nil", wid)
				}
				version := common.Version{
					Key:          *v.Key,
					LastModified: *v.LastModified,
					Size:         *v.Size,
					VersionId:    *v.VersionId,
				}
				log.Debug("[%d] Discover latest version: %s", wid, version)
				buffer[index] = version
				index++
			} else {
				log.Debug("[%d] Discover noncurrent latest version for key '%s'.", wid, *v.Key)
			}
		}
		discoveredVersions = append(discoveredVersions, buffer[0:index]...)

		if !*resp.IsTruncated {
			break
		}
		log.Info("[%d] Continue exploring path '%s'.", wid, path)

		if resp.NextVersionIdMarker != nil {
			log.Debug("[%d] NextVersionIdMarker = %+v", wid, resp.NextVersionIdMarker)
		} else {
			log.Debug("[%d] NextVersionIdMarker = nil", wid)
		}
		if resp.NextKeyMarker != nil {
			log.Debug("[%d] NextKeyMarker = %+v", wid, resp.NextKeyMarker)
		} else {
			log.Debug("[%d] NextKeyMarker = nil", wid)
		}
		params.VersionIdMarker = resp.NextVersionIdMarker
		params.KeyMarker = resp.NextKeyMarker
	}

	log.Info("[%d] Registering versions for path '%s'.", wid, path)
	versionsFunnel <- discoveredVersions

	log.Info("[%d] Done exploring path '%s'.", wid, path)
	doneSnapshotWorkers <- wid
}
Example #6
func downloadWorker() {

	s3Client := s3.New(nil)

	for work := range downloadWorkQueue {

		log.Debug("[%d] Download version, retry %d: %s", work.Wid, work.Retry, work.Version)
		getParams := &s3.GetObjectInput{
			Bucket: aws.String(common.Cfg.BackupSet.SlaveBucket), // Required
			Key:    aws.String(work.Version.Key),                 // Required
			// IfMatch:                    aws.String("IfMatch"),
			// IfModifiedSince:            aws.Time(time.Now()),
			// IfNoneMatch:                aws.String("IfNoneMatch"),
			// IfUnmodifiedSince:          aws.Time(time.Now()),
			// Range:                      aws.String("Range"),
			// RequestPayer:               aws.String("RequestPayer"),
			// ResponseCacheControl:       aws.String("ResponseCacheControl"),
			// ResponseContentDisposition: aws.String("ResponseContentDisposition"),
			// ResponseContentEncoding:    aws.String("ResponseContentEncoding"),
			// ResponseContentLanguage:    aws.String("ResponseContentLanguage"),
			// ResponseContentType:        aws.String("ResponseContentType"),
			// ResponseExpires:            aws.Time(time.Now()),
			// SSECustomerAlgorithm:       aws.String("SSECustomerAlgorithm"),
			// SSECustomerKey:             aws.String("SSECustomerKey"),
			// SSECustomerKeyMD5:          aws.String("SSECustomerKeyMD5"),
			VersionId: aws.String(work.Version.VersionId),
		}
		getResp, getErr := s3Client.GetObject(getParams)
		if getErr != nil {
			if awsErr, ok := getErr.(awserr.Error); ok {
				log.Error("[%d] Error code '%s', message '%s', origin '%s'", work.Wid, awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
				if reqErr, ok := getErr.(awserr.RequestFailure); ok {
					log.Error("[%d] Service error code '%s', message '%s', status code '%d', request id '%s'", work.Wid, reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
				}
			} else {
				// This case should never be hit; the SDK should always return an
				// error that satisfies the awserr.Error interface.
				log.Error("[%d] Non AWS error: %s", work.Wid, getErr.Error())
			}

			work.Retry++
			if work.Retry == common.MaxRetries {
				log.Fatal("[%d] Error downloading version, retry %d: %s", work.Wid, work.Retry, work.Version)
			}
			log.Error("[%d] Error downloading version, retry %d: %s", work.Wid, work.Retry, work.Version)
			downloadWorkQueue <- work
			continue
		}

		log.Debug("[%d] Read response: %s", work.Wid, work.Version)
		bytes, readErr := ioutil.ReadAll(getResp.Body)
		getResp.Body.Close()
		if readErr != nil {
			work.Retry++
			if work.Retry == common.MaxRetries {
				log.Fatal("[%d] Could not read version %s, retry %d: %s", work.Wid, work.Version, work.Retry, readErr)
			}
			log.Error("[%d] Could not read version %s, retry %d: %s", work.Wid, work.Version, work.Retry, readErr)
			downloadWorkQueue <- work
			continue
		}

		log.Debug("[%d] Downloaded version: %s", work.Wid, work.Version)
		uploadWorkQueue <- UploadWork{Wid: work.Wid, Version: work.Version, Bytes: bytes, Retry: 0}
	}
}
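A minimal sketch of the work item types and queues that downloadWorker and uploadWorker (next example) range over. The field names mirror how they are used in the snippets; the buffer sizes are placeholders. The queues should be buffered, because a worker re-enqueues its own work item on a retry.

type DownloadWork struct {
	Wid     int
	Version common.Version
	Retry   int
}

type UploadWork struct {
	Wid     int
	Version common.Version
	Bytes   []byte
	Retry   int
}

var (
	downloadWorkQueue   = make(chan DownloadWork, 128)
	uploadWorkQueue     = make(chan UploadWork, 128)
	readyRestoreWorkers = make(chan int, 16) // must cover the number of restore workers
)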
Example #7
func uploadWorker() {

	s3Client := s3.New(nil)

	for work := range uploadWorkQueue {

		log.Debug("[%d] Upload version, retry %d: %s", work.Wid, work.Retry, work.Version)
		putParams := &s3.PutObjectInput{
			Bucket: aws.String(common.Cfg.BackupSet.MasterBucket), // Required
			Key:    aws.String(work.Version.Key),                  // Required
			// ACL:                aws.String("ObjectCannedACL"),
			Body: bytes.NewReader(work.Bytes),
			// CacheControl:       aws.String("CacheControl"),
			// ContentDisposition: aws.String("ContentDisposition"),
			// ContentEncoding:    aws.String("ContentEncoding"),
			// ContentLanguage:    aws.String("ContentLanguage"),
			// ContentLength:      aws.Long(1),
			// ContentType:        aws.String("ContentType"),
			// Expires:            aws.Time(time.Now()),
			// GrantFullControl:   aws.String("GrantFullControl"),
			// GrantRead:          aws.String("GrantRead"),
			// GrantReadACP:       aws.String("GrantReadACP"),
			// GrantWriteACP:      aws.String("GrantWriteACP"),
			// Metadata: map[string]*string{
			// 	"Key": aws.String("MetadataValue"), // Required
			//	// More values...
			// },
			// RequestPayer:            aws.String("RequestPayer"),
			// SSECustomerAlgorithm:    aws.String("SSECustomerAlgorithm"),
			// SSECustomerKey:          aws.String("SSECustomerKey"),
			// SSECustomerKeyMD5:       aws.String("SSECustomerKeyMD5"),
			// SSEKMSKeyID:             aws.String("SSEKMSKeyId"),
			// ServerSideEncryption:    aws.String("ServerSideEncryption"),
			// StorageClass:            aws.String("StorageClass"),
			// WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"),
		}
		_, putErr := s3Client.PutObject(putParams)

		if putErr != nil {
			if awsErr, ok := putErr.(awserr.Error); ok {
				log.Error("[%d] Error code '%s', message '%s', origin '%s'", work.Wid, awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
				if reqErr, ok := putErr.(awserr.RequestFailure); ok {
					log.Error("[%d] Service error code '%s', message '%s', status code '%d', request id '%s'", work.Wid, reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
				}
			} else {
				// This case should never be hit; the SDK should always return an
				// error that satisfies the awserr.Error interface.
				log.Error("[%d] Non AWS error: %s", work.Wid, putErr.Error())
			}

			work.Retry++
			if work.Retry == common.MaxRetries {
				log.Fatal("[%d] Error uploading version, retry %d: %s", work.Wid, work.Retry, work.Version)
			}
			log.Error("[%d] Error uploading version, retry %d: %s", work.Wid, work.Retry, work.Version)
			uploadWorkQueue <- work
			continue
		}

		log.Info("[%d] Restored version: %s", work.Wid, work.Version)
		readyRestoreWorkers <- work.Wid
	}
}
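Finally, a minimal sketch of how Restore might wire the download and upload workers together. Only the function, channel, and field names come from the snippets above; the worker count, the region handling, and the completion logic are assumptions, not the project's actual implementation.

func Restore(snapshotName string) {
	snap := common.LoadSnapshot(common.Cfg.BackupSet.SnapshotsDir + "/" + snapshotName)
	log.Info("Restoring snapshot %s into bucket %s.", snapshotName, common.Cfg.BackupSet.MasterBucket)

	common.ConfigureAws(common.Cfg.BackupSet.SlaveRegion)
	const restoreWorkerCount = 4 // hypothetical; not taken from the snippets
	for wid := 0; wid < restoreWorkerCount; wid++ {
		go downloadWorker()
		go uploadWorker()
		readyRestoreWorkers <- wid
	}

	for _, version := range snap.Contents {
		wid := <-readyRestoreWorkers // block until a worker slot is free
		downloadWorkQueue <- DownloadWork{Wid: wid, Version: version, Retry: 0}
	}

	// Drain the remaining slots so the function only returns once every
	// version has been downloaded and re-uploaded.
	for wid := 0; wid < restoreWorkerCount; wid++ {
		<-readyRestoreWorkers
	}
	log.Info("Restore of snapshot %s is DONE.", snapshotName)
}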