Example #1
// treehash prints the SHA-256 tree hash of each named file.
func treehash(files []string) {
	th := glacier.NewTreeHash()

	for _, v := range files {
		th.Reset()

		file, err := os.Open(v)
		if err != nil {
			fmt.Printf("%s: %v\n", v, err)
			continue
		}

		_, err = io.Copy(th, file)
		if err != nil {
			file.Close()
			fmt.Printf("%s: %v\n", v, err)
			continue
		}

		file.Close()
		th.Close()
		fmt.Println(string(toHex(th.TreeHash())), v)
	}
}
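
The toHex helper called above (and in the later examples) is not part of the snippet. A minimal sketch, assuming it simply hex-encodes the hash bytes; it needs "encoding/hex" in the import block:

func toHex(b []byte) []byte {
	// hex-encode the raw tree hash so it can be printed as lowercase hex
	dst := make([]byte, hex.EncodedLen(len(b)))
	hex.Encode(dst, b)
	return dst
}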
Example #2
// multipart drives Glacier multipart uploads: init/run, resume, print, abort, and list.
func multipart(args []string) {
	if len(args) < 1 {
		fmt.Println("no multipart command")
		os.Exit(1)
	}
	command := args[0]
	args = args[1:]

	switch command {
	case "init", "run":
		args = getConnection(args)
		uploadData.Region = connection.Signature.Region.Region

		if len(args) < 3 {
			fmt.Println("no vault, file name and/or part size")
			os.Exit(1)
		}
		uploadData.Vault = args[0]
		uploadData.FileName = args[1]
		partSize, err := strconv.ParseInt(args[2], 10, 64)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		uploadData.PartSize = partSize * 1024 * 1024
		args = args[3:]

		if len(args) > 0 {
			uploadData.Description = args[0]
		}

		f, err := os.Open(uploadData.FileName)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		defer f.Close()

		s, err := f.Stat()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		parts := s.Size() / uploadData.PartSize
		if s.Size()%uploadData.PartSize > 0 {
			parts++
		}
		uploadData.Parts = make([]multipartPart, parts)

		partHasher := glacier.NewTreeHash()
		wholeHasher := glacier.NewTreeHash()
		hasher := io.MultiWriter(partHasher, wholeHasher)
		for i := range uploadData.Parts {
			n, err := io.CopyN(hasher, f, uploadData.PartSize)
			if err != nil && err != io.EOF {
				fmt.Println(err)
				os.Exit(1)
			}
			uploadData.Size += n
			partHasher.Close()
			uploadData.Parts[i].Hash = string(toHex(partHasher.Hash()))
			uploadData.Parts[i].TreeHash = string(toHex(partHasher.TreeHash()))
			partHasher.Reset()
		}
		wholeHasher.Close()
		uploadData.TreeHash = string(toHex(wholeHasher.TreeHash()))

		uploadData.UploadId, err = connection.InitiateMultipart(uploadData.Vault, uploadData.PartSize,
			uploadData.Description)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}

		out, err := os.Create(uploadData.FileName + ".gob")
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}

		enc := gob.NewEncoder(out)
		err = enc.Encode(uploadData)
		out.Close()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}

		if command == "init" {
			return
		}

		fallthrough

	case "resume":
		var parts int
		if command == "resume" {
			if len(args) < 1 {
				fmt.Println("no file")
				os.Exit(1)
			}
			fileName := args[0]
			args = args[1:]

			if len(args) > 0 {
				parts64, err := strconv.ParseInt(args[0], 10, 64)
				if err != nil {
					fmt.Println(err)
					os.Exit(1)
				}
				parts = int(parts64)
			}

			gobFile, err := os.Open(fileName + ".gob")
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}

			dec := gob.NewDecoder(gobFile)
			err = dec.Decode(&uploadData)
			gobFile.Close()
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}

			parseRegion(uploadData.Region)
		}

		file, err := os.Open(uploadData.FileName)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		defer file.Close()

		var start int64
		index := 0
		for _, v := range uploadData.Parts {
			if v.Uploaded {
				start += uploadData.PartSize
				index++
			} else {
				break
			}
		}

		if len(uploadData.Parts) < parts {
			parts = len(uploadData.Parts)
		}
		if parts == 0 {
			parts = len(uploadData.Parts)
		}

		i, try := 0, 0
		for i < parts {
			if index >= len(uploadData.Parts) {
				break
			}

			_, err := file.Seek(start, 0)
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}

			body := &limitedReadSeeker{file, uploadData.PartSize, uploadData.PartSize}

			err = connection.UploadMultipart(uploadData.Vault, uploadData.UploadId, start, body)

			if err != nil {
				fmt.Println(err)
				if try++; try > retries {
					fmt.Println("too many retrys")
					os.Exit(1)
				}
				continue
			}

			i++
			try = 0

			uploadData.Parts[index].Uploaded = true
			gobFile, err := os.Create(uploadData.FileName + ".gob.new")
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}
			enc := gob.NewEncoder(gobFile)
			err = enc.Encode(uploadData)
			gobFile.Close()
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}
			err = os.Remove(uploadData.FileName + ".gob")
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}
			err = os.Rename(uploadData.FileName+".gob.new", uploadData.FileName+".gob")
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}

			start += uploadData.PartSize
			index++
		}

		done := true
		for _, v := range uploadData.Parts {
			if !v.Uploaded {
				done = false
				break
			}
		}

		if done {
			archiveId, err := connection.CompleteMultipart(uploadData.Vault, uploadData.UploadId, uploadData.TreeHash,
				uploadData.Size)
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}
			fmt.Println(archiveId)

			err = os.Remove(uploadData.FileName + ".gob")
			if err != nil {
				fmt.Println(err)
			}
		}

	case "print":
		if len(args) < 1 {
			fmt.Println("no file name")
			os.Exit(1)
		}
		fileName := args[0]

		f, err := os.Open(fileName + ".gob")
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		defer f.Close()

		dec := gob.NewDecoder(f)
		var data multipartData
		err = dec.Decode(&data)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}

		fmt.Println("Region:", data.Region)
		fmt.Println("Vault:", data.Vault)
		fmt.Println("Description:", data.Description)
		fmt.Println("Part Size:", prettySize(data.PartSize))
		fmt.Println("Upload ID:", data.UploadId)
		uploaded := 0
		for i := range data.Parts {
			if data.Parts[i].Uploaded {
				uploaded++
			}
		}
		fmt.Println("Parts Uploaded:", uploaded, "/", len(data.Parts))
		fmt.Println("Tree Hash:", data.TreeHash)
		fmt.Println("Size:", data.Size, prettySize(data.Size))

	case "abort":
		if len(args) < 1 {
			fmt.Println("no file name")
			os.Exit(1)
		}
		fileName := args[0]

		f, err := os.Open(fileName + ".gob")
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		defer f.Close()

		dec := gob.NewDecoder(f)
		var data multipartData
		err = dec.Decode(&data)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}

		parseRegion(data.Region)

		err = connection.AbortMultipart(data.Vault, data.UploadId)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}

		err = os.Remove(fileName + ".gob")
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}

	case "list":
		if len(args) < 1 {
			fmt.Println("no multipart sub command")
			os.Exit(1)
		}
		subCommand := args[0]
		args = args[1:]

		switch subCommand {
		case "parts":
			if len(args) < 1 {
				fmt.Println("no file")
				os.Exit(1)
			}
			fileName := args[0]

			f, err := os.Open(fileName + ".gob")
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}
			defer f.Close()

			dec := gob.NewDecoder(f)
			var data multipartData
			err = dec.Decode(&data)
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}

			parseRegion(data.Region)

			parts, err := connection.ListMultipartParts(data.Vault, data.UploadId, "", 0)
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}

			fmt.Printf("%+v\n", *parts)

		case "uploads":
			args = getConnection(args)

			if len(args) < 1 {
				fmt.Println("no vault")
				os.Exit(1)
			}
			vault := args[0]

			parts, _, err := connection.ListMultipartUploads(vault, "", 0)
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}

			for _, v := range parts {
				fmt.Println("Archive Description:", v.ArchiveDescription)
				fmt.Println("Creation Data:", v.CreationDate)
				fmt.Println("Multipart Upload ID:", v.MultipartUploadId)
				fmt.Println("Part Size:", prettySize(v.PartSizeInBytes))
				fmt.Println("Vault ARN:", v.VaultARN)
				fmt.Println()
			}

		default:
			fmt.Println("unknown multipart sub command:", subCommand)
			os.Exit(1)
		}

	default:
		fmt.Println("unknown multipart command:", command)
		os.Exit(1)
	}
}
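
Example #2 leans on several identifiers defined elsewhere in the program: the persisted upload state, the retries limit, and the prettySize formatter. A minimal sketch reconstructed from the fields the example actually uses; the retries value and the prettySize formatting are assumptions, and the connection/getConnection/parseRegion plumbing is not shown:

// retries bounds how often a failed part transfer is retried; the value is illustrative.
const retries = 5

// multipartPart records the hashes and upload status of a single part.
type multipartPart struct {
	Hash     string
	TreeHash string
	Uploaded bool
}

// multipartData is the upload state gob-encoded to <file>.gob so an
// interrupted upload can be resumed.
type multipartData struct {
	Region      string
	Vault       string
	FileName    string
	Description string
	PartSize    int64
	Size        int64
	TreeHash    string
	UploadId    string
	Parts       []multipartPart
}

var uploadData multipartData

// prettySize renders a byte count in human-readable binary units.
func prettySize(size int64) string {
	units := []string{"B", "KiB", "MiB", "GiB", "TiB"}
	f, i := float64(size), 0
	for f >= 1024 && i < len(units)-1 {
		f /= 1024
		i++
	}
	return fmt.Sprintf("%.1f %s", f, units[i])
}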
Example #3
// job manages Glacier jobs: initiating inventory and archive retrievals, listing
// and describing jobs, fetching results, and running a resumable archive download.
func job(args []string) {
	if len(args) < 1 {
		fmt.Println("no job command")
		os.Exit(1)
	}
	command := args[0]
	args = args[1:]

	switch command {
	case "inventory":
		args = getConnection(args)

		if len(args) < 1 {
			fmt.Println("no vault")
			os.Exit(1)
		}
		vault := args[0]
		args = args[1:]

		var description, topic string
		if len(args) > 0 {
			topic = args[0]
		}
		if len(args) > 1 {
			description = args[1]
		}

		jobId, err := connection.InitiateInventoryJob(vault, topic, description)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		fmt.Println(jobId)

	case "archive":
		args = getConnection(args)

		if len(args) < 2 {
			fmt.Println("no vault")
			os.Exit(1)
		}
		vault := args[0]
		archive := args[1]
		args = args[2:]

		var description, topic string
		if len(args) > 0 {
			topic = args[0]
		}
		if len(args) > 1 {
			description = args[1]
		}

		jobId, err := connection.InitiateRetrievalJob(vault, archive, topic, description)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		fmt.Println(jobId)

	case "list":
		args = getConnection(args)

		if len(args) < 1 {
			fmt.Println("no vault")
			os.Exit(1)
		}
		vault := args[0]

		jobs, _, err := connection.ListJobs(vault, "", "", "", 0)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}

		for _, v := range jobs {
			fmt.Println("Action:", v.Action)
			if v.Action == "ArchiveRetrieval" {
				fmt.Println("Archive ID:", v.ArchiveId)
				fmt.Println("Archive Size:", v.ArchiveSizeInBytes, prettySize(v.ArchiveSizeInBytes))
			}
			fmt.Println("Completed:", v.Completed)
			if v.Completed {
				fmt.Println("Completion Date:", v.CompletionDate)
			}
			fmt.Println("Creation Date:", v.CreationDate)
			if v.Completed && v.Action == "InventoryRetrieval" {
				fmt.Println("Invenotry Size:", v.InventorySizeInBytes, prettySize(int64(v.InventorySizeInBytes)))
			}
			fmt.Println("Job Description:", v.JobDescription)
			fmt.Println("Job ID:", v.JobId)
			if v.Action == "ArchiveRetrieval" {
				fmt.Println("SHA256 Tree Hash:", v.SHA256TreeHash)
			}
			fmt.Println("SNS Topic:", v.SNSTopic)
			fmt.Println("Status Code:", v.StatusCode)
			fmt.Println("Status Message:", v.StatusMessage)
			fmt.Println("Vault ARN:", v.VaultARN)
			fmt.Println()
		}

	case "describe":
		args = getConnection(args)

		if len(args) < 2 {
			fmt.Println("no vault and/or job id")
			os.Exit(1)
		}
		vault := args[0]
		jobId := args[1]

		job, err := connection.DescribeJob(vault, jobId)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}

		fmt.Println("Action:", job.Action)
		if job.Action == "ArchiveRetrieval" {
			fmt.Println("Archive ID:", job.ArchiveId)
			fmt.Println("Archive Size:", job.ArchiveSizeInBytes, prettySize(job.ArchiveSizeInBytes))
		}
		fmt.Println("Completed:", job.Completed)
		if job.Completed {
			fmt.Println("Completion Date:", job.CompletionDate)
		}
		fmt.Println("Creation Date:", job.CreationDate)
		if job.Completed && job.Action == "InventoryRetrieval" {
			fmt.Println("Invenotry Size:", job.InventorySizeInBytes, prettySize(int64(job.InventorySizeInBytes)))
		}
		fmt.Println("Job Description:", job.JobDescription)
		fmt.Println("Job ID:", job.JobId)
		if job.Action == "ArchiveRetrieval" {
			fmt.Println("SHA256 Tree Hash:", job.SHA256TreeHash)
		}
		fmt.Println("SNS Topic:", job.SNSTopic)
		fmt.Println("Status Code:", job.StatusCode)
		fmt.Println("Status Message:", job.StatusMessage)
		fmt.Println("Vault ARN:", job.VaultARN)

	case "get":
		if len(args) < 1 {
			fmt.Println("no job sub command")
			os.Exit(1)
		}
		subCommand := args[0]
		args = args[1:]

		switch subCommand {
		case "inventory":
			args = getConnection(args)

			if len(args) < 2 {
				fmt.Println("no vault and/or job id")
				os.Exit(1)
			}
			vault := args[0]
			job := args[1]

			inventory, err := connection.GetInventoryJob(vault, job)
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}

			fmt.Println("Vault ARN:", inventory.VaultARN)
			fmt.Println("Inventory Date:", inventory.InventoryDate)

			for _, v := range inventory.ArchiveList {
				fmt.Println()
				fmt.Println("Archive ID:", v.ArchiveId)
				fmt.Println("Archive Description:", v.ArchiveDescription)
				fmt.Println("Creation Date:", v.CreationDate)
				fmt.Println("Size:", v.Size, prettySize(v.Size))
				fmt.Println("SHA256 Tree Hash:", v.SHA256TreeHash)
			}

		case "archive":
			args = getConnection(args)

			if len(args) < 3 {
				fmt.Println("no vault, job id, and/or output file")
				os.Exit(1)
			}
			vault := args[0]
			job := args[1]
			fileName := args[2]

			file, err := os.Create(fileName)
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}
			defer file.Close()

			archive, _, err := connection.GetRetrievalJob(vault, job, 0, 0)
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}
			defer archive.Close()

			_, err = io.Copy(file, archive)
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}
		default:
			fmt.Println("unknown job sub command:", subCommand)
			os.Exit(1)
		}

	case "run":
		args = getConnection(args)
		if len(args) < 4 {
			fmt.Println("no vault, archive, download size and/or output file")
			os.Exit(1)
		}
		vault := args[0]
		archive := args[1]
		partSize, err := strconv.ParseInt(args[2], 10, 64)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		partSize *= 1024 * 1024
		output = args[3]
		args = args[4:]

		var topic string
		if len(args) > 0 {
			topic = args[0]
			args = args[1:]
		}

		var description string
		if len(args) > 0 {
			description = args[0]
			args = args[1:]
		}

		// initiate retrieval job
		job, err := connection.InitiateRetrievalJob(vault, archive, topic, description)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		log.Println("initiated retrieval job:", job)

		// save state
		data.Region = connection.Signature.Region.Name
		data.Vault = vault
		data.PartSize = partSize
		data.Job = job
		data.saveState(output + ".gob")

		// retrieval jobs take several hours to complete, so wait before polling
		time.Sleep(3 * time.Hour)

		// then poll the job status, sleeping 15 minutes between checks
		var try int
		for {
			job, err := connection.DescribeJob(vault, job)
			if err != nil {
				log.Println(err)
				try++
				if try > retries {
					fmt.Println("too many retries")
					os.Exit(1)
				}
			} else {
				try = 0
				if job.Completed {
					data.Size = int64(job.ArchiveSizeInBytes)
					data.FullTreeHash = job.SHA256TreeHash
					data.saveState(output + ".gob")
					break
				}
				log.Println("retrieval job not yet completed")
				time.Sleep(15 * time.Minute)
			}
		}

		fallthrough
	case "resume":
		if command == "resume" {
			if len(args) < 1 {
				fmt.Println("no filename")
				os.Exit(1)
			}
			output = args[0]
			args = args[1:]

			file, err := os.Open(output + ".gob")
			if err != nil {
				fmt.Println("could not resume:", err)
				os.Exit(1)
			}
			dec := gob.NewDecoder(file)
			err = dec.Decode(&data)
			file.Close()
			if err != nil {
				fmt.Println("could not resume:", err)
				os.Exit(1)
			}

			getConnection([]string{data.Region})

			if len(args) > 0 {
				data.PartSize, err = strconv.ParseInt(args[0], 10, 64)
				if err != nil {
					fmt.Println(err)
					os.Exit(1)
				}
				data.PartSize *= 1024 * 1024
				data.saveState(output + ".gob")
			}
		}

		file, err := os.OpenFile(output, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0666)
		if err != nil {
			log.Println(err)
			os.Exit(1)
		}
		defer file.Close()

		// loop getting parts, checking tree hash of each
		buffer := bytes.NewBuffer(make([]byte, 0, data.PartSize))
		hasher := glacier.NewTreeHash()
		var try int

		if command == "resume" {
			_, err = file.Seek(int64(data.Downloaded), 0)
			if err != nil {
				fmt.Println("could not resume:", err)
				os.Exit(1)
			}
		}

		for data.Downloaded < data.Size {
			log.Println("downloading", data.Downloaded, "to", data.Downloaded+data.PartSize-1, "of", data.Size)
			buffer.Reset()
			hasher.Reset()

			part, treeHash, err := connection.GetRetrievalJob(data.Vault, data.Job, data.Downloaded,
				data.Downloaded+data.PartSize-1)
			if err != nil {
				log.Println("GetRetrievalJob:", err)
				try++
				if try > retries {
					fmt.Println("too many retries")
					os.Exit(1)
				}
				continue
			}

			// copy to temporary buffer
			_, err = io.Copy(buffer, part)
			if err != nil {
				log.Println("io.Copy:", err)
				try++
				if try > retries {
					fmt.Println("too many retries")
					os.Exit(1)
				}
				continue
			}

			// check tree hash
			_, err = io.CopyN(hasher, buffer, int64(data.PartSize))
			if err != nil && err != io.EOF {
				log.Println("hashing", err)
				try++
				if try > retries {
					fmt.Println("too many retries")
					os.Exit(1)
				}
				continue
			}
			hasher.Close()
			if treeHash != "" && treeHash != string(toHex(hasher.TreeHash())) {
				log.Println("tree hash mismatch")
				try++
				if try > retries {
					fmt.Println("too many retries")
					os.Exit(1)
				}
				continue
			}

			// copy to file
			_, err = file.Write(buffer.Bytes())
			if err != nil {
				log.Println("copying buffer to file:", err)
				try++
				if try > retries {
					fmt.Println("too many retries")
					os.Exit(1)
				}
			}

			// save state
			data.Downloaded += data.PartSize
			data.saveState(output + ".gob")

			try = 0
		}

		// check tree hash of entire archive
		log.Println("download complete, verifying")
		_, err = file.Seek(0, 0)
		if err != nil {
			log.Println("seek:", err)
			os.Exit(1)
		}

		hasher.Reset()
		_, err = io.Copy(hasher, file)
		if err != nil {
			log.Println("hashing whole file:", err)
			os.Exit(1)
		}
		hasher.Close()

		if string(toHex(hasher.TreeHash())) != data.FullTreeHash {
			log.Println("entire file tree hash mismatch")
			os.Exit(1)
		}
		log.Println("success")

		os.Remove(output + ".gob")

	default:
		fmt.Println("unknown job command:", command)
		os.Exit(1)
	}
}
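
Example #3 keeps its progress in a package-level state value and persists it with saveState; neither is shown above. A minimal sketch, assuming saveState gob-encodes the state to the named file (the downloadData name, field types, and error handling are assumptions):

// output is the destination file for the "run" and "resume" commands.
var output string

// downloadData (name assumed) is the retrieval state persisted to <output>.gob.
type downloadData struct {
	Region       string
	Vault        string
	Job          string
	PartSize     int64
	Size         int64
	Downloaded   int64
	FullTreeHash string
}

var data downloadData

// saveState gob-encodes the current state so an interrupted download can be
// resumed; the error handling here is only illustrative.
func (d *downloadData) saveState(fileName string) {
	f, err := os.Create(fileName)
	if err != nil {
		log.Println("saving state:", err)
		return
	}
	defer f.Close()
	if err := gob.NewEncoder(f).Encode(d); err != nil {
		log.Println("saving state:", err)
	}
}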