Example #1
// validate downloads the blob and checks that bytes [startByte, endByte]
// match the expected data. cnt is the container name, defined outside
// this snippet.
func validate(cli storage.BlobStorageClient, blob string, startByte, endByte int64, data []byte) error {
	url := cli.GetBlobURL(cnt, blob)

	reader, err := cli.GetBlob(cnt, blob)
	if err != nil {
		return fmt.Errorf("Failed to read from %s: %s\n", url, err.Error())
	}

	defer reader.Close()

	dataRead, err := ioutil.ReadAll(reader)

	if err != nil {
		return fmt.Errorf("Failed to read from %s: %s\n", url, err.Error())
	}

	same := true
	for i := startByte; i <= endByte; i++ {
		if data[i] != dataRead[i] {
			same = false
			break
		}
	}

	if !same {
		// err is nil here (ReadAll succeeded), so report the mismatch
		// itself instead of dereferencing a nil error.
		return fmt.Errorf("Failed to read data properly from %s: bytes %d-%d do not match\n", url, startByte, endByte)
	}

	return nil
}
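
The byte-by-byte loop can also be written with the standard library's bytes.Equal (import "bytes"); a sketch of that variant, under the same assumption validate already makes that both slices cover at least endByte+1 bytes:

	if !bytes.Equal(data[startByte:endByte+1], dataRead[startByte:endByte+1]) {
		return fmt.Errorf("Failed to read data properly from %s: bytes %d-%d do not match\n", url, startByte, endByte)
	}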
Example #2
// putBlockBlob uploads the given stream into a block blob by splitting
// the data stream into chunks and uploading them as blocks, then
// commits the block list at the end. It is a helper built on top of
// the PutBlock and PutBlockList methods, with sequential block ID
// counting.
func putBlockBlob(b storage.BlobStorageClient, container, name string, blob io.Reader, chunkSize int) error {
	if chunkSize <= 0 || chunkSize > storage.MaxBlobBlockSize {
		chunkSize = storage.MaxBlobBlockSize
	}

	chunk := make([]byte, chunkSize)
	n, err := blob.Read(chunk)
	if err != nil && err != io.EOF {
		return err
	}

	blockList := []storage.Block{}

	for blockNum := 0; ; blockNum++ {
		id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%011d", blockNum)))
		data := chunk[:n]
		err = b.PutBlock(container, name, id, data)
		if err != nil {
			return err
		}

		blockList = append(blockList, storage.Block{ID: id, Status: storage.BlockStatusLatest})

		// Read the next chunk. A reader may return data together with
		// io.EOF, so only stop once no bytes were returned; breaking on
		// io.EOF alone would drop a final partial chunk.
		n, err = blob.Read(chunk)
		if err != nil && err != io.EOF {
			return err
		}
		if n == 0 && err == io.EOF {
			break
		}
	}

	return b.PutBlockList(container, name, blockList)
}
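
A minimal usage sketch for the helper above, assuming a configured BlobStorageClient and an existing container (the names cli and "backups" and the 4 MiB chunk size are illustrative, not part of the original code; imports: os, path/filepath):

func uploadFile(cli storage.BlobStorageClient, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	// putBlockBlob clamps any chunk size above storage.MaxBlobBlockSize.
	return putBlockBlob(cli, "backups", filepath.Base(path), f, 4*1024*1024)
}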
Example #3
// clearPage clears the page range [startByte, endByte] of a page blob.
// cnt is the container name, defined outside this snippet.
func clearPage(cli storage.BlobStorageClient, name string, startByte, endByte int64) error {
	if err := cli.PutPage(cnt, name, startByte, endByte, storage.PageWriteTypeClear, nil); err != nil {
		url := cli.GetBlobURL(cnt, name)
		fmt.Printf("Failed to clear pages of %s: %s\n", url, err.Error())
		return err
	}
	return nil
}
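
Azure page blobs operate on 512-byte pages, so the ranges passed to clearPage above (and to writePage in the next example) must start on a page boundary and end one byte before a page boundary. A small helper sketch (the name alignRange is illustrative) that widens an arbitrary range to the smallest enclosing page-aligned one:

const pageSize = 512

func alignRange(start, end int64) (int64, int64) {
	alignedStart := (start / pageSize) * pageSize
	alignedEnd := (end/pageSize+1)*pageSize - 1
	return alignedStart, alignedEnd
}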
Example #4
// writePage writes chunk to the page range [startByte, endByte] of a
// page blob. cnt is the container name, defined outside this snippet.
func writePage(cli storage.BlobStorageClient, name string, startByte, endByte int64, chunk []byte) error {
	if err := cli.PutPage(cnt, name, startByte, endByte, storage.PageWriteTypeUpdate, chunk); err != nil {
		url := cli.GetBlobURL(cnt, name)
		fmt.Printf("Failed to write pages to %s: %s\n", url, err.Error())
		return err
	}
	return nil
}
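
A round-trip sketch tying this example back to validate from Example #1 (the helper name writeAndVerify is illustrative; expected must hold the full intended blob contents, since validate indexes it by absolute offset):

func writeAndVerify(cli storage.BlobStorageClient, blob string, startByte, endByte int64, expected []byte) error {
	if err := writePage(cli, blob, startByte, endByte, expected[startByte:endByte+1]); err != nil {
		return err
	}
	return validate(cli, blob, startByte, endByte, expected)
}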
Example #5
func resourceArmStorageBlobPageUploadFromSource(container, name, source string, client *storage.BlobStorageClient, parallelism, attempts int) error {
	workerCount := parallelism * runtime.NumCPU()

	file, err := os.Open(source)
	if err != nil {
		return fmt.Errorf("Error opening source file for upload %q: %s", source, err)
	}
	defer file.Close()

	blobSize, pageList, err := resourceArmStorageBlobPageSplit(file)
	if err != nil {
		return fmt.Errorf("Error splitting source file %q into pages: %s", source, err)
	}

	if err := client.PutPageBlob(container, name, blobSize, map[string]string{}); err != nil {
		return fmt.Errorf("Error creating storage blob on Azure: %s", err)
	}

	pages := make(chan resourceArmStorageBlobPage, len(pageList))
	errors := make(chan error, len(pageList))
	wg := &sync.WaitGroup{}
	wg.Add(len(pageList))

	// total accumulates the overall byte count; it is not used further
	// in this snippet.
	total := int64(0)
	for _, page := range pageList {
		total += page.section.Size()
		pages <- page
	}
	close(pages)

	for i := 0; i < workerCount; i++ {
		go resourceArmStorageBlobPageUploadWorker(resourceArmStorageBlobPageUploadContext{
			container: container,
			name:      name,
			source:    source,
			blobSize:  blobSize,
			client:    client,
			pages:     pages,
			errors:    errors,
			wg:        wg,
			attempts:  attempts,
		})
	}

	wg.Wait()

	if len(errors) > 0 {
		return fmt.Errorf("Error while uploading source file %q: %s", source, <-errors)
	}

	return nil
}
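
The worker function and the resourceArmStorageBlobPage type are not shown in this example. A sketch of what the worker loop could look like, assuming each page carries an offset int64 and a section *io.SectionReader (both field names are assumptions, not part of the original code), and reusing the PutPage call shape from Examples #3 and #4:

func pageUploadWorkerSketch(ctx resourceArmStorageBlobPageUploadContext) {
	for page := range ctx.pages {
		// Read this page's byte range out of the source file.
		chunk := make([]byte, page.section.Size())
		if _, err := page.section.ReadAt(chunk, 0); err != nil && err != io.EOF {
			ctx.errors <- err
			ctx.wg.Done()
			continue
		}

		start := page.offset
		end := page.offset + page.section.Size() - 1

		// Retry the upload up to ctx.attempts times, keeping the last error.
		var err error
		for i := 0; i < ctx.attempts; i++ {
			if err = ctx.client.PutPage(ctx.container, ctx.name, start, end, storage.PageWriteTypeUpdate, chunk); err == nil {
				break
			}
		}
		if err != nil {
			ctx.errors <- err
		}
		ctx.wg.Done()
	}
}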
Example #6
func resourceArmStorageBlobBlockUploadFromSource(container, name, source string, client *storage.BlobStorageClient, parallelism, attempts int) error {
	workerCount := parallelism * runtime.NumCPU()

	file, err := os.Open(source)
	if err != nil {
		return fmt.Errorf("Error opening source file for upload %q: %s", source, err)
	}
	defer file.Close()

	blockList, parts, err := resourceArmStorageBlobBlockSplit(file)
	if err != nil {
		return fmt.Errorf("Error reading and splitting source file for upload %q: %s", source, err)
	}

	wg := &sync.WaitGroup{}
	blocks := make(chan resourceArmStorageBlobBlock, len(parts))
	errors := make(chan error, len(parts))

	wg.Add(len(parts))
	for _, p := range parts {
		blocks <- p
	}
	close(blocks)

	for i := 0; i < workerCount; i++ {
		go resourceArmStorageBlobBlockUploadWorker(resourceArmStorageBlobBlockUploadContext{
			client:    client,
			source:    source,
			container: container,
			name:      name,
			blocks:    blocks,
			errors:    errors,
			wg:        wg,
			attempts:  attempts,
		})
	}

	wg.Wait()

	if len(errors) > 0 {
		return fmt.Errorf("Error while uploading source file %q: %s", source, <-errors)
	}

	err = client.PutBlockList(container, name, blockList)
	if err != nil {
		return fmt.Errorf("Error updating block list for source file %q: %s", source, err)
	}

	return nil
}
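
Both upload paths delegate per-chunk retries to their workers, retrying up to `attempts` times; the workers themselves are not shown. A generic helper sketch of that retry pattern (the name retry and its signature are illustrative):

// retry runs fn up to attempts times, returning nil on the first
// success or the last error otherwise.
func retry(attempts int, fn func() error) error {
	if attempts < 1 {
		attempts = 1
	}
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
	}
	return err
}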
Example #7
// createBlockBlob optionally gzips the payload, then uploads it as a
// block blob. c.Conf is application configuration defined outside this
// snippet.
func createBlockBlob(cli storage.BlobStorageClient, k string, b []byte) error {
	var err error
	if c.Conf.GZIP {
		if b, err = gz(b); err != nil {
			return err
		}
		k = k + ".gz"
	}

	if err := cli.CreateBlockBlobFromReader(
		c.Conf.AzureContainer,
		k,
		uint64(len(b)),
		bytes.NewReader(b),
		map[string]string{},
	); err != nil {
		return fmt.Errorf("Failed to upload data to %s/%s, %s",
			c.Conf.AzureContainer, k, err)
	}
	return nil
}
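
The gz helper used above is not included in the snippet. A minimal sketch of what such a gzip-compression helper could look like, using the standard compress/gzip and bytes packages:

func gz(b []byte) ([]byte, error) {
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	if _, err := w.Write(b); err != nil {
		return nil, err
	}
	// Close flushes the remaining data and the gzip footer; its error
	// must be checked before using the buffer.
	if err := w.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}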