// parallelRead - reads chunks in parallel from the disks specified in readDisks.
func parallelRead(volume, path string, readDisks []StorageAPI, orderedDisks []StorageAPI, enBlocks [][]byte, blockOffset int64, curChunkSize int64, bitRotVerify func(diskIndex int) bool, pool *bpool.BytePool) {
	// WaitGroup to synchronise the read goroutines.
	wg := &sync.WaitGroup{}

	// Read disks in parallel.
	for index := range readDisks {
		if readDisks[index] == nil {
			continue
		}
		wg.Add(1)
		// Read the chunk from readDisks[index] in a goroutine.
		go func(index int) {
			defer wg.Done()

			// Verify bit rot for the file on this disk.
			if !bitRotVerify(index) {
				// Mark the disk nil so that we don't read from it for subsequent blocks.
				orderedDisks[index] = nil
				return
			}

			buf, err := pool.Get()
			if err != nil {
				errorIf(err, "unable to get buffer from byte pool")
				orderedDisks[index] = nil
				return
			}
			buf = buf[:curChunkSize]

			_, err = readDisks[index].ReadFile(volume, path, blockOffset, buf)
			if err != nil {
				orderedDisks[index] = nil
				return
			}
			enBlocks[index] = buf
		}(index)
	}

	// Wait for all the read goroutines to finish.
	wg.Wait()
}
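
// getReadDisks (referenced in erasureReadFile below) selects the disks that
// parallelRead reads from. The sketch below is an illustrative assumption of
// the contract parallelRead relies on, under a hypothetical name: the returned
// slice is positionally aligned with orderedDisks (nil entries mark disks to
// skip), and nextIndex is where the next round of reads should resume. The
// real helper may be smarter, e.g. scheduling only as many extra disks as a
// decode still needs.
func getReadDisksSketch(orderedDisks []StorageAPI, index int, dataBlocks int) ([]StorageAPI, int, error) {
	readDisks := make([]StorageAPI, len(orderedDisks))
	count := 0
	for i := index; i < len(orderedDisks); i++ {
		if orderedDisks[i] == nil {
			// Disk was previously marked unusable, skip it.
			continue
		}
		readDisks[i] = orderedDisks[i]
		count++
		// Stop once enough disks are scheduled to attempt a decode.
		if count == dataBlocks {
			return readDisks, i + 1, nil
		}
	}
	if count == 0 {
		// No readable disks remain.
		return nil, 0, traceError(errXLReadQuorum)
	}
	return readDisks, len(orderedDisks), nil
}
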
// erasureReadFile - reads bytes from erasure coded files and writes them to
// the given writer. Erasure coded files are read block by block as per the
// given erasureInfo and the data chunks are decoded into a data block. The
// data block is trimmed to the given offset and length, then written to the
// writer. This function also supports bit-rot detection by verifying the
// checksum of the individual blocks.
func erasureReadFile(writer io.Writer, disks []StorageAPI, volume string, path string, offset int64, length int64, totalLength int64, blockSize int64, dataBlocks int, parityBlocks int, checkSums []string, algo string, pool *bpool.BytePool) (int64, error) {
	// Offset and length cannot be negative.
	if offset < 0 || length < 0 {
		return 0, traceError(errUnexpected)
	}

	// Can't request more data than what is available.
	if offset+length > totalLength {
		return 0, traceError(errUnexpected)
	}

	// chunkSize is the amount of data that needs to be read from each disk at a time.
	chunkSize := getChunkSize(blockSize, dataBlocks)
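	// For example, with blockSize = 10MiB and dataBlocks = 5, each disk
	// holds a 2MiB chunk of every block. (An illustrative sketch of
	// getChunkSize appears after this function.)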

	// bitRotVerify checks that the file on a particular disk is free of
	// bit rot by verifying the hash of the file's contents.
	bitRotVerify := func() func(diskIndex int) bool {
		verified := make([]bool, len(disks))
		// Return a closure that keeps a reference to verified[] so that
		// the hash is not recalculated every time the function is called
		// for the same disk.
		return func(diskIndex int) bool {
			if verified[diskIndex] {
				// Already validated.
				return true
			}
			// Is this a valid block?
			isValid := isValidBlock(disks[diskIndex], volume, path, checkSums[diskIndex], algo)
			verified[diskIndex] = isValid
			return isValid
		}
	}()

	// Total bytes written to the writer.
	bytesWritten := int64(0)

	startBlock := offset / blockSize
	endBlock := (offset + length) / blockSize
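	// Note: when offset+length falls exactly on a block boundary, endBlock
	// points one block past the requested range; the loop below exits early
	// once bytesWritten == length, so that extra block is never read.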

	// curChunkSize = chunk size for the current block in the for loop below.
	// curBlockSize = block size for the current block in the for loop below.
	// curChunkSize and curBlockSize can change for the last block if totalLength%blockSize != 0
	curChunkSize := chunkSize
	curBlockSize := blockSize

	// For each block, read a chunk from each disk. If we are able to read all the data disks then we
	// don't need to read the parity disks. If one of the data disks is missing we need to read from
	// dataBlocks+1 disks. Once read, we Reconstruct() the missing data if needed and write it to the
	// given writer.
	for block := startBlock; block <= endBlock; block++ {
		// Mark all buffers as unused at the start of the loop so that the buffers
		// can be reused.
		pool.Reset()

		// Each element of enBlocks holds curChunkSize'd amount of data read from its corresponding disk.
		enBlocks := make([][]byte, len(disks))

		if ((offset + bytesWritten) / blockSize) == (totalLength / blockSize) {
			// This is the last block for which curBlockSize and curChunkSize can change.
			// For example, if totalLength is 15MB and blockSize is 10MB, curBlockSize for
			// the last block should be 5MB.
			curBlockSize = totalLength % blockSize
			curChunkSize = getChunkSize(curBlockSize, dataBlocks)
		}

		// NOTE: For the offset calculation we have to use chunkSize and
		// not curChunkSize. Using curChunkSize would result in a wrong
		// offset for the last block.
		blockOffset := block * chunkSize
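		// For example, with blockSize = 10MiB and dataBlocks = 5, chunkSize
		// is 2MiB, so block 3 starts at disk offset 6MiB on every disk.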

		// nextIndex - index from which the next set of parallel reads
		// should happen.
		nextIndex := 0

		for {
			// readDisks - disks from which we need to read in parallel.
			var readDisks []StorageAPI
			var err error
			// Get the slice of readable disks from which we can read in parallel.
			readDisks, nextIndex, err = getReadDisks(disks, nextIndex, dataBlocks)
			if err != nil {
				return bytesWritten, err
			}
			// Issue a parallel read across the disks specified in readDisks.
			parallelRead(volume, path, readDisks, disks, enBlocks, blockOffset, curChunkSize, bitRotVerify, pool)
			if isSuccessDecodeBlocks(enBlocks, dataBlocks) {
				// Enough blocks are available to do rs.Reconstruct().
				break
			}
			if nextIndex == len(disks) {
				// No more disks to read from.
				return bytesWritten, traceError(errXLReadQuorum)
			}
			// We do not have enough data blocks to reconstruct the data;
			// continue the loop until we have read from enough disks.
		}

		// If we have all the data blocks there is no need to decode; continue
		// to write. (Illustrative sketches of the isSuccess* helpers appear
		// after this function.)
		if !isSuccessDataBlocks(enBlocks, dataBlocks) {
			// Reconstruct the missing data blocks.
			if err := decodeData(enBlocks, dataBlocks, parityBlocks); err != nil {
				return bytesWritten, err
			}
		}

		// Offset in enBlocks from which data should be read.
		enBlocksOffset := int64(0)

		// Total data to be read from enBlocks.
		enBlocksLength := curBlockSize

		// If this is the start block then enBlocksOffset might not be 0.
		if block == startBlock {
			enBlocksOffset = offset % blockSize
			enBlocksLength -= enBlocksOffset
		}
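		// For example, with offset = 12MB and blockSize = 10MB, startBlock
		// is 1 and enBlocksOffset is 2MB: reading begins 2MB into the
		// second block of the file.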

		remaining := length - bytesWritten
		if remaining < enBlocksLength {
			// We should not send more data than what was requested.
			enBlocksLength = remaining
		}

		// Write data blocks.
		n, err := writeDataBlocks(writer, enBlocks, dataBlocks, enBlocksOffset, enBlocksLength)
		if err != nil {
			return bytesWritten, err
		}

		// Update total bytes written.
		bytesWritten += n

		if bytesWritten == length {
			// Done writing all the requested data.
			break
		}
	}

	// Success.
	return bytesWritten, nil
}
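
// The helpers referenced above (getChunkSize, isSuccessDecodeBlocks,
// isSuccessDataBlocks) are defined elsewhere in this package. The sketches
// below are illustrative assumptions of their behavior, not the actual
// implementations, and use hypothetical names to avoid colliding with them.

// getChunkSizeSketch - plausibly a ceiling division: the per-disk chunk
// needed to cover blockSize bytes spread across dataBlocks disks.
func getChunkSizeSketch(blockSize int64, dataBlocks int) int64 {
	return (blockSize + int64(dataBlocks) - 1) / int64(dataBlocks)
}

// isSuccessDecodeBlocksSketch - reconstruction is possible once any
// dataBlocks chunks, data or parity, have been read successfully.
func isSuccessDecodeBlocksSketch(enBlocks [][]byte, dataBlocks int) bool {
	count := 0
	for _, block := range enBlocks {
		if block != nil {
			count++
		}
	}
	return count >= dataBlocks
}

// isSuccessDataBlocksSketch - decoding can be skipped only when all of the
// first dataBlocks entries (the data chunks) were read successfully.
func isSuccessDataBlocksSketch(enBlocks [][]byte, dataBlocks int) bool {
	for _, block := range enBlocks[:dataBlocks] {
		if block == nil {
			return false
		}
	}
	return true
}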