Example #1
Fetching a range of child sequences concurrently while keeping them in index order.
// beginFetchingChildSequences fetches the child sequences with indices in
// [start, start+length) concurrently and returns a channel that yields them
// in index order.
func (ms metaSequenceObject) beginFetchingChildSequences(start, length uint64) chan interface{} {
	input := make(chan interface{})
	output := orderedparallel.New(input, func(item interface{}) interface{} {
		i := item.(int)
		return ms.getChildSequence(i)
	}, int(length))

	// Feed the indices; orderedparallel preserves their order on output.
	go func() {
		for i := start; i < start+length; i++ {
			input <- int(i)
		}
		close(input)
	}()
	return output
}
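All three examples share one shape: a producer goroutine feeds an input channel, orderedparallel.New fans the items out to concurrent workers, and the returned channel yields results in input order before closing. The following is a minimal sketch of a function with that contract; it illustrates the behavior these call sites assume, not the actual noms implementation.

package orderedparallel

// New is a sketch of the API the examples rely on: it applies process to
// each item read from input, using up to parallelism concurrent workers,
// and delivers the results on the returned channel in input order. The
// returned channel is closed after input closes and all results drain.
// (Illustrative only; the real noms implementation may differ.)
func New(input chan interface{}, process func(interface{}) interface{}, parallelism int) chan interface{} {
	output := make(chan interface{}, parallelism)
	// One single-use result channel per item; queuing them preserves the
	// original input order regardless of which worker finishes first.
	pending := make(chan chan interface{}, parallelism)
	sem := make(chan struct{}, parallelism) // caps concurrent workers

	go func() {
		for item := range input {
			res := make(chan interface{}, 1)
			pending <- res
			sem <- struct{}{}
			go func(item interface{}, res chan interface{}) {
				defer func() { <-sem }()
				res <- process(item)
			}(item, res)
		}
		close(pending)
	}()

	go func() {
		for res := range pending {
			output <- <-res // blocks on the oldest unfinished item
		}
		close(output)
	}()

	return output
}

The queue of single-use result channels is what preserves ordering: workers finish in any order, but the drainer always blocks on the oldest outstanding item.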
Example #2
Rendering commit-log entries in parallel while writing them to a pager in the iterator's order.
// runLog prints the commit log for the dataset or path in args[0], formatting
// commits in parallel but writing them to the pager in iteration order.
func runLog(args []string) int {
	useColor = shouldUseColor()

	database, value, err := spec.GetPath(args[0])
	d.CheckErrorNoUsage(err)
	defer database.Close()

	if value == nil {
		d.CheckErrorNoUsage(fmt.Errorf("Object not found: %s", args[0]))
	}

	origCommit, ok := value.(types.Struct)
	if !ok || !datas.IsCommitType(origCommit.Type()) {
		d.CheckError(fmt.Errorf("%s does not reference a Commit object", args[0]))
	}

	iter := NewCommitIterator(database, origCommit)
	displayed := 0
	if maxCommits <= 0 {
		maxCommits = math.MaxInt32
	}

	// Format commits concurrently; orderedparallel returns the rendered
	// buffers in the same order the iterator produced the commits.
	inChan := make(chan interface{}, parallelism)
	outChan := orderedparallel.New(inChan, func(node interface{}) interface{} {
		buff := &bytes.Buffer{}
		printCommit(node.(LogNode), buff, database)
		return buff.Bytes()
	}, parallelism)

	go func() {
		for ln, ok := iter.Next(); ok && displayed < maxCommits; ln, ok = iter.Next() {
			inChan <- ln
			displayed++ // counts enqueued commits so maxCommits caps the log
		}
		close(inChan)
	}()

	pgr := outputpager.Start()
	defer pgr.Stop()

	for commitBuff := range outChan {
		io.Copy(pgr.Writer, bytes.NewReader(commitBuff.([]byte)))
	}
	return 0
}
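For orientation, here is a stripped-down program with the same producer/worker/consumer shape as runLog, using the New sketch above. strings.ToUpper is a hypothetical stand-in for the expensive printCommit rendering step, and the orderedparallel import path is an assumption.

package main

import (
	"fmt"
	"strings"

	// Assumed import path; substitute the package holding the New sketch.
	"github.com/attic-labs/noms/go/util/orderedparallel"
)

func main() {
	const parallelism = 4
	in := make(chan interface{}, parallelism)
	out := orderedparallel.New(in, func(item interface{}) interface{} {
		// Hypothetical stand-in for printCommit: expensive, order-independent work.
		return strings.ToUpper(item.(string))
	}, parallelism)

	go func() {
		for _, s := range []string{"first", "second", "third"} {
			in <- s
		}
		close(in)
	}()

	// Results arrive in input order: FIRST, SECOND, THIRD.
	for rendered := range out {
		fmt.Println(rendered.(string))
	}
}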
Example #3
Streaming a blob: a rolling hash cuts the byte stream into chunks, which are hashed and written in parallel and reassembled in order.
// NewStreamingBlob reads all of r and returns a Blob, chunking the bytes
// with a rolling hash and writing leaf chunks in parallel.
func NewStreamingBlob(r io.Reader, vrw ValueReadWriter) Blob {
	sc := newEmptySequenceChunker(vrw, newBlobLeafChunkFn(nil), newIndexedMetaSequenceChunkFn(BlobKind, nil), func(item sequenceItem, rv *rollingValueHasher) {
		rv.HashByte(item.(byte))
	})

	// TODO: The code below is temporary. It's basically a custom leaf-level
	// chunker for blobs. There are substantial perf gains in doing it this
	// way, since it avoids the cost of boxing every single byte that gets
	// chunked.
	chunkBuff := [8192]byte{}
	chunkBytes := chunkBuff[:]
	rv := newRollingValueHasher()
	offset := 0
	// addByte appends b to the current chunk, growing the buffer as needed,
	// and reports whether the rolling hash crossed a chunk boundary.
	addByte := func(b byte) bool {
		if offset >= len(chunkBytes) {
			tmp := make([]byte, len(chunkBytes)*2)
			copy(tmp, chunkBytes)
			chunkBytes = tmp
		}
		chunkBytes[offset] = b
		offset++
		rv.HashByte(b)
		return rv.crossedBoundary
	}

	// Finished chunks are written (or just ref'd) by parallel workers, and
	// the resulting metaTuples come back in chunk order.
	input := make(chan interface{}, 16)
	output := orderedparallel.New(input, func(item interface{}) interface{} {
		cp := item.([]byte)
		col, key, numLeaves := chunkBlobLeaf(vrw, cp)
		var ref Ref
		if vrw != nil {
			ref = vrw.WriteValue(col)
			col = nil
		} else {
			ref = NewRef(col)
		}
		return newMetaTuple(ref, key, numLeaves, col)
	}, 16)

	// makeChunk snapshots the bytes accumulated so far, hands them to the
	// parallel writers, and resets the buffer.
	makeChunk := func() {
		cp := make([]byte, offset)
		copy(cp, chunkBytes[0:offset])
		input <- cp
		offset = 0
	}

	// Pump bytes from r, cutting a chunk whenever the rolling hash signals a
	// boundary, and flush any trailing partial chunk at EOF.
	go func() {
		readBuff := [8192]byte{}
		for {
			n, err := r.Read(readBuff[:])
			for i := 0; i < n; i++ {
				if addByte(readBuff[i]) {
					rv.ClearLastBoundary()
					makeChunk()
				}
			}
			if err != nil {
				d.Chk.True(io.EOF == err)
				if offset > 0 {
					makeChunk()
				}
				close(input)
				break
			}
		}
	}()

	// Collect metaTuples in order and append them to the sequence chunker.
	for b := range output {
		if sc.parent == nil {
			sc.createParent()
		}
		sc.parent.Append(b.(metaTuple))
	}

	return newBlob(sc.Done(vrw).(indexedSequence))
}
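A hypothetical call site, assuming it sits in the same package as NewStreamingBlob and that "os" is imported; passing a nil ValueReadWriter takes the vrw == nil branch above, so leaf chunks stay in memory rather than being written out.

// blobFromFile is a hypothetical helper: it streams a file into a Blob
// without a ValueReadWriter (nil vrw), so chunks are kept in memory.
func blobFromFile(path string) (Blob, error) {
	f, err := os.Open(path)
	if err != nil {
		var zero Blob
		return zero, err
	}
	defer f.Close()
	return NewStreamingBlob(f, nil), nil
}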