Example #1
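// parseDeltaEntry parses a ref or offset delta entry: it resolves the base
// object's pack offset, looks up the base object, and applies the delta to it.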
func (p *Pack) parseDeltaEntry(bytes []byte, pot PackedObjectType, oid *objects.ObjectId, i int) *PackedObject {
	var (
		deltaDeflated packedDelta
		baseOffset    int64
		dp            *packedObjectParser
		err           error
	)
	e := p.idx.entries[i]
	switch pot {
	case ObjectRefDelta:
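		// the base object is referenced by its id; resolve the id to an offset via the idx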
		var oid *objects.ObjectId
		deltaDeflated, oid = readPackedRefDelta(bytes)
		e := p.idx.entryById(oid)
		if e == nil {
			util.PanicErrf("nil entry for base object with id %s", oid.String())
		}
		baseOffset = e.offset
	case ObjectOffsetDelta:
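		// the base object's offset is encoded relative to this entry's own offset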
		if deltaDeflated, baseOffset, err = readPackedOffsetDelta(bytes); err != nil {
			util.PanicErrf("Err parsing size: %v. Could not determine size for %s", err, e.String())
		}
		baseOffset = e.offset - baseOffset
	}
	base := p.findObjectByOffset(baseOffset)
	bytes = []byte(deltaDeflated)
	if dp, err = newPackedObjectParser(bytes, oid); err != nil {
		util.PanicErr(err.Error())
	}
	return dp.applyDelta(base, oid)
}
Example #2
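// parseWhoWhen parses an identity line of the form
// "<marker> Name <email> <unix-seconds> <+/-hhmm>" and returns the corresponding WhoWhen.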
func (p *objectParser) parseWhoWhen(marker string) *objects.WhoWhen {
	p.ConsumeString(marker)
	p.ConsumeByte(token.SP)
	user := strings.Trim(p.ReadString(token.LT), string(token.SP))
	email := p.ReadString(token.GT)
	p.ConsumeByte(token.SP)
	seconds := p.ParseInt(token.SP, 10, 64)

	// time zone
	var sign int64
	signStr := p.ConsumeStrings(signs)
	switch signStr {
	case token.PLUS:
		sign = 1
	case token.MINUS:
		sign = -1
	default:
		util.PanicErrf("expecting: +/- sign")
	}

	tzHours := p.ParseIntN(2, 10, 64)
	tzMins := p.ParseIntN(2, 10, 64)
	if tzMins < 0 || tzMins > 59 {
		util.PanicErrf("expecting 00 to 59 for tz minutes")
	}

	// time zone offset in signed minutes
	tz := int(sign * (tzHours*int64(60) + tzMins))

	ww := objects.NewWhoWhen(user, email, seconds, tz)

	return ww
}
Example #3
// ParsePack parses the pack's metadata, verifies the pack file against its idx, and closes it.
func (p *packIdxParser) ParsePack() *Pack {
	// parse the index and construct the pack
	idx := p.parseIdx()
	content := make([]*PackedObject, idx.count)
	pack := &Pack{
		PackVersion,
		content,
		idx,
		p.name,
		p.packOpener,
		nil,
	}
	// open and verify the pack file
	if err := pack.open(); err != nil {
		util.PanicErrf("Could not open pack file %s: %s", pack.name, err)
	}
	dataParser := util.NewDataParser(bufio.NewReader(pack.file))
	dataParser.ConsumeString(PackSignature)
	dataParser.ConsumeBytes([]byte{0, 0, 0, PackVersion})
	count := dataParser.ParseIntBigEndian(4)
	if count != idx.count {
		util.PanicErrf("Pack file count doesn't match idx file count for pack-%s!", p.name) //todo: don't panic.
	}
	pack.close()
	return pack
}
Example #4
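// findObjectByOffset binary-searches the idx entries, which are ordered by
// offset, for the object at the given pack offset, parsing and caching it
// on first access.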
func (p *Pack) findObjectByOffset(offset int64) *PackedObject {
	i := sort.Search(len(p.idx.entries), func(j int) bool {
		return p.idx.entries[j].offset >= offset
	})
	if i >= len(p.idx.entries) || p.idx.entries[i].offset != offset {
		util.PanicErrf("Could not find object with offset %d.", offset)
	}
	if p.content[i] == nil {
		p.content[i] = p.parseEntry(i)
	}
	if p.content[i] == nil {
		util.PanicErrf("Could not find or parse object with offset %d", offset)
	}
	return p.content[i]
}
Example #5
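// ParseFileMode parses an octal file mode terminated by delim and panics
// if the value is not a recognized mode.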
func (p *objectParser) ParseFileMode(delim byte) (mode objects.FileMode) {
	var ok bool
	if mode, ok = assertFileMode(uint16(p.ParseInt(delim, 8, 32))); !ok {
		util.PanicErrf("expected: filemode")
	}
	return
}
Example #6
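// Parse parses the revision spec: it resolves the base revision and then
// applies any trailing ^ and ~ modifiers, returning any error raised
// while parsing.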
func (p *revParser) Parse() error {
	e := util.SafeParse(func() {
		if p.rev == "" {
			util.PanicErr("revision spec is empty")
		}

		if p.PeekByte() == ':' {
			util.PanicErr(": syntaxes not supported") // TODO
		}

		start := p.Count()
		// read until modifier or end
		for !p.EOF() {
			if !isModifier(p.PeekByte()) {
				p.ReadByte()
			} else {
				break
			}
		}
		end := p.Count()

		rev := p.rev[start:end]
		if rev == "" {
			util.PanicErr("revision is empty")
		}

		err := p.findObject(rev)
		if err != nil {
			util.PanicErr(err.Error())
		}

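		// apply any trailing ^ and ~ modifiers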
		for !p.EOF() {
			var err error
			b := p.ReadByte()
			if b == '^' {
				if !p.EOF() && p.PeekByte() == '{' {
					p.ConsumeByte('{')
					otype := objects.ObjectType(p.ConsumeStrings(token.ObjectTypes))
					err = applyDereference(p, otype)
					p.ConsumeByte('}')
				} else {
					err = applyParentFunc(p, CommitNthParent)
				}
			} else if b == '~' {
				err = applyParentFunc(p, CommitNthAncestor)
			} else {
				util.PanicErrf("unexpected modifier: '%s'", string(b))
			}

			if err != nil {
				util.PanicErr(err.Error())
			}
		}
	})
	return e
}
Example #7
// ParseOidBytes reads the next objects.OidSize bytes from
// the Reader and generates an ObjectId.
func (p *objectIdParser) ParseOidBytes() *objects.ObjectId {
	b := p.Consume(objects.OidSize)
	oid, e := objects.OidFromBytes(b)
	if e != nil {
		util.PanicErrf("expected: hash bytes %d long", objects.OidSize)
	}
	return oid
}
Example #8
// ParseOid reads the next objects.OidHexSize bytes from the
// Reader and returns the resulting ObjectId.
func (p *objectIdParser) ParseOid() *objects.ObjectId {
	hex := string(p.Consume(objects.OidHexSize))
	oid, e := objects.OidFromString(hex)
	if e != nil {
		util.PanicErrf("expected: hex string of size %d", objects.OidHexSize)
	}
	return oid
}
Example #9
// read all the bytes of the ith object of the pack file
func (p *Pack) readEntry(i int) []byte {
	e := p.idx.entries[i]
	var size int64
	if i+1 < len(p.idx.entries) {
		size = p.idx.entries[i+1].offset - e.offset
	} else {
		if info, err := p.file.Stat(); err != nil {
			util.PanicErrf("Could not determine size of pack file %s: %s", p.file.Name(), err)
		} else {
			size = info.Size() - e.offset
		}
	}
	data := make([]byte, size)
	if _, err := p.file.ReadAt(data, e.offset); err != nil {
		util.PanicErrf("Could not read %d bytes from %d of pack file %s: %s", len(data), e.offset, p.file.Name(), err)
	}
	return data
}
Example #10
// parse the ith entry of this pack, opening the pack resource if necessary
func (p *Pack) parseEntry(i int) (obj *PackedObject) {
	if len(p.content) > i && p.content[i] != nil {
		return p.content[i] //already parsed
	}
	if err := p.open(); err != nil {
		util.PanicErrf("Could not open pack file %s: %s", p.name, err.Error())
	}
	size, pot, bytes := p.entrySizeTypeData(i)
	e := p.idx.entries[i]
	switch pot {
	case PackedBlob, PackedCommit, PackedTree, PackedTag:
		obj = parseNonDeltaEntry(bytes, pot, e.ObjectId, int64(size))
	case ObjectOffsetDelta, ObjectRefDelta:
		obj = p.parseDeltaEntry(bytes, pot, e.ObjectId, i)
	default:
		util.PanicErrf("Unrecognized object type %d in pack %s for entry with id %s", pot, p.name, e.ObjectId)
	}
	return
}
Example #11
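// parseIdx parses the idx file: the fan-out table, the object ids, their
// crc32 checksums and pack offsets, and the trailing pack and idx checksums.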
func (p *packIdxParser) parseIdx() *Idx {
	p.idxParser.ConsumeString(PackIdxSignature)
	p.idxParser.ConsumeBytes([]byte{0, 0, 0, PackVersion})
	var counts [256]int
	for i := range counts {
		counts[i] = int(p.idxParser.ParseIntBigEndian(4))
	}
	// discard the fan-out values; just use the largest value,
	// which is the total # of objects:
	count := counts[255]
	idToEntry := make(map[string]*PackedObjectId)
	entries := make([]*PackedObjectId, count)
	entriesByOid := make([]*PackedObjectId, count)
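	// the 20-byte object ids, in the order they appear in the idx file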
	for i := 0; i < count; i++ {
		b := p.idxParser.ReadNBytes(20)
		oid, _ := objects.OidFromBytes(b)
		entries[i] = &PackedObjectId{
			ObjectId: oid,
		}
		entriesByOid[i] = entries[i]
	}
	for i := 0; i < count; i++ {
		entries[i].crc32 = int64(p.idxParser.ParseIntBigEndian(4))
	}
	for i := 0; i < count; i++ {
		// TODO: handle the 8-byte offsets used by pack files larger than 2 GB
		entries[i].offset = p.idxParser.ParseIntBigEndian(4)
	}
	checksumPack := p.idxParser.ReadNBytes(20)
	checksumIdx := p.idxParser.ReadNBytes(20)
	if !p.idxParser.EOF() {
		util.PanicErrf("Found extraneous bytes! %x", p.idxParser.Bytes())
	}
	// order by offset
	sort.Sort(packedObjectIds(entries))
	for i, v := range entries {
		v.index = i
	}
	packChecksum, _ := objects.OidFromBytes(checksumPack)
	idxChecksum, _ := objects.OidFromBytes(checksumIdx)
	return &Idx{
		entries,
		entriesByOid,
		idToEntry,
		&counts,
		int64(count),
		packChecksum,
		idxChecksum,
	}
}
Example #12
// parseTree performs the parsing of binary data into a Tree
// object, or panics with panicErr if there is a problem parsing.
// For this reason, it should be called as a parameter to
// safeParse().
func (p *objectParser) parseTree() *objects.Tree {
	entries := make([]*objects.TreeEntry, 0)
	p.ResetCount()
	for !p.EOF() {
		mode := p.ParseFileMode(token.SP)
		name := p.ReadString(token.NUL)
		oid := p.ParseOidBytes()
		t := deduceObjectType(mode)
		entry := objects.NewTreeEntry(mode, t, name, oid)
		entries = append(entries, entry)
	}

	if p.Count() != p.hdr.Size() {
		util.PanicErrf("payload of size %d isn't of expected size %d", p.Count(), p.hdr.Size())
	}
	return objects.NewTree(p.oid, p.hdr, entries)
}
Example #13
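// unpackFromShortOid resolves an abbreviated object id against the idx
// entries that share its leading byte, returning MultipleObjects if more
// than one object matches.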
func (pack *Pack) unpackFromShortOid(short string) (obj objects.Object, result packSearch) {
	prefix, err := strconv.ParseUint(short[0:2], 16, 8)
	if err != nil {
		util.PanicErrf("invalid short oid; non-hex characters: %s. %s", short, err.Error())
	}
	entries := pack.idx.entriesWithPrefix(byte(prefix))
	if entries == nil {
		return
	}
	var already bool
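	// scan every candidate so that an ambiguous abbreviation is detected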
	for _, oid := range entries {
		if s := oid.String(); strings.HasPrefix(s, short) {
			if already {
				return nil, MultipleObjects
			}
			obj, result = pack.unpack(oid.ObjectId)
			already = true
		}
	}
	return
}
Example #14
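// parseNonDeltaEntry parses a non-delta (blob, commit, tree, or tag) pack
// entry of the given size.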
func parseNonDeltaEntry(bytes []byte, pot PackedObjectType, oid *objects.ObjectId, size int64) (po *PackedObject) {
	var (
		dp  *packedObjectParser
		err error
	)
	if dp, err = newPackedObjectParser(bytes, oid); err != nil {
		util.PanicErr(err.Error())
	} else if int64(len(dp.bytes)) != size {
		util.PanicErrf("Expected object of %d bytes but found %d bytes", size, len(dp.bytes))
	}
	switch pot {
	case PackedBlob:
		po = dp.parseBlob(size)
	case PackedCommit:
		po = dp.parseCommit(size)
	case PackedTree:
		po = dp.parseTree(size)
	case PackedTag:
		po = dp.parseTag(size)
	}
	return
}
Example #15
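// applyDelta applies the parsed delta to its base object, interpreting
// copy and insert commands to reconstruct the target object.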
func (dp *packedObjectParser) applyDelta(base *PackedObject, id *objects.ObjectId) (object *PackedObject) {
	p := dp.objectParser

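	// the delta data starts with the sizes of the base object and of the
	// reconstructed output, each encoded as a variable-length integer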
	baseSize := p.parseIntWhileMSB()
	outputSize := p.parseIntWhileMSB()

	src := base.bytes

	if int(baseSize) != len(src) {
		util.PanicErrf("Expected size of base object is %d, but actual size is %d")
	}

	out := make([]byte, outputSize)
	var appended int64
	cmd := p.ReadByte()
	for {
		if cmd == 0 {
			util.PanicErrf("Invalid delta! Byte 0 is not a supported delta code.")
		}
		var offset, n int64
		if cmd&0x80 != 0 {
			// copy command: copy n bytes from the base object into the output
			offset, n = dp.parseCopyCmd(cmd)
			if offset+n > baseSize {
				util.PanicErrf("Bad delta - references byte %d of a %d-byte source", offset+n, baseSize)
			}
			for i := offset; i < offset+n; i++ {
				out[appended+(i-offset)] = src[i]
			}
		} else {
			// insert command: copy the next n bytes of the delta data into the output
			offset, n = 0, int64(cmd)
			for i := offset; i < offset+n; i++ {
				out[appended+(i-offset)] = p.ReadByte()
			}
		}
		appended += n
		if appended < outputSize {
			cmd = p.ReadByte()
		} else {
			break
		}
	}
	if appended != outputSize {
		util.PanicErrf("Expected output of size %d, got %d", outputSize, appended)
	}
	if outputSize != int64(len(out)) {
		util.PanicErrf("Expected output of len %d, got %d", outputSize, len(out))
	}
	outputType := base.object.Header().Type()
	outputParser := NewObjectParser(bufio.NewReader(bytes.NewReader(out)), id)
	outputParser.hdr = objects.NewObjectHeader(outputType, outputSize)
	var obj objects.Object
	switch outputType {
	case objects.ObjectBlob:
		obj = outputParser.parseBlob()
	case objects.ObjectTree:
		obj = outputParser.parseTree()
	case objects.ObjectCommit:
		obj = outputParser.parseCommit()
	case objects.ObjectTag:
		obj = outputParser.parseTag()
	}
	return &PackedObject{
		obj,
		out,
		base.object.ObjectId(),
		base.Depth + 1,
	}
}