Example #1
func writeAt(w io.WriteSeeker, c int64, p []byte) (int64, error) {
	if _, err := w.Seek(c, 0); err != nil {
		return 0, err
	}
	n, err := w.Write(p)
	return int64(n), err
}
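The whence value 0 in the Seek call above is io.SeekStart (values 1 and 2, used in later examples, are io.SeekCurrent and io.SeekEnd). A minimal usage sketch, assuming a temporary *os.File as the io.WriteSeeker; exampleWriteAt is a hypothetical caller, not part of the original code:
func exampleWriteAt() error {
	// *os.File satisfies io.WriteSeeker.
	f, err := os.CreateTemp("", "writeat-demo")
	if err != nil {
		return err
	}
	defer os.Remove(f.Name())
	defer f.Close()
	// Write "data" starting at byte offset 4; bytes 0-3 stay zero.
	_, err = writeAt(f, 4, []byte("data"))
	return err
}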
Example #2
func (t *TLK) WriteJson(w io.WriteSeeker) error {
	out := TlkJson{}
	out.Strings = make(map[string]string, 0)
	out.Sounds = make(map[string]string, 0)

	for idx := 0; idx < t.GetStringCount(); idx++ {
		str, err := t.String(idx)
		stringId := fmt.Sprintf("%d", idx)
		if err != nil {
			return err
		}
		out.Strings[stringId] = str
		entry, err := t.Entry(idx)
		if err != nil {
			return err
		}
		if entry.Sound.Valid() {
			out.Sounds[stringId] = entry.Sound.String()
		}

	}
	bytes, err := json.MarshalIndent(out, "", "\t")
	if err != nil {
		return err
	}

	_, err = w.Write(bytes)

	return err
}
Example #3
func write(f io.WriteSeeker, s string) error {
	_, err := f.Write([]byte(s))
	if err != nil {
		return err
	}
	_, err = f.Seek(0, 0)
	return err
}
Example #4
func WriteEmptyInt(w io.WriteSeeker, n int) (pos int64, err error) {
	if pos, err = w.Seek(0, 1); err != nil {
		return
	}
	if err = WriteInt(w, 0, n); err != nil {
		return
	}
	return
}
Example #5
func (dec decoder) Decode(x interface{}) error {
	err := dec.dec.Decode(x)
	if err != nil {
		return err
	}
	var names []string
	err = dec.dec.Decode(&names)
	if err != nil {
		return err
	}
	files := make(map[string][]*File)
	typeapply.Do(func(f *File) {
		if f != nil && f.Name != "" {
			files[f.Name] = append(files[f.Name], f)
		}
	},
		x)
	for _, name := range names {
		var out io.WriteSeeker
		if files[name] == nil {
			out = nullWriter{}
		} else {
			samefiles := files[name]
			f := samefiles[0]
			if f == nil {
				return errors.New("file not found in manifest")
			}
			f.file, err = ioutil.TempFile("", "filemarshal")
			if err != nil {
				return err
			}
			f.Name = f.file.Name()
			for _, g := range samefiles[1:] {
				*g = *f
			}
			out = f.file
		}
		for {
			buf := []byte(nil)
			err = dec.dec.Decode(&buf)
			if err != nil {
				return err
			}
			if len(buf) == 0 {
				break
			}
			_, err = out.Write(buf)
			if err != nil {
				return err
			}
		}
		out.Seek(0, 0)
	}
	return nil
}
Example #6
func (s *mockedStorage) download(url string, output io.WriteSeeker) error {
	blob, ok := s.data[url]
	if !ok {
		return ErrDownloadError
	}
	_, err := output.Seek(0, os.SEEK_SET)
	s.c.So(err, ShouldBeNil)
	_, err = output.Write(blob)
	s.c.So(err, ShouldBeNil)
	return nil
}
Example #7
func (t *TLK) ConvertToUTF8(w io.WriteSeeker) error {
	w.Seek(0, os.SEEK_SET)
	err := binary.Write(w, binary.LittleEndian, t.header)
	if err != nil {
		return err
	}
	curStringOffset := 0
	strArray := []string{}
	for i := 0; i < len(t.entries); i++ {
		str, err := t.String(i)
		if err != nil {
			return err
		}
		strArray = append(strArray, str)
		t.entries[i].Offset = uint32(curStringOffset)
		t.entries[i].Length = uint32(len(str))
		curStringOffset += int(t.entries[i].Length)
		w.Seek(int64(binary.Size(t.header)+binary.Size(t.entries[0])*i), os.SEEK_SET)
		err = binary.Write(w, binary.LittleEndian, t.entries[i])
		if err != nil {
			return err
		}
	}
	w.Seek(int64(t.header.StringOffset), os.SEEK_SET)
	for _, str := range strArray {
		w.Write([]byte(str))
	}

	return nil
}
Example #8
func (p *Provider) copySparse(dst io.WriteSeeker, src sparse.FileIoProcessor) error {
	extents, err := sparse.GetFiemapExtents(src)
	if err != nil {
		return err
	}

	for _, x := range extents {
		if _, err := dst.Seek(int64(x.Logical), os.SEEK_SET); err != nil {
			return err
		}
		if _, err := io.Copy(dst, io.NewSectionReader(src, int64(x.Logical), int64(x.Length))); err != nil {
			return err
		}
	}

	return nil
}
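copySparse writes only the mapped extents: dst is seeked past the unmapped regions and nothing is written there, so those regions remain holes and the copy stays sparse. A minimal illustration of the same seek-past-and-write idea with a plain file (hypothetical, not from the original project):
func writeWithHole(f *os.File) error {
	// Write 4 KiB at offset 1 MiB; the first 1 MiB becomes a hole
	// (or reads back as zeros on filesystems without sparse-file support).
	if _, err := f.Seek(1<<20, io.SeekStart); err != nil {
		return err
	}
	_, err := f.Write(make([]byte, 4096))
	return err
}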
Example #9
// WriteData writes the buffer to the end of the storage and returns a Pointer to the written data.
func WriteData(s io.WriteSeeker, b []byte) (Pointer, error) {

	// seek to the end of the storage
	offset, err := s.Seek(0, 2)
	if err != nil {
		return Pointer{}, err
	}

	// write the buffer to storage
	err = writeFull(s, b)
	if err != nil {
		return Pointer{}, err
	}

	return Pointer{
		Addr: uint64(offset),
		Size: uint64(len(b)),
	}, nil
}
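Seek(0, 2) seeks to the end of the storage (io.SeekEnd), so Pointer.Addr records the offset at which the appended bytes begin. A hypothetical read-back sketch, assuming the same storage also implements io.ReaderAt; readData is not part of the original code:
func readData(r io.ReaderAt, p Pointer) ([]byte, error) {
	buf := make([]byte, p.Size)
	// ReadAt fills buf starting at the offset recorded in the Pointer.
	_, err := r.ReadAt(buf, int64(p.Addr))
	return buf, err
}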
Example #10
File: gpt.go Project: rekby/gpt
// Calculate the header and partitions CRC, then save the Header and partition entries to the disk.
// It is independent of the current position: the writer is seeked to the positions taken from Table.Header.
func (this Table) Write(writer io.WriteSeeker) (err error) {
	this.Header.PartitionsCRC = this.calcPartitionsCRC()
	if headerPos, ok := mul(int64(this.SectorSize), int64(this.Header.HeaderStartLBA)); ok {
		writer.Seek(headerPos, 0)
	}
	err = this.Header.write(writer, true)
	if err != nil {
		return
	}
	if partTablePos, ok := mul(int64(this.SectorSize), int64(this.Header.PartitionsTableStartLBA)); ok {
		writer.Seek(partTablePos, 0)
	}
	for _, part := range this.Partitions {
		err = part.write(writer, this.Header.PartitionEntrySize)
		if err != nil {
			return
		}
	}
	return
}
Example #11
func growBy(w io.WriteSeeker, n int) {
	var err error
	defer panicIf(&err)

	// Seek to the end.
	_, err = w.Seek(0, 2)
	if err != nil {
		err = fmt.Errorf("Seek: %v", err)
		return
	}

	// Write.
	_, err = w.Write(bytes.Repeat([]byte("a"), n))
	if err != nil {
		err = fmt.Errorf("Write: %v", err)
		return
	}

	return
}
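growBy reports failures through a deferred panicIf(&err) helper that is not shown in the snippet. A hedged sketch of what such a helper plausibly looks like (an assumption, not the project's actual implementation):
func panicIf(err *error) {
	// Runs via defer, after the surrounding function body has set *err.
	if *err != nil {
		panic(*err)
	}
}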
Example #12
func writeBlock(f io.WriteSeeker, b *Block, index int) error {
	if b.right > payloadSize {
		return ErrPayloadSizeTooLarge
	}
	if b.left != 0 {
		panic("block is not left aligned")
	}
	if err := seekToIndex(f, index); err != nil {
		return err
	}

	crcBuf := make([]byte, crc32Len+len(b.Payload()))
	binary.BigEndian.PutUint32(crcBuf, b.CRC())
	copy(crcBuf[crc32Len:], b.Payload())
	_, err := f.Write(crcBuf)
	if err != nil {
		return err
	}
	return nil
}
Example #13
// writeChunk writes a chunk to the given stream.
func writeChunk(w io.WriteSeeker, cd *ChunkDescriptor, offset int) error {
	// Jump to chunk sector.
	_, err := w.Seek(int64(offset)*sectorSize, 0)
	if err != nil {
		return err
	}

	// Write compressed data size.
	err = writeU32(w, uint32(len(cd.data)))
	if err != nil {
		return err
	}

	// Write compression scheme.
	err = writeU8(w, ZLib)
	if err != nil {
		return err
	}

	// Write compressed data.
	_, err = w.Write(cd.data)
	if err != nil {
		return err
	}

	// Pad data
	padding := (cd.SectorCount() * sectorSize) - len(cd.data)
	_, err = w.Write(make([]byte, padding))
	return err
}
Example #14
// writeBlock writes a full or partial block into ws. len(p) must not exceed bs - crc32Len.
// Each block contains a crc32Len-byte crc32c of the payload followed by a payload of up
// to (bs - crc32Len) bytes. Any error encountered is returned.
// It is the caller's responsibility to ensure that only the last block in the file has
// len(p) < bs - crc32Len.
func writeBlock(ws io.WriteSeeker, index, bs int64, p []byte) error {
	if int64(len(p)) > bs-crc32Len {
		return ErrPayloadSizeTooLarge
	}

	// seek to the beginning of the block
	_, err := ws.Seek(index*bs, os.SEEK_SET)
	if err != nil {
		return err
	}

	// write crc32c
	// TODO: reuse buffer
	b := make([]byte, crc32Len)
	binary.BigEndian.PutUint32(b, crc32.Checksum(p, crc32cTable))
	_, err = ws.Write(b)
	if err != nil {
		return err
	}

	// write payload
	_, err = ws.Write(p)
	if err != nil {
		return err
	}
	return nil
}
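A hedged sketch of the matching read side, assuming the same bs, crc32Len and crc32cTable values and an io.ReadSeeker over the same file; readBlock is not shown in the original:
func readBlock(rs io.ReadSeeker, index, bs int64) ([]byte, error) {
	// Seek to the beginning of the block.
	if _, err := rs.Seek(index*bs, io.SeekStart); err != nil {
		return nil, err
	}
	buf := make([]byte, bs)
	n, err := io.ReadFull(rs, buf)
	// The last block in the file may be short; tolerate an unexpected EOF.
	if err != nil && err != io.ErrUnexpectedEOF {
		return nil, err
	}
	if n < int(crc32Len) {
		return nil, io.ErrUnexpectedEOF
	}
	buf = buf[:n]
	want := binary.BigEndian.Uint32(buf[:crc32Len])
	payload := buf[crc32Len:]
	if crc32.Checksum(payload, crc32cTable) != want {
		return nil, fmt.Errorf("block %d: crc mismatch", index)
	}
	return payload, nil
}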
Example #15
// writeHeader writes header data into the given writer.
func writeHeader(w io.WriteSeeker, set []*ChunkDescriptor) error {
	var locations, timestamps [sectorSize]byte

	offset := 2 // Skip first two sectors for the header.

	for _, cd := range set {
		if cd == nil {
			continue
		}

		sectors := cd.SectorCount()
		writeOffset(locations[:], cd.X, cd.Z, offset, sectors)
		writeTimestamp(timestamps[:], cd.X, cd.Z, cd.LastModified)

		offset += sectors
	}

	_, err := w.Seek(0, 0)
	if err != nil {
		return err
	}

	_, err = w.Write(locations[:])
	if err != nil {
		return err
	}

	_, err = w.Write(timestamps[:])
	return err
}
Example #16
// NewWriter opens a CDB database for the given io.WriteSeeker.
//
// If hasher is nil, it will default to the CDB hash function.
func NewWriter(writer io.WriteSeeker, hasher hash.Hash32) (*Writer, error) {
	// Leave 256 * 8 bytes for the index at the head of the file.
	_, err := writer.Seek(0, os.SEEK_SET)
	if err != nil {
		return nil, err
	}

	_, err = writer.Write(make([]byte, indexSize))
	if err != nil {
		return nil, err
	}

	if hasher == nil {
		hasher = newCDBHash()
	}

	return &Writer{
		hasher:         hasher,
		writer:         writer,
		bufferedWriter: bufio.NewWriterSize(writer, 65536),
		bufferedOffset: indexSize,
	}, nil
}
Example #17
func goDecoder(gen *BinMatMN, w io.WriteSeeker, fsize int64, n int) (chan []uint32, chan struct{}) {
	word := int64(W >> 3)
	size := arrangeSize(fsize, int64(n))
	msg := make(chan []uint32, 10)
	closed := make(chan struct{})
	go func() {
		var err error
		pb := make([]byte, word)
		var i int64
		for m := range msg {
			//ch <- gen.Mul(msg)
			ps := m
			for j, p := range ps {
				loc := int64(j)*size + i*word
				if loc > fsize {
					continue
				}
				if _, err = w.Seek(loc, os.SEEK_SET); err != nil {
					log.Fatal(err)
				}
				binary.LittleEndian.PutUint32(pb, p)
				remain := (fsize - loc)
				wb := pb
				if remain < word {
					wb = wb[:remain]
				}
				if _, err := w.Write(wb); err != nil {
					log.Fatal(err)
				}
			}
			i++
		}
		closed <- struct{}{}
	}()
	return msg, closed
}
Example #18
// writephdr writes a Prog header to an ELF at the given destination offset.
// It writes `p` to `sw` at offset `dst`, using class and byte-order information from `f`.
func writephdr(f *ELF.File, dst int64, sw io.WriteSeeker, p *ELF.Prog) error {
	sw.Seek(dst, os.SEEK_SET)

	switch f.Class {
	case ELF.ELFCLASS32:
		hdr := ELF.Prog32{
			Type:   uint32(p.Type),
			Flags:  uint32(p.Flags),
			Off:    uint32(p.Off),
			Vaddr:  uint32(p.Vaddr),
			Paddr:  uint32(p.Paddr),
			Filesz: uint32(p.Filesz),
			Memsz:  uint32(p.Memsz),
			Align:  uint32(p.Align),
		}
		if err := binary.Write(sw, f.ByteOrder, hdr); err != nil {
			return err
		}

	case ELF.ELFCLASS64:
		hdr := ELF.Prog64{
			Type:   uint32(p.Type),
			Flags:  uint32(p.Flags),
			Off:    p.Off,
			Vaddr:  p.Vaddr,
			Paddr:  p.Paddr,
			Filesz: p.Filesz,
			Memsz:  p.Memsz,
			Align:  p.Align,
		}
		if err := binary.Write(sw, f.ByteOrder, hdr); err != nil {
			return err
		}
	}
	return nil
}
Example #19
func RefillInt(w io.WriteSeeker, pos int64, val int, n int) (err error) {
	var curPos int64
	if curPos, err = w.Seek(0, 1); err != nil {
		return
	}
	if _, err = w.Seek(pos, 0); err != nil {
		return
	}
	if err = WriteInt(w, val, n); err != nil {
		return
	}
	if _, err = w.Seek(curPos, 0); err != nil {
		return
	}
	return
}
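WriteEmptyInt (Example #4) and RefillInt (Example #19) are two halves of a common placeholder pattern: reserve space for a value that is only known later, keep its offset, then seek back and overwrite it. A hedged sketch of combining them, assuming WriteInt(w, val, n) writes val as an n-byte integer as in those snippets; writeLengthPrefixed is hypothetical:
func writeLengthPrefixed(w io.WriteSeeker, body []byte) error {
	// Reserve 4 bytes for a length we do not know yet and remember its offset.
	pos, err := WriteEmptyInt(w, 4)
	if err != nil {
		return err
	}
	if _, err := w.Write(body); err != nil {
		return err
	}
	// Backfill the placeholder; RefillInt restores the current offset when done.
	return RefillInt(w, pos, len(body), 4)
}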
Example #20
// writeMutation encodes a mutation into a block and writes the block to the
// writer at the current offset, returning any error. The seek offset is left unmodified.
func writeMutation(w io.WriteSeeker, mut string) (err error) {
	b := newBlock(mut)

	offset, _ := w.Seek(0, 1) // remember where we are.
	// We use two write calls because we use encoding/binary
	// to write the fixed-length header.
	err = binary.Write(w, binary.LittleEndian, b.Hdr)
	if err != nil {
		return
	}

	// We've written the header successfully; write the rest
	// of the data.
	_, err = w.Write(b.Data)
	w.Seek(offset, 0) // rewind so we don't break readMutation().
	return
}
Example #21
func (t *TLK) Write(w io.WriteSeeker) error {

	w.Seek(0, os.SEEK_SET)
	err := binary.Write(w, binary.LittleEndian, t.header)
	if err != nil {
		return err
	}
	err = binary.Write(w, binary.LittleEndian, t.entries)
	if err != nil {
		return err
	}
	w.Seek(0, os.SEEK_CUR)
	_, err = w.Write(t.stringBuf)
	if err != nil {
		return err
	}
	return nil

}
Example #22
// MakeFactory creates a CDB and returns an adder function, which should be called
// with each Element, and a closer, which finalizes the CDB.
func MakeFactory(w io.WriteSeeker) (adder AdderFunc, closer CloserFunc, err error) {
	defer func() { // Centralize error handling.
		if e := recover(); e != nil {
			logger.Panicf("error: %s", e)
			err = e.(error)
		}
	}()

	if _, err = w.Seek(int64(headerSize), 0); err != nil {
		logger.Panicf("cannot seek to %d of %s: %s", headerSize, w, err)
	}
	buf := make([]byte, 8)
	wb := bufio.NewWriter(w)
	hash := cdbHash()
	hw := io.MultiWriter(hash, wb) // Computes hash when writing record key.
	htables := make(map[uint32][]slot)
	poshold := &posHolder{headerSize}

	// Read all records and write to output.
	adder = func(elt Element) error {
		var (
			err        error
			klen, dlen uint32
			n          int
		)
		klen, dlen = uint32(len(elt.Key)), uint32(len(elt.Data))
		writeNums(wb, klen, dlen, buf)
		hash.Reset()
		if n, err = hw.Write(elt.Key); err == nil && uint32(n) != klen {
			logger.Printf("klen=%d written=%d", klen, n)
		} else if err != nil {
			logger.Panicf("error writing key %s: %s", elt.Key, err)
			return err
		}
		if n, err = wb.Write(elt.Data); err == nil && uint32(n) != dlen {
			logger.Printf("dlen=%d written=%d", dlen, n)
		} else if err != nil {
			logger.Panicf("error writing data: %s", err)
			return err
		}
		h := hash.Sum32()
		tableNum := h % 256
		htables[tableNum] = append(htables[tableNum], slot{h, poshold.pos})
		poshold.pos += 8 + klen + dlen
		return nil
	}

	closer = func() error {
		var err error
		if err = wb.Flush(); err != nil {
			logger.Panicf("cannot flush %+v: %s", wb, err)
			return err
		}
		//if p, err := w.Seek(0, 1); err != nil || int64(pos) != p {
		//	logger.Panicf("Thought I've written pos=%d bytes, but the actual position is %d! (error? %s)", pos, p, err)
		//}

		// Write hash tables and header.

		// Create and reuse a single hash table.
		pos := poshold.pos
		maxSlots := 0
		for _, slots := range htables {
			if len(slots) > maxSlots {
				maxSlots = len(slots)
			}
		}
		slotTable := make([]slot, maxSlots*2)

		header := make([]byte, headerSize)
		// Write hash tables.
		for i := uint32(0); i < 256; i++ {
			slots := htables[i]
			if slots == nil {
				putNum(header[i*8:], pos)
				continue
			}

			nslots := uint32(len(slots) * 2)
			hashSlotTable := slotTable[:nslots]
			// Reset table slots.
			for j := 0; j < len(hashSlotTable); j++ {
				hashSlotTable[j].h = 0
				hashSlotTable[j].pos = 0
			}

			for _, slot := range slots {
				slotPos := (slot.h / 256) % nslots
				for hashSlotTable[slotPos].pos != 0 {
					slotPos++
					if slotPos == uint32(len(hashSlotTable)) {
						slotPos = 0
					}
				}
				hashSlotTable[slotPos] = slot
			}

			if err = writeSlots(wb, hashSlotTable, buf); err != nil {
				logger.Panicf("cannot write slots: %s", err)
			}

			putNum(header[i*8:], pos)
			putNum(header[i*8+4:], nslots)
			pos += 8 * nslots
		}

		if err = wb.Flush(); err != nil {
			logger.Panicf("error flushing %s: %s", wb, err)
		}

		if _, err = w.Seek(0, 0); err != nil {
			logger.Panicf("error seeking to begin of %s: %s", w, err)
		}

		if _, err = w.Write(header); err != nil {
			logger.Panicf("cannot write header: %s", err)
		}
		return err
	}

	return adder, closer, nil
}
Example #23
func ConvertToBIFL(r io.ReadSeeker, w io.WriteSeeker) error {
	r.Seek(0, os.SEEK_SET)
	w.Seek(0, os.SEEK_SET)
	bif, err := OpenBif(r)
	if err != nil {
		return err
	}

	bif.Header.Signature = [4]byte{'B', 'I', 'F', 'L'}
	bif.Header.Version = [4]byte{'V', '1', '.', '0'}
	err = binary.Write(w, binary.LittleEndian, bif.Header)
	if err != nil {
		return err
	}
	err = binary.Write(w, binary.LittleEndian, bif.VariableEntries)
	if err != nil {
		return err
	}
	err = binary.Write(w, binary.LittleEndian, bif.FixedEntries)
	if err != nil {
		return err
	}
	outOffset := binary.Size(bif.Header)
	dataOffset := uint32(outOffset + binary.Size(bif.VariableEntries) + binary.Size(bif.FixedEntries))
	for _, entry := range bif.VariableEntries {
		dataIn := make([]byte, entry.Size)
		var lzmaOut bytes.Buffer
		out := lzma.NewWriter(&lzmaOut)

		r.Seek(int64(entry.Offset), os.SEEK_SET)
		io.ReadAtLeast(r, dataIn, len(dataIn))
		out.Write(dataIn)
		out.Close()

		w.Seek(int64(outOffset), os.SEEK_SET)
		entry.Offset = dataOffset
		binary.Write(w, binary.LittleEndian, entry)
		w.Seek(int64(dataOffset), os.SEEK_SET)
		compressedSize := uint32(lzmaOut.Len())
		if compressedSize < entry.Size {
			binary.Write(w, binary.LittleEndian, compressedSize)
			lzmaOut.WriteTo(w)
			// Length of compressed data plus 4 bytes for our 32bit int
			dataOffset += (compressedSize + 4)
		} else {
			fmt.Printf("Compressed size is larger: %d > %d\n", compressedSize, entry.Size)
			binary.Write(w, binary.LittleEndian, uint32(0))
			binary.Write(w, binary.LittleEndian, dataIn)
			dataOffset += (entry.Size + 4)
		}
		outOffset += binary.Size(bif.VariableEntries[0])
	}
	for _, entry := range bif.FixedEntries {
		b := make([]byte, entry.Size*entry.Number)
		//out := lzma.NewWriter(&b)

		r.Seek(int64(entry.Offset), os.SEEK_SET)
		io.ReadAtLeast(r, b, len(b))

		w.Seek(int64(outOffset), os.SEEK_SET)
		entry.Offset = dataOffset
		binary.Write(w, binary.LittleEndian, entry)
		w.Seek(int64(dataOffset), os.SEEK_SET)
		w.Write(b)
		outOffset += binary.Size(bif.FixedEntries[0])
		dataOffset += uint32(len(b))
	}
	return nil
}
Example #24
func diff(obuf, nbuf []byte, patch io.WriteSeeker) error {
	var lenf int
	I := qsufsort(obuf)
	db := make([]byte, len(nbuf))
	eb := make([]byte, len(nbuf))
	var dblen, eblen int

	var hdr header
	hdr.Magic = magic
	hdr.NewSize = int64(len(nbuf))
	err := binary.Write(patch, signMagLittleEndian{}, &hdr)
	if err != nil {
		return err
	}

	// Compute the differences, writing ctrl as we go
	pfbz2, err := gobzip.NewBzipWriter(patch)
	if err != nil {
		return err
	}
	var scan, pos, length int
	var lastscan, lastpos, lastoffset int
	for scan < len(nbuf) {
		var oldscore int
		scan += length
		for scsc := scan; scan < len(nbuf); scan++ {
			pos, length = search(I, obuf, nbuf[scan:], 0, len(obuf))

			for ; scsc < scan+length; scsc++ {
				if scsc+lastoffset < len(obuf) &&
					obuf[scsc+lastoffset] == nbuf[scsc] {
					oldscore++
				}
			}

			if (length == oldscore && length != 0) || length > oldscore+8 {
				break
			}

			if scan+lastoffset < len(obuf) && obuf[scan+lastoffset] == nbuf[scan] {
				oldscore--
			}
		}

		if length != oldscore || scan == len(nbuf) {
			var s, Sf int
			lenf = 0
			for i := 0; lastscan+i < scan && lastpos+i < len(obuf); {
				if obuf[lastpos+i] == nbuf[lastscan+i] {
					s++
				}
				i++
				if s*2-i > Sf*2-lenf {
					Sf = s
					lenf = i
				}
			}

			lenb := 0
			if scan < len(nbuf) {
				var s, Sb int
				for i := 1; (scan >= lastscan+i) && (pos >= i); i++ {
					if obuf[pos-i] == nbuf[scan-i] {
						s++
					}
					if s*2-i > Sb*2-lenb {
						Sb = s
						lenb = i
					}
				}
			}

			if lastscan+lenf > scan-lenb {
				overlap := (lastscan + lenf) - (scan - lenb)
				s := 0
				Ss := 0
				lens := 0
				for i := 0; i < overlap; i++ {
					if nbuf[lastscan+lenf-overlap+i] == obuf[lastpos+lenf-overlap+i] {
						s++
					}
					if nbuf[scan-lenb+i] == obuf[pos-lenb+i] {
						s--
					}
					if s > Ss {
						Ss = s
						lens = i + 1
					}
				}

				lenf += lens - overlap
				lenb -= lens
			}

			for i := 0; i < lenf; i++ {
				db[dblen+i] = nbuf[lastscan+i] - obuf[lastpos+i]
			}
			for i := 0; i < (scan-lenb)-(lastscan+lenf); i++ {
				eb[eblen+i] = nbuf[lastscan+lenf+i]
			}

			dblen += lenf
			eblen += (scan - lenb) - (lastscan + lenf)

			err = binary.Write(pfbz2, signMagLittleEndian{}, int64(lenf))
			if err != nil {
				pfbz2.Close()
				return err
			}

			val := (scan - lenb) - (lastscan + lenf)
			err = binary.Write(pfbz2, signMagLittleEndian{}, int64(val))
			if err != nil {
				pfbz2.Close()
				return err
			}

			val = (pos - lenb) - (lastpos + lenf)
			err = binary.Write(pfbz2, signMagLittleEndian{}, int64(val))
			if err != nil {
				pfbz2.Close()
				return err
			}

			lastscan = scan - lenb
			lastpos = pos - lenb
			lastoffset = pos - scan
		}
	}
	err = pfbz2.Close()
	if err != nil {
		return err
	}

	// Compute size of compressed ctrl data
	l64, err := patch.Seek(0, 1)
	if err != nil {
		return err
	}
	hdr.CtrlLen = int64(l64 - 32)

	// Write compressed diff data
	pfbz2, err = gobzip.NewBzipWriter(patch)
	if err != nil {
		return err
	}
	n, err := pfbz2.Write(db[:dblen])
	if err != nil {
		pfbz2.Close()
		return err
	}
	if n != dblen {
		pfbz2.Close()
		return io.ErrShortWrite
	}
	err = pfbz2.Close()
	if err != nil {
		return err
	}

	// Compute size of compressed diff data
	n64, err := patch.Seek(0, 1)
	if err != nil {
		return err
	}
	hdr.DiffLen = n64 - l64

	// Write compressed extra data
	pfbz2, err = gobzip.NewBzipWriter(patch)
	if err != nil {
		return err
	}
	n, err = pfbz2.Write(eb[:eblen])
	if err != nil {
		pfbz2.Close()
		return err
	}
	if n != eblen {
		pfbz2.Close()
		return io.ErrShortWrite
	}
	err = pfbz2.Close()
	if err != nil {
		return err
	}

	// Seek to the beginning, write the header, and close the file
	_, err = patch.Seek(0, 0)
	if err != nil {
		return err
	}
	err = binary.Write(patch, signMagLittleEndian{}, &hdr)
	if err != nil {
		return err
	}
	return nil
}
Example #25
func (p *MemoryPartition) Compact(w io.WriteSeeker) error {
	if !p.readOnly {
		return errorPartitionNotReadyOnly
	}

	meta := map[metaKey]metaValue{}

	gzipWriter := gzip.NewWriter(w)

	sources := []string{}
	metricsBySource := map[string][]string{}

	for sourceName, source := range p.sources {
		sources = append(sources, sourceName)

		for metricName, metric := range source.metrics {
			metricsBySource[sourceName] = append(metricsBySource[sourceName], metricName)

			extents := splitIntoExtents(metric.points)

			currentOffset, err := w.Seek(0, 1)
			if err != nil {
				return err
			}

			metaVal := metaValue{
				offset:    currentOffset,
				numPoints: uint32(len(metric.points)),
			}

			for extentIndex, ext := range extents {
				gzipWriter.Close()
				gzipWriter = gzip.NewWriter(w)
				currentOffset, err := w.Seek(0, 1)
				if err != nil {
					return err
				}

				ext.offset = currentOffset

				err = binary.Write(gzipWriter, binary.LittleEndian, ext.points)
				if err != nil {
					return err
				}

				err = gzipWriter.Flush()
				if err != nil {
					return err
				}

				extents[extentIndex] = ext
			}

			metaVal.extents = extents

			meta[metaKey{sourceName, metricName}] = metaVal
		}
	}

	err := gzipWriter.Close()
	if err != nil {
		return err
	}

	metaStartOffset, err := w.Seek(0, 1)
	if err != nil {
		return err
	}

	// Start writing metadata
	// Magic sequence
	err = binary.Write(w, binary.BigEndian, disk.Magic)
	if err != nil {
		return err
	}

	err = binary.Write(w, binary.LittleEndian, p.minTS)
	if err != nil {
		return err
	}

	err = binary.Write(w, binary.LittleEndian, p.maxTS)
	if err != nil {
		return err
	}

	// Encode the number of sources
	err = binary.Write(w, binary.LittleEndian, uint16(len(sources)))
	if err != nil {
		return err
	}

	sort.Strings(sources)

	for _, sourceName := range sources {
		err = binary.Write(w, binary.LittleEndian, uint8(len(sourceName)))
		if err != nil {
			return err
		}

		_, err = w.Write([]byte(sourceName))
		if err != nil {
			return err
		}

		metrics := metricsBySource[sourceName]
		sort.Strings(metrics)

		// Encode number of metrics
		err = binary.Write(w, binary.LittleEndian, uint16(len(metrics)))
		if err != nil {
			return err
		}

		for _, metricName := range metrics {
			err = binary.Write(w, binary.LittleEndian, uint8(len(metricName)))
			if err != nil {
				return err
			}

			_, err = w.Write([]byte(metricName))
			if err != nil {
				return err
			}

			metadata := meta[metaKey{sourceName, metricName}]

			err = binary.Write(w, binary.LittleEndian, metadata.offset)
			if err != nil {
				return err
			}

			err = binary.Write(w, binary.LittleEndian, uint32(metadata.numPoints))
			if err != nil {
				return err
			}

			err = binary.Write(w, binary.LittleEndian, uint32(len(metadata.extents)))
			if err != nil {
				return err
			}

			for _, ext := range metadata.extents {
				err = binary.Write(w, binary.LittleEndian, ext.startTS)
				if err != nil {
					return err
				}

				err = binary.Write(w, binary.LittleEndian, ext.offset)
				if err != nil {
					return err
				}

				err = binary.Write(w, binary.LittleEndian, ext.numPoints)
				if err != nil {
					return err
				}
			}
		}
	}

	err = binary.Write(w, binary.LittleEndian, metaStartOffset)
	if err != nil {
		return err
	}

	return nil
}
Example #26
func (s *storageImpl) download(url string, output io.WriteSeeker) error {
	// reportProgress prints fetch progress, throttling the report rate.
	var prevProgress int64 = 1000 // >100%
	var prevReportTs time.Time
	reportProgress := func(read int64, total int64) {
		now := s.client.clock.now()
		progress := read * 100 / total
		if progress < prevProgress || read == total || now.Sub(prevReportTs) > downloadReportInterval {
			s.client.Logger.Infof("cipd: fetching - %d%%", progress)
			prevReportTs = now
			prevProgress = progress
		}
	}

	// download is a separate function to be able to use deferred close.
	download := func(out io.WriteSeeker, src io.ReadCloser, totalLen int64) error {
		defer src.Close()
		s.client.Logger.Infof("cipd: about to fetch %.1f Mb", float32(totalLen)/1024.0/1024.0)
		reportProgress(0, totalLen)
		_, err := io.Copy(out, &readerWithProgress{
			reader:   src,
			callback: func(read int64) { reportProgress(read, totalLen) },
		})
		if err == nil {
			s.client.Logger.Infof("cipd: fetch finished successfully")
		}
		return err
	}

	// Download the actual data (several attempts).
	for attempt := 0; attempt < downloadMaxAttempts; attempt++ {
		// Rewind output to zero offset.
		_, err := output.Seek(0, os.SEEK_SET)
		if err != nil {
			return err
		}

		// Send the request.
		s.client.Logger.Infof("cipd: initiating the fetch")
		var req *http.Request
		var resp *http.Response
		req, err = http.NewRequest("GET", url, nil)
		if err != nil {
			return err
		}
		req.Header.Set("User-Agent", s.client.UserAgent)
		resp, err = s.client.doAnonymousHTTPRequest(req)
		if err != nil {
			if isTemporaryNetError(err) {
				s.client.Logger.Warningf("cipd: transient network error: %s", err)
				continue
			}
			return err
		}

		// Transient error, retry.
		if isTemporaryHTTPError(resp.StatusCode) {
			resp.Body.Close()
			s.client.Logger.Warningf("cipd: transient HTTP error %d while fetching the file", resp.StatusCode)
			continue
		}

		// Fatal error, abort.
		if resp.StatusCode >= 400 {
			resp.Body.Close()
			return fmt.Errorf("Server replied with HTTP code %d", resp.StatusCode)
		}

		// Try to fetch (will close resp.Body when done).
		err = download(output, resp.Body, resp.ContentLength)
		if err != nil {
			s.client.Logger.Warningf("cipd: transient error fetching the file: %s", err)
			continue
		}

		// Success.
		return nil
	}

	return ErrDownloadError
}
Example #27
// Make reads cdb-formatted records from r and writes a cdb-format database
// to w.  See the documentation for Dump for details on the input record format.
func Make(w io.WriteSeeker, r io.Reader) (err error) {
	defer func() { // Centralize error handling.
		if e := recover(); e != nil {
			err = e.(error)
		}
	}()

	if _, err = w.Seek(int64(headerSize), 0); err != nil {
		return
	}

	buf := make([]byte, 8)
	rb := bufio.NewReader(r)
	wb := bufio.NewWriter(w)
	hash := cdbHash()
	hw := io.MultiWriter(hash, wb) // Computes hash when writing record key.
	rr := &recReader{rb}
	htables := make(map[uint32][]slot)
	pos := headerSize
	// Read all records and write to output.
	for {
		// Record format is "+klen,dlen:key->data\n"
		c := rr.readByte()
		if c == '\n' { // end of records
			break
		}
		if c != '+' {
			return BadFormatError
		}
		klen, dlen := rr.readNum(','), rr.readNum(':')
		writeNums(wb, klen, dlen, buf)
		hash.Reset()
		rr.copyn(hw, klen)
		rr.eatByte('-')
		rr.eatByte('>')
		rr.copyn(wb, dlen)
		rr.eatByte('\n')
		h := hash.Sum32()
		tableNum := h % 256
		htables[tableNum] = append(htables[tableNum], slot{h, pos})
		pos += 8 + klen + dlen
	}

	// Write hash tables and header.

	// Create and reuse a single hash table.
	maxSlots := 0
	for _, slots := range htables {
		if len(slots) > maxSlots {
			maxSlots = len(slots)
		}
	}
	slotTable := make([]slot, maxSlots*2)

	header := make([]byte, headerSize)
	// Write hash tables.
	for i := uint32(0); i < 256; i++ {
		slots := htables[i]
		if slots == nil {
			putNum(header[i*8:], pos)
			continue
		}

		nslots := uint32(len(slots) * 2)
		hashSlotTable := slotTable[:nslots]
		// Reset table slots.
		for j := 0; j < len(hashSlotTable); j++ {
			hashSlotTable[j].h = 0
			hashSlotTable[j].pos = 0
		}

		for _, slot := range slots {
			slotPos := (slot.h / 256) % nslots
			for hashSlotTable[slotPos].pos != 0 {
				slotPos++
				if slotPos == uint32(len(hashSlotTable)) {
					slotPos = 0
				}
			}
			hashSlotTable[slotPos] = slot
		}

		if err = writeSlots(wb, hashSlotTable, buf); err != nil {
			return
		}

		putNum(header[i*8:], pos)
		putNum(header[i*8+4:], nslots)
		pos += 8 * nslots
	}

	if err = wb.Flush(); err != nil {
		return
	}

	if _, err = w.Seek(0, 0); err != nil {
		return
	}

	_, err = w.Write(header)

	return
}