// ConvertToUTF8 rewrites the TLK to w, re-encoding each string as UTF-8 and
// updating the entry offsets and lengths to match.
func (t *TLK) ConvertToUTF8(w io.WriteSeeker) error {
	if _, err := w.Seek(0, io.SeekStart); err != nil {
		return err
	}
	if err := binary.Write(w, binary.LittleEndian, t.header); err != nil {
		return err
	}
	curStringOffset := 0
	strArray := []string{}
	for i := 0; i < len(t.entries); i++ {
		str, err := t.String(i)
		if err != nil {
			return err
		}
		strArray = append(strArray, str)
		t.entries[i].Offset = uint32(curStringOffset)
		t.entries[i].Length = uint32(len(str))
		curStringOffset += int(t.entries[i].Length)
		// Rewrite this entry in place in the entry table.
		if _, err := w.Seek(int64(binary.Size(t.header)+binary.Size(t.entries[0])*i), io.SeekStart); err != nil {
			return err
		}
		if err := binary.Write(w, binary.LittleEndian, t.entries[i]); err != nil {
			return err
		}
	}
	if _, err := w.Seek(int64(t.header.StringOffset), io.SeekStart); err != nil {
		return err
	}
	for _, str := range strArray {
		if _, err := w.Write([]byte(str)); err != nil {
			return err
		}
	}
	return nil
}
// WriteJson serializes the TLK's strings and sound resource references as
// indented JSON, keyed by string ID.
func (t *TLK) WriteJson(w io.WriteSeeker) error {
	out := TlkJson{}
	out.Strings = make(map[string]string)
	out.Sounds = make(map[string]string)
	for idx := 0; idx < t.GetStringCount(); idx++ {
		str, err := t.String(idx)
		if err != nil {
			return err
		}
		stringId := fmt.Sprintf("%d", idx)
		out.Strings[stringId] = str
		entry, err := t.Entry(idx)
		if err != nil {
			return err
		}
		if entry.Sound.Valid() {
			out.Sounds[stringId] = entry.Sound.String()
		}
	}
	bytes, err := json.MarshalIndent(out, "", "\t")
	if err != nil {
		return err
	}
	_, err = w.Write(bytes)
	return err
}
// writeBlock writes a full or partial block into ws. len(p) must not exceed
// bs - crc32Len. Each block contains a crc32Len-byte crc32c of the payload
// followed by an up-to-(bs-crc32Len)-byte payload. Any error encountered is
// returned. It is the caller's responsibility to ensure that only the last
// block in the file has len(p) < bs-crc32Len.
func writeBlock(ws io.WriteSeeker, index, bs int64, p []byte) error {
	if int64(len(p)) > bs-crc32Len {
		return ErrPayloadSizeTooLarge
	}
	// Seek to the beginning of the block.
	if _, err := ws.Seek(index*bs, io.SeekStart); err != nil {
		return err
	}
	// Write the crc32c of the payload.
	// TODO: reuse buffer
	b := make([]byte, crc32Len)
	binary.BigEndian.PutUint32(b, crc32.Checksum(p, crc32cTable))
	if _, err := ws.Write(b); err != nil {
		return err
	}
	// Write the payload.
	_, err := ws.Write(p)
	return err
}
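// A minimal sketch of how writeBlock might be driven to persist a whole
// buffer under the contract above; writeAllBlocks is illustrative and not
// part of the original code. Only the final block may be short.
func writeAllBlocks(ws io.WriteSeeker, data []byte, blockSize int64) error {
	payload := blockSize - crc32Len
	for index := int64(0); len(data) > 0; index++ {
		n := payload
		if int64(len(data)) < n {
			n = int64(len(data)) // Final, partial block.
		}
		if err := writeBlock(ws, index, blockSize, data[:n]); err != nil {
			return err
		}
		data = data[n:]
	}
	return nil
}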
// writeAt seeks to offset c in w and writes p, returning the number of bytes
// written. It emulates an io.WriterAt-style write on top of an io.WriteSeeker.
func writeAt(w io.WriteSeeker, c int64, p []byte) (int64, error) {
	if _, err := w.Seek(c, io.SeekStart); err != nil {
		return 0, err
	}
	n, err := w.Write(p)
	return int64(n), err
}
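// writeAt makes it straightforward to adapt an io.WriteSeeker into an
// io.WriterAt. A minimal sketch of such an adapter; seekerAt is illustrative,
// not original code, and, like writeAt itself, it mutates the underlying
// seek offset, so it is not safe for concurrent use.
type seekerAt struct {
	ws io.WriteSeeker
}

func (s seekerAt) WriteAt(p []byte, off int64) (int, error) {
	n, err := writeAt(s.ws, off, p)
	return int(n), err
}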
// writeChunk writes a chunk to the given stream.
func writeChunk(w io.WriteSeeker, cd *ChunkDescriptor, offset int) error {
	// Jump to chunk sector.
	_, err := w.Seek(int64(offset)*sectorSize, io.SeekStart)
	if err != nil {
		return err
	}
	// Write compressed data size.
	err = writeU32(w, uint32(len(cd.data)))
	if err != nil {
		return err
	}
	// Write compression scheme.
	err = writeU8(w, ZLib)
	if err != nil {
		return err
	}
	// Write compressed data.
	_, err = w.Write(cd.data)
	if err != nil {
		return err
	}
	// Pad out the remainder of the chunk's sectors with zero bytes.
	padding := (cd.SectorCount() * sectorSize) - len(cd.data)
	_, err = w.Write(make([]byte, padding))
	return err
}
// writeHeader writes header data into the given writer.
func writeHeader(w io.WriteSeeker, set []*ChunkDescriptor) error {
	var locations, timestamps [sectorSize]byte
	offset := 2 // Skip first two sectors for the header.
	for _, cd := range set {
		if cd == nil {
			continue
		}
		sectors := cd.SectorCount()
		writeOffset(locations[:], cd.X, cd.Z, offset, sectors)
		writeTimestamp(timestamps[:], cd.X, cd.Z, cd.LastModified)
		offset += sectors
	}
	_, err := w.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}
	_, err = w.Write(locations[:])
	if err != nil {
		return err
	}
	_, err = w.Write(timestamps[:])
	return err
}
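// Taken together, writeHeader and writeChunk imply the overall layout: two
// header sectors (locations, then timestamps) followed by chunk data. A
// minimal sketch of a full write under that assumption, reusing the same
// offset bookkeeping as writeHeader; writeRegion is illustrative, not
// original code.
func writeRegion(w io.WriteSeeker, set []*ChunkDescriptor) error {
	if err := writeHeader(w, set); err != nil {
		return err
	}
	offset := 2 // Chunk data begins after the two header sectors.
	for _, cd := range set {
		if cd == nil {
			continue
		}
		if err := writeChunk(w, cd, offset); err != nil {
			return err
		}
		offset += cd.SectorCount()
	}
	return nil
}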
// write writes s at the current offset of f, then seeks back to the start so
// the contents can be read from the beginning.
func write(f io.WriteSeeker, s string) error {
	_, err := f.Write([]byte(s))
	if err != nil {
		return err
	}
	_, err = f.Seek(0, io.SeekStart)
	return err
}
// Decode decodes a value from the stream, extracting any embedded file
// contents that follow it into temporary files on disk.
func (dec decoder) Decode(x interface{}) error {
	err := dec.dec.Decode(x)
	if err != nil {
		return err
	}
	var names []string
	err = dec.dec.Decode(&names)
	if err != nil {
		return err
	}
	// Collect every *File reachable from x, indexed by name, so the streamed
	// contents can be attached to them.
	files := make(map[string][]*File)
	typeapply.Do(func(f *File) {
		if f != nil && f.Name != "" {
			files[f.Name] = append(files[f.Name], f)
		}
	}, x)
	for _, name := range names {
		var out io.WriteSeeker
		if files[name] == nil {
			// The stream carries a file that x does not reference; discard
			// its contents.
			out = nullWriter{}
		} else {
			samefiles := files[name]
			f := samefiles[0]
			if f == nil {
				return errors.New("file not found in manifest")
			}
			f.file, err = ioutil.TempFile("", "filemarshal")
			if err != nil {
				return err
			}
			f.Name = f.file.Name()
			// Point every *File with the same name at the same temp file.
			for _, g := range samefiles[1:] {
				*g = *f
			}
			out = f.file
		}
		// Copy chunks until a zero-length chunk marks end-of-file.
		for {
			var buf []byte
			err = dec.dec.Decode(&buf)
			if err != nil {
				return err
			}
			if len(buf) == 0 {
				break
			}
			_, err = out.Write(buf)
			if err != nil {
				return err
			}
		}
		if _, err := out.Seek(0, io.SeekStart); err != nil {
			return err
		}
	}
	return nil
}
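// The chunk framing Decode expects is simple: repeated gob-encoded []byte
// values terminated by an empty one. A minimal sketch of the sending side of
// just that framing, under the assumption that the peer uses the same gob
// stream; encodeChunks is illustrative, not part of the original code.
func encodeChunks(enc *gob.Encoder, r io.Reader) error {
	buf := make([]byte, 8192)
	for {
		n, err := r.Read(buf)
		if n > 0 {
			if err := enc.Encode(buf[:n]); err != nil {
				return err
			}
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
	}
	// A zero-length chunk marks end-of-file for the decoder.
	return enc.Encode([]byte{})
}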
// download copies the blob stored under url into output, or returns
// ErrDownloadError if the URL is unknown to the mock.
func (s *mockedStorage) download(url string, output io.WriteSeeker) error {
	blob, ok := s.data[url]
	if !ok {
		return ErrDownloadError
	}
	_, err := output.Seek(0, io.SeekStart)
	s.c.So(err, ShouldBeNil)
	_, err = output.Write(blob)
	s.c.So(err, ShouldBeNil)
	return nil
}
// writeMutation encodes a mutation into a block and appends the block to the
// writer, returning any error encountered. The seek offset is left unmodified.
func writeMutation(w io.WriteSeeker, mut string) (err error) {
	b := newBlock(mut)
	offset, _ := w.Seek(0, io.SeekCurrent) // Remember where we are.
	// We use two write calls because we use encoding/binary to write the
	// fixed-length header.
	err = binary.Write(w, binary.LittleEndian, b.Hdr)
	if err != nil {
		return
	}
	// We've written the header successfully; write the rest of the data.
	_, err = w.Write(b.Data)
	w.Seek(offset, io.SeekStart) // Rewind so we don't break readMutation().
	return
}
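// The rewind can be exercised directly: after a successful call, Seek with
// io.SeekCurrent reports the same offset as before. A minimal sketch of such
// a check, assuming an *os.File as the io.WriteSeeker; checkOffsetPreserved
// is illustrative, not original code.
func checkOffsetPreserved(f *os.File, mut string) error {
	before, err := f.Seek(0, io.SeekCurrent)
	if err != nil {
		return err
	}
	if err := writeMutation(f, mut); err != nil {
		return err
	}
	after, err := f.Seek(0, io.SeekCurrent)
	if err != nil {
		return err
	}
	if before != after {
		return fmt.Errorf("offset moved: %d -> %d", before, after)
	}
	return nil
}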
// Write serializes the TLK header, entry table, and string data to w.
func (t *TLK) Write(w io.WriteSeeker) error {
	if _, err := w.Seek(0, io.SeekStart); err != nil {
		return err
	}
	if err := binary.Write(w, binary.LittleEndian, t.header); err != nil {
		return err
	}
	if err := binary.Write(w, binary.LittleEndian, t.entries); err != nil {
		return err
	}
	// The string buffer follows the entry table immediately, so no further
	// seek is needed.
	_, err := w.Write(t.stringBuf)
	return err
}
// growBy extends w by n bytes of filler, reporting any error via panicIf.
func growBy(w io.WriteSeeker, n int) {
	var err error
	defer panicIf(&err)
	// Seek to the end.
	_, err = w.Seek(0, io.SeekEnd)
	if err != nil {
		err = fmt.Errorf("Seek: %v", err)
		return
	}
	// Write n filler bytes.
	_, err = w.Write(bytes.Repeat([]byte("a"), n))
	if err != nil {
		err = fmt.Errorf("Write: %v", err)
		return
	}
}
// writeBlock writes b at block index in f, prefixing the payload with its
// CRC32 so readers can detect corruption.
func writeBlock(f io.WriteSeeker, b *Block, index int) error {
	if b.right > payloadSize {
		return ErrPayloadSizeTooLarge
	}
	if b.left != 0 {
		panic("block is not left aligned")
	}
	if err := seekToIndex(f, index); err != nil {
		return err
	}
	// Lay out the CRC followed by the payload in one buffer so the block is
	// written with a single call.
	crcBuf := make([]byte, crc32Len+len(b.Payload()))
	binary.BigEndian.PutUint32(crcBuf, b.CRC())
	copy(crcBuf[crc32Len:], b.Payload())
	_, err := f.Write(crcBuf)
	return err
}
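// The inverse operation makes the framing clear: a reader that loads a raw
// block can recompute the checksum over the payload and compare it with the
// stored big-endian prefix. A minimal sketch over an in-memory block;
// verifyBlock is illustrative, and the crc32.Table passed in must match
// whatever polynomial b.CRC() actually uses.
func verifyBlock(raw []byte, tab *crc32.Table) ([]byte, error) {
	if len(raw) < crc32Len {
		return nil, errors.New("block too short")
	}
	want := binary.BigEndian.Uint32(raw)
	payload := raw[crc32Len:]
	if crc32.Checksum(payload, tab) != want {
		return nil, errors.New("crc mismatch")
	}
	return payload, nil
}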
// NewWriter opens a CDB database for the given io.WriteSeeker.
//
// If hasher is nil, it will default to the CDB hash function.
func NewWriter(writer io.WriteSeeker, hasher hash.Hash32) (*Writer, error) {
	// Leave 256 * 8 bytes for the index at the head of the file.
	_, err := writer.Seek(0, io.SeekStart)
	if err != nil {
		return nil, err
	}
	_, err = writer.Write(make([]byte, indexSize))
	if err != nil {
		return nil, err
	}
	if hasher == nil {
		hasher = newCDBHash()
	}
	return &Writer{
		hasher:         hasher,
		writer:         writer,
		bufferedWriter: bufio.NewWriterSize(writer, 65536),
		bufferedOffset: indexSize,
	}, nil
}
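// A minimal usage sketch: create the backing file and hand it to NewWriter,
// which reserves the index region up front. createDatabase and the path
// handling are illustrative, not part of the original code.
func createDatabase(path string) (*Writer, *os.File, error) {
	f, err := os.Create(path)
	if err != nil {
		return nil, nil, err
	}
	w, err := NewWriter(f, nil) // nil selects the default CDB hash.
	if err != nil {
		f.Close()
		return nil, nil, err
	}
	return w, f, nil
}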
// goDecoder starts a goroutine that scatters decoded words into w at their
// computed offsets. It returns the channel to feed word slices into and a
// channel that is signalled once the goroutine has drained msg.
func goDecoder(gen *BinMatMN, w io.WriteSeeker, fsize int64, n int) (chan []uint32, chan struct{}) {
	word := int64(W >> 3)
	size := arrangeSize(fsize, int64(n))
	msg := make(chan []uint32, 10)
	closed := make(chan struct{})
	go func() {
		var err error
		pb := make([]byte, word)
		var i int64
		for m := range msg {
			for j, p := range m {
				loc := int64(j)*size + i*word
				if loc > fsize {
					continue
				}
				if _, err = w.Seek(loc, io.SeekStart); err != nil {
					log.Fatal(err)
				}
				binary.LittleEndian.PutUint32(pb, p)
				// Truncate the final word so we never write past fsize.
				remain := fsize - loc
				wb := pb
				if remain < word {
					wb = wb[:remain]
				}
				if _, err := w.Write(wb); err != nil {
					log.Fatal(err)
				}
			}
			i++
		}
		closed <- struct{}{}
	}()
	return msg, closed
}
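// A minimal sketch of driving the decoder goroutine: send word slices, close
// msg to let the goroutine's range loop exit, then wait on closed so no
// write races the caller's later use of w. runDecoder is illustrative, not
// original code.
func runDecoder(gen *BinMatMN, w io.WriteSeeker, fsize int64, n int, words [][]uint32) {
	msg, closed := goDecoder(gen, w, fsize, n)
	for _, ws := range words {
		msg <- ws
	}
	close(msg)
	<-closed
}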
// ConvertToBIFL rewrites a BIF archive as a BIFL (LZMA-compressed) archive.
// Variable entries are individually compressed; entries that do not shrink
// are stored uncompressed behind a zero size marker.
func ConvertToBIFL(r io.ReadSeeker, w io.WriteSeeker) error {
	if _, err := r.Seek(0, io.SeekStart); err != nil {
		return err
	}
	if _, err := w.Seek(0, io.SeekStart); err != nil {
		return err
	}
	bif, err := OpenBif(r)
	if err != nil {
		return err
	}
	bif.Header.Signature = [4]byte{'B', 'I', 'F', 'L'}
	bif.Header.Version = [4]byte{'V', '1', '.', '0'}
	if err := binary.Write(w, binary.LittleEndian, bif.Header); err != nil {
		return err
	}
	if err := binary.Write(w, binary.LittleEndian, bif.VariableEntries); err != nil {
		return err
	}
	if err := binary.Write(w, binary.LittleEndian, bif.FixedEntries); err != nil {
		return err
	}
	outOffset := binary.Size(bif.Header)
	dataOffset := uint32(outOffset + binary.Size(bif.VariableEntries) + binary.Size(bif.FixedEntries))
	for _, entry := range bif.VariableEntries {
		// Read the entry's data and compress it into a buffer.
		dataIn := make([]byte, entry.Size)
		var lzmaOut bytes.Buffer
		out := lzma.NewWriter(&lzmaOut)
		if _, err := r.Seek(int64(entry.Offset), io.SeekStart); err != nil {
			return err
		}
		if _, err := io.ReadAtLeast(r, dataIn, len(dataIn)); err != nil {
			return err
		}
		if _, err := out.Write(dataIn); err != nil {
			return err
		}
		if err := out.Close(); err != nil {
			return err
		}
		// Rewrite the entry record with its new data offset.
		if _, err := w.Seek(int64(outOffset), io.SeekStart); err != nil {
			return err
		}
		entry.Offset = dataOffset
		if err := binary.Write(w, binary.LittleEndian, entry); err != nil {
			return err
		}
		if _, err := w.Seek(int64(dataOffset), io.SeekStart); err != nil {
			return err
		}
		compressedSize := uint32(lzmaOut.Len())
		if compressedSize < entry.Size {
			if err := binary.Write(w, binary.LittleEndian, compressedSize); err != nil {
				return err
			}
			if _, err := lzmaOut.WriteTo(w); err != nil {
				return err
			}
			// Length of compressed data plus 4 bytes for our 32-bit size field.
			dataOffset += compressedSize + 4
		} else {
			fmt.Printf("Compressed size is larger: %d > %d\n", compressedSize, entry.Size)
			// A zero size marker means the data is stored uncompressed.
			if err := binary.Write(w, binary.LittleEndian, uint32(0)); err != nil {
				return err
			}
			if err := binary.Write(w, binary.LittleEndian, dataIn); err != nil {
				return err
			}
			dataOffset += entry.Size + 4
		}
		outOffset += binary.Size(bif.VariableEntries[0])
	}
	for _, entry := range bif.FixedEntries {
		// Fixed entries are copied through uncompressed.
		b := make([]byte, entry.Size*entry.Number)
		if _, err := r.Seek(int64(entry.Offset), io.SeekStart); err != nil {
			return err
		}
		if _, err := io.ReadAtLeast(r, b, len(b)); err != nil {
			return err
		}
		if _, err := w.Seek(int64(outOffset), io.SeekStart); err != nil {
			return err
		}
		entry.Offset = dataOffset
		if err := binary.Write(w, binary.LittleEndian, entry); err != nil {
			return err
		}
		if _, err := w.Seek(int64(dataOffset), io.SeekStart); err != nil {
			return err
		}
		if _, err := w.Write(b); err != nil {
			return err
		}
		outOffset += binary.Size(bif.FixedEntries[0])
		dataOffset += uint32(len(b))
	}
	return nil
}
// Make reads cdb-formatted records from r and writes a cdb-format database
// to w. See the documentation for Dump for details on the input record format.
func Make(w io.WriteSeeker, r io.Reader) (err error) {
	defer func() { // Centralize error handling.
		if e := recover(); e != nil {
			err = e.(error)
		}
	}()
	// Skip the header; it is written last, once all table positions are known.
	if _, err = w.Seek(int64(headerSize), io.SeekStart); err != nil {
		return
	}
	buf := make([]byte, 8)
	rb := bufio.NewReader(r)
	wb := bufio.NewWriter(w)
	hash := cdbHash()
	hw := io.MultiWriter(hash, wb) // Computes hash when writing record key.
	rr := &recReader{rb}
	htables := make(map[uint32][]slot)
	pos := headerSize
	// Read all records and write to output.
	for {
		// Record format is "+klen,dlen:key->data\n".
		c := rr.readByte()
		if c == '\n' { // End of records.
			break
		}
		if c != '+' {
			return BadFormatError
		}
		klen, dlen := rr.readNum(','), rr.readNum(':')
		writeNums(wb, klen, dlen, buf)
		hash.Reset()
		rr.copyn(hw, klen)
		rr.eatByte('-')
		rr.eatByte('>')
		rr.copyn(wb, dlen)
		rr.eatByte('\n')
		h := hash.Sum32()
		tableNum := h % 256
		htables[tableNum] = append(htables[tableNum], slot{h, pos})
		pos += 8 + klen + dlen
	}
	// Write hash tables and header.
	// Create and reuse a single slot table sized for the largest hash table.
	maxSlots := 0
	for _, slots := range htables {
		if len(slots) > maxSlots {
			maxSlots = len(slots)
		}
	}
	slotTable := make([]slot, maxSlots*2)
	header := make([]byte, headerSize)
	// Write hash tables.
	for i := uint32(0); i < 256; i++ {
		slots := htables[i]
		if slots == nil {
			putNum(header[i*8:], pos)
			continue
		}
		nslots := uint32(len(slots) * 2)
		hashSlotTable := slotTable[:nslots]
		// Reset table slots.
		for j := 0; j < len(hashSlotTable); j++ {
			hashSlotTable[j].h = 0
			hashSlotTable[j].pos = 0
		}
		// Place each slot with linear probing.
		for _, slot := range slots {
			slotPos := (slot.h / 256) % nslots
			for hashSlotTable[slotPos].pos != 0 {
				slotPos++
				if slotPos == uint32(len(hashSlotTable)) {
					slotPos = 0
				}
			}
			hashSlotTable[slotPos] = slot
		}
		if err = writeSlots(wb, hashSlotTable, buf); err != nil {
			return
		}
		putNum(header[i*8:], pos)
		putNum(header[i*8+4:], nslots)
		pos += 8 * nslots
	}
	if err = wb.Flush(); err != nil {
		return
	}
	if _, err = w.Seek(0, io.SeekStart); err != nil {
		return
	}
	_, err = w.Write(header)
	return
}
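// A minimal sketch of feeding Make a few records. The input follows the
// "+klen,dlen:key->data\n" format shown above, with a lone newline
// terminating the stream; makeExample and the file name are illustrative.
func makeExample() error {
	f, err := os.Create("example.cdb")
	if err != nil {
		return err
	}
	defer f.Close()
	records := "+3,5:one->Hello\n" + "+3,7:two->Goodbye\n" + "\n"
	return Make(f, strings.NewReader(records))
}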
// Compact serializes a read-only MemoryPartition to w: gzip-compressed
// extents of points first, then a metadata section, and finally the offset
// of that metadata section.
func (p *MemoryPartition) Compact(w io.WriteSeeker) error {
	if !p.readOnly {
		return errorPartitionNotReadyOnly
	}
	meta := map[metaKey]metaValue{}
	gzipWriter := gzip.NewWriter(w)
	sources := []string{}
	metricsBySource := map[string][]string{}
	for sourceName, source := range p.sources {
		sources = append(sources, sourceName)
		for metricName, metric := range source.metrics {
			metricsBySource[sourceName] = append(metricsBySource[sourceName], metricName)
			extents := splitIntoExtents(metric.points)
			currentOffset, err := w.Seek(0, io.SeekCurrent)
			if err != nil {
				return err
			}
			metaVal := metaValue{
				offset:    currentOffset,
				numPoints: uint32(len(metric.points)),
			}
			for extentIndex, ext := range extents {
				// Each extent gets its own gzip stream so it can be
				// decompressed independently.
				if err := gzipWriter.Close(); err != nil {
					return err
				}
				gzipWriter = gzip.NewWriter(w)
				currentOffset, err := w.Seek(0, io.SeekCurrent)
				if err != nil {
					return err
				}
				ext.offset = currentOffset
				err = binary.Write(gzipWriter, binary.LittleEndian, ext.points)
				if err != nil {
					return err
				}
				err = gzipWriter.Flush()
				if err != nil {
					return err
				}
				extents[extentIndex] = ext
			}
			metaVal.extents = extents
			meta[metaKey{sourceName, metricName}] = metaVal
		}
	}
	err := gzipWriter.Close()
	if err != nil {
		return err
	}
	metaStartOffset, err := w.Seek(0, io.SeekCurrent)
	if err != nil {
		return err
	}
	// Start writing metadata.
	// Magic sequence.
	err = binary.Write(w, binary.BigEndian, disk.Magic)
	if err != nil {
		return err
	}
	err = binary.Write(w, binary.LittleEndian, p.minTS)
	if err != nil {
		return err
	}
	err = binary.Write(w, binary.LittleEndian, p.maxTS)
	if err != nil {
		return err
	}
	// Encode the number of sources.
	err = binary.Write(w, binary.LittleEndian, uint16(len(sources)))
	if err != nil {
		return err
	}
	sort.Strings(sources)
	for _, sourceName := range sources {
		// Each source is a length-prefixed name followed by its metrics.
		err = binary.Write(w, binary.LittleEndian, uint8(len(sourceName)))
		if err != nil {
			return err
		}
		_, err = w.Write([]byte(sourceName))
		if err != nil {
			return err
		}
		metrics := metricsBySource[sourceName]
		sort.Strings(metrics)
		// Encode number of metrics.
		err = binary.Write(w, binary.LittleEndian, uint16(len(metrics)))
		if err != nil {
			return err
		}
		for _, metricName := range metrics {
			err = binary.Write(w, binary.LittleEndian, uint8(len(metricName)))
			if err != nil {
				return err
			}
			_, err = w.Write([]byte(metricName))
			if err != nil {
				return err
			}
			metadata := meta[metaKey{sourceName, metricName}]
			err = binary.Write(w, binary.LittleEndian, metadata.offset)
			if err != nil {
				return err
			}
			err = binary.Write(w, binary.LittleEndian, uint32(metadata.numPoints))
			if err != nil {
				return err
			}
			err = binary.Write(w, binary.LittleEndian, uint32(len(metadata.extents)))
			if err != nil {
				return err
			}
			for _, ext := range metadata.extents {
				err = binary.Write(w, binary.LittleEndian, ext.startTS)
				if err != nil {
					return err
				}
				err = binary.Write(w, binary.LittleEndian, ext.offset)
				if err != nil {
					return err
				}
				err = binary.Write(w, binary.LittleEndian, ext.numPoints)
				if err != nil {
					return err
				}
			}
		}
	}
	// The trailing offset lets a reader seek straight to the metadata.
	return binary.Write(w, binary.LittleEndian, metaStartOffset)
}
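// The trailing metaStartOffset makes the compacted file self-describing: a
// reader can locate the metadata without scanning the extent data. A minimal
// sketch of that lookup, assuming metaStartOffset was written as the int64
// returned by Seek and is the last field in the file; seekToMetadata is
// illustrative, not original code.
func seekToMetadata(r io.ReadSeeker) (int64, error) {
	if _, err := r.Seek(-8, io.SeekEnd); err != nil {
		return 0, err
	}
	var metaStart int64
	if err := binary.Read(r, binary.LittleEndian, &metaStart); err != nil {
		return 0, err
	}
	return r.Seek(metaStart, io.SeekStart)
}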
// MakeFactory creates a CDB and returns an adder function, which should be
// called with each Element, and a closer, which finalizes the CDB.
func MakeFactory(w io.WriteSeeker) (adder AdderFunc, closer CloserFunc, err error) {
	defer func() { // Centralize error handling.
		if e := recover(); e != nil {
			logger.Panicf("error: %s", e)
			err = e.(error)
		}
	}()
	// Skip the header; the closer writes it once all table positions are known.
	if _, err = w.Seek(int64(headerSize), io.SeekStart); err != nil {
		logger.Panicf("cannot seek to %d of %s: %s", headerSize, w, err)
	}
	buf := make([]byte, 8)
	wb := bufio.NewWriter(w)
	hash := cdbHash()
	hw := io.MultiWriter(hash, wb) // Computes hash when writing record key.
	htables := make(map[uint32][]slot)
	poshold := &posHolder{headerSize}
	// adder appends one record to the output and registers it in the
	// in-memory hash tables.
	adder = func(elt Element) error {
		var (
			err        error
			klen, dlen uint32
			n          int
		)
		klen, dlen = uint32(len(elt.Key)), uint32(len(elt.Data))
		writeNums(wb, klen, dlen, buf)
		hash.Reset()
		if n, err = hw.Write(elt.Key); err == nil && uint32(n) != klen {
			logger.Printf("klen=%d written=%d", klen, n)
		} else if err != nil {
			logger.Panicf("error writing key %s: %s", elt.Key, err)
			return err
		}
		if n, err = wb.Write(elt.Data); err == nil && uint32(n) != dlen {
			logger.Printf("dlen=%d written=%d", dlen, n)
		} else if err != nil {
			logger.Panicf("error writing data: %s", err)
			return err
		}
		h := hash.Sum32()
		tableNum := h % 256
		htables[tableNum] = append(htables[tableNum], slot{h, poshold.pos})
		poshold.pos += 8 + klen + dlen
		return nil
	}
	// closer writes the hash tables and the header, finalizing the CDB.
	closer = func() error {
		var err error
		if err = wb.Flush(); err != nil {
			logger.Panicf("cannot flush %+v: %s", wb, err)
			return err
		}
		// Write hash tables and header.
		// Create and reuse a single slot table sized for the largest hash table.
		pos := poshold.pos
		maxSlots := 0
		for _, slots := range htables {
			if len(slots) > maxSlots {
				maxSlots = len(slots)
			}
		}
		slotTable := make([]slot, maxSlots*2)
		header := make([]byte, headerSize)
		// Write hash tables.
		for i := uint32(0); i < 256; i++ {
			slots := htables[i]
			if slots == nil {
				putNum(header[i*8:], pos)
				continue
			}
			nslots := uint32(len(slots) * 2)
			hashSlotTable := slotTable[:nslots]
			// Reset table slots.
			for j := 0; j < len(hashSlotTable); j++ {
				hashSlotTable[j].h = 0
				hashSlotTable[j].pos = 0
			}
			// Place each slot with linear probing.
			for _, slot := range slots {
				slotPos := (slot.h / 256) % nslots
				for hashSlotTable[slotPos].pos != 0 {
					slotPos++
					if slotPos == uint32(len(hashSlotTable)) {
						slotPos = 0
					}
				}
				hashSlotTable[slotPos] = slot
			}
			if err = writeSlots(wb, hashSlotTable, buf); err != nil {
				logger.Panicf("cannot write slots: %s", err)
			}
			putNum(header[i*8:], pos)
			putNum(header[i*8+4:], nslots)
			pos += 8 * nslots
		}
		if err = wb.Flush(); err != nil {
			logger.Panicf("error flushing %s: %s", wb, err)
		}
		if _, err = w.Seek(0, io.SeekStart); err != nil {
			logger.Panicf("error seeking to beginning of %s: %s", w, err)
		}
		if _, err = w.Write(header); err != nil {
			logger.Panicf("cannot write header: %s", err)
		}
		return err
	}
	return adder, closer, nil
}
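// A minimal usage sketch: add elements one by one, then finalize. Note that
// the deferred recover inside MakeFactory does not cover the returned
// closures, so logger.Panicf panics propagate from adder and closer to the
// caller. buildCDB is illustrative, not part of the original code.
func buildCDB(w io.WriteSeeker, elts []Element) error {
	adder, closer, err := MakeFactory(w)
	if err != nil {
		return err
	}
	for _, elt := range elts {
		if err := adder(elt); err != nil {
			return err
		}
	}
	return closer()
}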