Example #1
// checkSumAdjust patches the checkSumAdjustment field of each 'head' table,
// which sits at byte offset 8 within the table; errors from WriteAt are ignored.
func checkSumAdjust(w io.WriterAt, head []int64, checksum ULONG) {
	adjust := make([]byte, 4)
	binary.BigEndian.PutUint32(adjust, uint32(checkSumAdjustmentMagic-checksum))
	for _, offs := range head {
		w.WriteAt(adjust, offs+8)
	}
}
Example #2
func write(fp io.WriterAt,
	c *cache.CacheMap,
	devid, offset, blocks uint32,
	buffer []byte) {

	here := make(chan *message.Message, blocks)
	cacheoffset := cache.Address64(cache.Address{Devid: devid, Block: offset})

	// Send invalidates for each block
	iopkt := &message.IoPkt{
		Address: cacheoffset,
		Blocks:  blocks,
	}
	c.Invalidate(iopkt)

	// Write to storage back end
	// :TODO: check return status
	fp.WriteAt(buffer, int64(offset)*4*KB)

	// Now write to cache
	msg := message.NewMsgPut()
	msg.RetChan = here
	iopkt = msg.IoPkt()
	iopkt.Blocks = blocks
	iopkt.Address = cacheoffset
	iopkt.Buffer = buffer
	c.Put(msg)

	<-here
}
Example #3
func (f *bitFiler) dumpDirty(w io.WriterAt) (nwr int, err error) {
	f.link()
	for pgI, pg := range f.m {
		if !pg.dirty {
			continue
		}

		for pg.prev != nil && pg.prev.dirty {
			pg = pg.prev
			pgI--
		}

		for pg != nil && pg.dirty {
			if _, err := w.WriteAt(pg.data, pgI<<bfBits); err != nil {
				return 0, err
			}

			nwr++
			pg.dirty = false
			pg = pg.next
			pgI++
		}
	}
	return
}
Example #4
func writeTables(table []Table, w io.WriterAt, offset int64) (map[Table]*OffsetEntry, []int64, ULONG, error) {
	total := ULONG(0)
	entryMap := make(map[Table]*OffsetEntry)
	head := make([]int64, 0)
	for _, v := range table {
		t := v.Tag()
		ts := t.String()
		if ts == "head" {
			head = append(head, offset)
		}
		bytes := v.Bytes()
		checksum := calcCheckSum(bytes)
		length := len(bytes)
		if _, err := w.WriteAt(bytes, offset); err != nil {
			return nil, nil, 0, err
		}
		entryMap[v] = &OffsetEntry{
			t,
			checksum,
			ULONG(offset),
			ULONG(length),
		}
		offset = roundUp(offset + int64(length))
		total += checksum
	}
	return entryMap, head, total, nil
}
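
A note on shared helpers: examples #1, #4, #10, #11, and #19 use ULONG, checkSumAdjustmentMagic, calcCheckSum, and roundUp without showing them. The definitions below are only a sketch of what they plausibly look like, following the OpenType/sfnt conventions (ULONG is a 32-bit unsigned integer, the adjustment magic is 0xB1B0AFBA, a table checksum is the sum of its big-endian 32-bit words with the data treated as zero-padded to a 4-byte boundary); the actual projects may define them differently.

import "encoding/binary"

// ULONG is the sfnt 32-bit unsigned integer type.
type ULONG uint32

// checkSumAdjustmentMagic is the constant from which the 'head' table's
// checkSumAdjustment field is derived (0xB1B0AFBA in the spec).
const checkSumAdjustmentMagic ULONG = 0xB1B0AFBA

// calcCheckSum sums a table as big-endian uint32 words, treating the data
// as zero-padded to a multiple of four bytes.
func calcCheckSum(data []byte) ULONG {
	padded := data
	if rem := len(data) % 4; rem != 0 {
		padded = make([]byte, len(data)+4-rem)
		copy(padded, data)
	}
	var sum ULONG
	for i := 0; i < len(padded); i += 4 {
		sum += ULONG(binary.BigEndian.Uint32(padded[i : i+4]))
	}
	return sum
}

// roundUp aligns a file offset to the next 4-byte boundary.
func roundUp(n int64) int64 {
	return (n + 3) &^ 3
}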
Example #5
func (o *Store) ItemValWrite(c *Collection, i *Item, w io.WriterAt, offset int64) error {
	if o.callbacks.ItemValWrite != nil {
		return o.callbacks.ItemValWrite(c, i, w, offset)
	}
	_, err := w.WriteAt(i.Val, offset)
	return err
}
Example #6
func (g *getter) WriteToWriterAt(w io.WriterAt) (int, error) {
	fileOffsetMap := make(map[string]int64)
	filePosition := int64(0)
	totalWritten := int(0)

	for chunk := range g.readCh {
		fileOffset, present := fileOffsetMap[chunk.path]
		if !present {
			fileOffset = filePosition
			fileOffsetMap[chunk.path] = filePosition
			filePosition += chunk.fileSize
		}

		// Resume from any partial write instead of rewriting the whole chunk.
		chunkWritten := int64(0)
		for chunkWritten < chunk.size {
			n, err := w.WriteAt(chunk.b[chunkWritten:chunk.size], fileOffset+chunk.start+chunkWritten)
			chunkWritten += int64(n)
			totalWritten += n
			if err != nil {
				return totalWritten, err
			}
		}

		g.sp.give <- chunk.b
	}
	return totalWritten, nil
}
Example #7
func (self *rlewrite) Write(b io.WriterAt) (err error) {
	s := make([]byte, self.num)
	for i := range s {
		s[i] = self.data
	}
	_, err = b.WriteAt(s, int64(self.start))
	return
}
Example #8
// writeNumRecs stores the record count as a big-endian 32-bit value at
// _NumRecsOffset; out-of-range counts (negative or >= 2^31) are stored as -1 (0xFFFFFFFF).
func writeNumRecs(w io.WriterAt, numrecs int64) error {
	if numrecs >= (1<<31) || numrecs < 0 {
		numrecs = -1
	}
	buf := [4]byte{byte(numrecs >> 24), byte(numrecs >> 16), byte(numrecs >> 8), byte(numrecs)}
	_, err := w.WriteAt(buf[:], _NumRecsOffset)
	return err
}
Example #9
func writeAtWithBuffer(w io.WriterAt, str string, off int64) (int, error) {
	n := len(str)
	// Reuse the package-level buffer, resized to hold n bytes.
	extendAndSliceBuffer(n)
	copy(buffer, str)
	return w.WriteAt(buffer, off)
}
Example #10
func writeFontsOffset(w io.WriterAt, offset int64, fonts TTCOffsetTable) (ULONG, error) {
	buf := new(bytes.Buffer)
	binary.Write(buf, binary.BigEndian, fonts)
	bytes := buf.Bytes()
	if _, err := w.WriteAt(bytes, offset); err != nil {
		return 0, err
	}
	return calcCheckSum(bytes), nil
}
Example #11
func writeAt(w io.WriterAt, i interface{}, offset int64) (ULONG, int64, error) {
	buf := new(bytes.Buffer)
	binary.Write(buf, binary.BigEndian, i)
	bytes := buf.Bytes()
	if _, err := w.WriteAt(bytes, offset); err != nil {
		return 0, 0, err
	}
	return calcCheckSum(bytes), offset + int64(len(bytes)), nil
}
Example #12
// WriteData writes data to the location described by the info's entry.
func (info *blockListInfo) WriteData(writer io.WriterAt, data []byte) error {
	if info.Entry.Free == 0 {
		return errors.New("entry is free, unable to write")
	}
	if int64(binary.Size(data)) != info.Entry.Size {
		return errors.New("size of data does not match the entry size")
	}
	_, err := writer.WriteAt(data, info.Entry.Start)
	return err
}
Example #13
func (f *bitFiler) dumpDirty(w io.WriterAt) (nwr int, err error) {
	f.Lock()
	defer f.Unlock()
	f.link()
	for pgI, pg := range f.m {
		if !pg.dirty {
			continue
		}

		for pg.prev != nil && pg.prev.dirty {
			pg = pg.prev
			pgI--
		}

		for pg != nil && pg.dirty {
			last := false
			var off int64
			first := -1
			for i := 0; i < bfSize; i++ {
				flag := pg.flags[i>>3]&bitmask[i&7] != 0
				switch {
				case flag && !last: // Leading edge detected
					off = pgI<<bfBits + int64(i)
					first = i
				case !flag && last: // Trailing edge detected
					n, err := w.WriteAt(pg.data[first:i], off)
					if n != i-first {
						return 0, err
					}
					first = -1
					nwr++
				}

				last = flag
			}
			if first >= 0 {
				i := bfSize
				n, err := w.WriteAt(pg.data[first:i], off)
				if n != i-first {
					return 0, err
				}

				nwr++
			}

			pg.dirty = false
			pg = pg.next
			pgI++
		}
	}
	return
}
Example #14
func applyContent(cc <-chan content, dst io.WriterAt) error {
	var err error

	for c := range cc {
		_, err = dst.WriteAt(c.data, c.offset)
		buffers.Put(c.data)
		if err != nil {
			return err
		}
	}

	return nil
}
Example #15
func fill(w io.WriterAt, begin, end int64, val interface{}, dtype datatype) error {
	var buf bytes.Buffer
	binary.Write(&buf, binary.BigEndian, val)
	if buf.Len() != dtype.storageSize() {
		panic("invalid fill value")
	}
	d := int64(buf.Len())
	for ; begin < end; begin += d {
		if _, err := w.WriteAt(buf.Bytes(), begin); err != nil {
			return err
		}
	}
	return nil
}
Example #16
func replicate(dst io.WriterAt, src io.ReaderAt) (err error) {
	buf := make([]byte, 1<<12)
	p := int64(0)
	for {
		n, e := src.ReadAt(buf, p)
		err = e
		if n > 0 {
			if _, werr := dst.WriteAt(buf[:n], p); werr != nil {
				return werr
			}
			p += int64(n) // advance, otherwise the same block is copied forever
		}
		if err != nil {
			break
		}
	}
	if err == io.EOF {
		err = nil // EOF just means the source was fully copied
	}
	return
}
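
Because *os.File satisfies both io.ReaderAt and io.WriterAt, replicate can be applied to plain files. A minimal usage sketch (the file names are invented for illustration; assumes the os and log packages are imported):

src, err := os.Open("input.bin")
if err != nil {
	log.Fatal(err)
}
defer src.Close()

dst, err := os.Create("output.bin")
if err != nil {
	log.Fatal(err)
}
defer dst.Close()

if err := replicate(dst, src); err != nil {
	log.Fatal(err)
}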
Example #17
// NewBufferedSectionWriter converts incoming Write() requests into
// buffered, asynchronous WriteAt()'s in a section of a file.
func NewBufferedSectionWriter(w io.WriterAt, begPos, maxBytes int64,
	bufSize int) *bufferedSectionWriter {
	stopCh := make(chan struct{})
	doneCh := make(chan struct{})
	reqCh := make(chan ioBuf)
	resCh := make(chan ioBuf)

	go func() {
		defer close(doneCh)
		defer close(resCh)

		buf := make([]byte, bufSize)
		var pos int64
		var err error

		for {
			select {
			case <-stopCh:
				return
			case resCh <- ioBuf{buf: buf, pos: pos, err: err}:
			}

			req, ok := <-reqCh
			if ok {
				buf, pos = req.buf, req.pos
				if len(buf) > 0 {
					_, err = w.WriteAt(buf, pos)
				}
			}
		}
	}()

	return &bufferedSectionWriter{
		w:   w,
		beg: begPos,
		cur: begPos,
		max: maxBytes,
		buf: make([]byte, bufSize),

		stopCh: stopCh,
		doneCh: doneCh,
		reqCh:  reqCh,
		resCh:  resCh,
	}
}
Example #18
// writeIndexEntry writes the msgID, msgOffset, and msgSize of the entry at
// the given `pos` into a .idx file.
func writeIndexEntry(w io.WriterAt, id uint64, offset uint64, size uint32, pos uint64) error {
	position := int64(uint64(indexEntrySize) * pos)
	offsetBuffer := make([]byte, indexEntrySize)

	binary.LittleEndian.PutUint64(offsetBuffer, id)
	binary.LittleEndian.PutUint64(offsetBuffer[8:], offset)
	binary.LittleEndian.PutUint32(offsetBuffer[16:], size)

	if _, err := w.WriteAt(offsetBuffer, position); err != nil {
		logger.WithFields(log.Fields{
			"err":      err,
			"position": position,
			"id":       id,
		}).Error("Error writing index entry")
		return err
	}
	return nil
}
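
For reference, a matching reader for the 20-byte entry layout written above (8-byte id, 8-byte offset, 4-byte size, all little-endian) could look like the sketch below; it assumes indexEntrySize is 20, which is what the offsets used in writeIndexEntry imply.

// readIndexEntry is a sketch of the inverse of writeIndexEntry: it loads the
// entry stored at `pos` from an io.ReaderAt. Assumes indexEntrySize == 20.
func readIndexEntry(r io.ReaderAt, pos uint64) (id, offset uint64, size uint32, err error) {
	buf := make([]byte, indexEntrySize)
	if _, err = r.ReadAt(buf, int64(uint64(indexEntrySize)*pos)); err != nil {
		return 0, 0, 0, err
	}
	id = binary.LittleEndian.Uint64(buf)
	offset = binary.LittleEndian.Uint64(buf[8:])
	size = binary.LittleEndian.Uint32(buf[16:])
	return id, offset, size, nil
}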
Example #19
func writeTableDirectory(f SFNT, entryMap map[Table]*OffsetEntry, w io.WriterAt, offset int64) (ULONG, error) {
	tag := make(sort.StringSlice, 0)
	for k := range f {
		tag = append(tag, k)
	}
	sort.Sort(tag)
	entry := make([]OffsetEntry, f.NumTables())
	for i, ts := range tag {
		entry[i] = *(entryMap[f[ts]])
	}
	buf := new(bytes.Buffer)
	binary.Write(buf, binary.BigEndian, entry)
	bytes := buf.Bytes()
	if _, err := w.WriteAt(bytes, offset); err != nil {
		return 0, err
	}
	return calcCheckSum(bytes), nil
}
Example #20
func itemValWrite(coll *gkvlite.Collection, i *gkvlite.Item,
	w io.WriterAt, offset int64) error {
	if !strings.HasSuffix(coll.Name(), COLL_SUFFIX_CHANGES) {
		_, err := w.WriteAt(i.Val, offset)
		return err
	}
	if i.Val != nil {
		_, err := w.WriteAt(i.Val, offset)
		return err
	}
	if i.Transient == unsafe.Pointer(nil) {
		panic(fmt.Sprintf("itemValWrite saw nil Transient, i: %#v", i))
	}
	ti := (interface{})(i.Transient)
	item, ok := ti.(*item)
	if !ok {
		panic(fmt.Sprintf("itemValWrite invoked on non-item, i: %#v", i))
	}
	if item == nil {
		panic(fmt.Sprintf("itemValWrite invoked on nil item, i: %#v", i))
	}
	vBytes := item.toValueBytes()
	_, err := w.WriteAt(vBytes, offset)
	return err
}
Example #21
func (t *TinyRBuff) WriteAt(w io.WriterAt, off int) (w_len int, err error) {
	// check me: should use writev

	if t.Tail <= t.Checked {
		w_len, err = w.WriteAt(t.Buf[t.Tail:t.Checked], int64(off))
		t.Tail += w_len
		off += w_len
		return w_len, err
	}
	if t.Tail < t.OutHead {

		w_len, err = w.WriteAt(t.Buf[t.Tail:t.OutHead], int64(off))
		t.Tail += w_len
		if t.Tail == t.OutHead {
			t.Tail = 0
			t.OutHead = 0
			t.DupSize = 0
		}
		if err != nil || t.OutHead != 0 {
			return w_len, err
		}
		var out_len int
		out_len, err = w.WriteAt(t.Buf[t.Tail:t.Checked], int64(off+w_len))
		w_len += out_len
		t.Tail += out_len

		return w_len, err
	}
	return 0, errors.New("this data is not written")
}
Example #22
func (self *zpwrite) Write(b io.WriterAt) (err error) {
	if self.repeat == 1 {
		_, err = b.WriteAt(self.data, self.Org())
		return
	}
	l := self.Len()
	if l < 1<<20 { // just some arbitrary limit i guess
		// Since the write is small, just do it all at once.
		// This is probably suboptimal if the writerat isn't a file,
		// but it's probably a file.
		w := make([]byte, l)
		for i := 0; int64(i) < l; i += len(self.data) {
			copy(w[i:], self.data)
		}
		_, err = b.WriteAt(w, self.Org())
		return
	}
	for i := int64(0); i < l; i += int64(len(self.data)) {
		_, err = b.WriteAt(self.data, self.Org()+i)
		if err != nil {
			return
		}
	}
	return
}
Example #23
File: wav.go Project: metakeule/wav
// New writes the given waveform to the given WriterAt.
func New(w io.WriterAt, samplesPerSecond uint32, bitsPerSample uint8, channels uint16, waveform []byte) error {
	header := newWavfileHeader(samplesPerSecond, bitsPerSample, channels)

	var size uint32

	written, err := w.WriteAt(header.Bytes(), 0)
	size += uint32(written)
	if err != nil {
		return err
	}

	// Write the data starting at offset 44, which is the first offset after the header.
	written, err = w.WriteAt(waveform, 44)
	size += uint32(written)
	if err != nil {
		return err
	}

	var (
		wavfileHeaderSize uint32 = 44 // bytes
		riffLength        uint32 = size - 8
		dataLength        uint32 = size - wavfileHeaderSize
	)

	// Write the riffLength into the header
	rl := make([]byte, 4)
	binary.LittleEndian.PutUint32(rl, riffLength)
	_, err = w.WriteAt(rl, 4)
	if err != nil {
		return err
	}

	// Write the length of the file into the header
	// The dataLength header starts at offset 40
	dl := make([]byte, 4)
	binary.LittleEndian.PutUint32(dl, dataLength)
	_, err = w.WriteAt(dl, 40)
	return err
}
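
A hypothetical call that writes one second of silent 16-bit mono audio (the file name and sample data are invented for illustration; assumes the os and log packages are imported):

f, err := os.Create("silence.wav")
if err != nil {
	log.Fatal(err)
}
defer f.Close()

// 44100 samples/second * 2 bytes per 16-bit sample, all zeros = one second of silence.
waveform := make([]byte, 44100*2)
if err := New(f, 44100, 16, 1, waveform); err != nil {
	log.Fatal(err)
}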
Example #24
func (self *ipswrite) Write(b io.WriterAt) (err error) {
	_, err = b.WriteAt(self.data, int64(self.start))
	return
}
Example #25
func WriteInt32At(w io.WriterAt, num int, offset int64) {
	buf := new(bytes.Buffer)
	binary.Write(buf, binary.LittleEndian, uint32(num))
	w.WriteAt(buf.Bytes(), offset) // errors from binary.Write and WriteAt are discarded
}
Example #26
// WriteAt writes the needle buffer to wr at the block offset derived from offset.
func (n *Needle) WriteAt(offset uint32, wr io.WriterAt) (err error) {
	_, err = wr.WriteAt(n.buffer[:n.TotalSize], BlockOffset(offset))
	return
}
Example #27
func (self *diffwrite) Write(b io.WriterAt) (err error) {
	_, err = b.WriteAt(self.data, self.Org())
	return
}
Example #28
// Download file from filesystem to an io.WriterAt
func (m Mega) Download(src *Node, outfile io.WriterAt, progress *chan int) (int64, error) {
	m.FS.mutex.Lock()
	defer m.FS.mutex.Unlock()

	defer func() {
		if progress != nil {
			close(*progress)
		}
	}()

	if src == nil {
		return 0, EARGS
	}

	var msg [1]DownloadMsg
	var res [1]DownloadResp
	var mutex sync.Mutex

	msg[0].Cmd = "g"
	msg[0].G = 1
	msg[0].N = src.hash

	request, _ := json.Marshal(msg)
	result, err := m.api_request(request)
	if err != nil {
		return 0, err
	}

	err = json.Unmarshal(result, &res)
	if err != nil {
		return 0, err
	}
	resourceUrl := res[0].G

	_, err = decryptAttr(src.meta.key, []byte(res[0].Attr))
	if err != nil {
		return 0, err
	}

	aes_block, err := aes.NewCipher(src.meta.key)
	if err != nil {
		return 0, err
	}

	mac_data := a32_to_bytes([]uint32{0, 0, 0, 0})
	mac_enc := cipher.NewCBCEncrypter(aes_block, mac_data)
	t := bytes_to_a32(src.meta.iv)
	iv := a32_to_bytes([]uint32{t[0], t[1], t[0], t[1]})

	sorted_chunks := []int{}
	filesize := int64(res[0].Size)
	chunks := getChunkSizes(int(filesize))
	chunk_macs := make([][]byte, len(chunks))

	for k := range chunks {
		sorted_chunks = append(sorted_chunks, k)
	}
	sort.Ints(sorted_chunks)

	workch := make(chan int)
	errch := make(chan error, m.dl_workers)
	wg := sync.WaitGroup{}

	// Fire chunk download workers
	for w := 0; w < m.dl_workers; w++ {
		wg.Add(1)

		go func() {
			defer wg.Done()

			// Wait for work blocked on channel
			for id := range workch {
				var resource *http.Response
				var err error
				mutex.Lock()
				chk_start := sorted_chunks[id]
				chk_size := chunks[chk_start]
				mutex.Unlock()
				chunk_url := fmt.Sprintf("%s/%d-%d", resourceUrl, chk_start, chk_start+chk_size-1)
				for retry := 0; retry < m.retries+1; retry++ {
					resource, err = client.Get(chunk_url)
					if err == nil {
						if resource.StatusCode == 200 {
							break
						} else {
							resource.Body.Close()
						}
					}
				}

				var ctr_iv []uint32
				var ctr_aes cipher.Stream
				var chunk []byte

				if err == nil {
					ctr_iv = bytes_to_a32(src.meta.iv)
					ctr_iv[2] = uint32(uint64(chk_start) / 0x1000000000)
					ctr_iv[3] = uint32(chk_start / 0x10)
					ctr_aes = cipher.NewCTR(aes_block, a32_to_bytes(ctr_iv))
					chunk, err = ioutil.ReadAll(resource.Body)
				}

				if err != nil {
					errch <- err
					return
				}
				resource.Body.Close()
				ctr_aes.XORKeyStream(chunk, chunk)
				if _, werr := outfile.WriteAt(chunk, int64(chk_start)); werr != nil {
					errch <- werr
					return
				}

				enc := cipher.NewCBCEncrypter(aes_block, iv)
				i := 0
				block := []byte{}
				chunk = paddnull(chunk, 16)
				for i = 0; i < len(chunk); i += 16 {
					block = chunk[i : i+16]
					enc.CryptBlocks(block, block)
				}

				mutex.Lock()
				if len(chunk_macs) > 0 {
					chunk_macs[id] = make([]byte, 16)
					copy(chunk_macs[id], block)
				}
				mutex.Unlock()

				if progress != nil {
					*progress <- chk_size
				}
			}
		}()
	}

	// Place chunk download jobs to chan
	err = nil
	for id := 0; id < len(chunks) && err == nil; {
		select {
		case workch <- id:
			id++
		case err = <-errch:
		}
	}
	close(workch)

	wg.Wait()

	// Surface any worker error instead of falling through to a misleading MAC mismatch.
	if err == nil {
		select {
		case err = <-errch:
		default:
		}
	}
	if err != nil {
		return 0, err
	}

	for _, v := range chunk_macs {
		mac_enc.CryptBlocks(mac_data, v)
	}

	tmac := bytes_to_a32(mac_data)
	if bytes.Equal(a32_to_bytes([]uint32{tmac[0] ^ tmac[1], tmac[2] ^ tmac[3]}), src.meta.mac) == false {
		return 0, EMACMISMATCH
	}

	return filesize, nil
}
Example #29
func writeAtWithoutBuffer(w io.WriterAt, str string, off int64) (int, error) {
	return w.WriteAt([]byte(str), off)
}