Example #1
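// WriteAt writes p at offset off within the torrent's data, spanning the
// torrent's files in order and creating them on disk as needed.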
func (fst fileStorageTorrent) WriteAt(p []byte, off int64) (n int, err error) {
	for _, fi := range fst.fts.info.UpvertedFiles() {
		if off >= fi.Length {
			off -= fi.Length
			continue
		}
		n1 := len(p)
		if int64(n1) > fi.Length-off {
			n1 = int(fi.Length - off)
		}
		name := fst.fts.fileInfoName(fi)
		if err = os.MkdirAll(filepath.Dir(name), 0770); err != nil {
			return
		}
		var f *os.File
		f, err = os.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0660)
		if err != nil {
			return
		}
		n1, err = f.WriteAt(p[:n1], off)
		f.Close()
		if err != nil {
			return
		}
		n += n1
		off = 0
		p = p[n1:]
		if len(p) == 0 {
			break
		}
	}
	return
}
Example #2
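// download_segments is a download worker: it takes byte-range jobs from the
// backlog and instructions channels, fetches each range with an HTTP Range
// request, writes it into file at the segment's position, and records
// finished segments in progressFile. A zero-size job stops the worker.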
func download_segments(threadnum uint, url string, backlog chan Download, instructions chan Download,
	updates chan ProgressUpdate, file *os.File, progressFile *os.File) {

	buf := make([]byte, 8192)
	total := int64(0)
	errorCount := int64(0)

	var down Download
	for {
		select {
		case down = <-backlog:
		case down = <-instructions:
		}
		if down.size == 0 {
			break
		}

		req, err := http.NewRequest("GET", url, nil)
		if err != nil {
			fmt.Fprintln(os.Stderr, "Failed to create request")
			os.Exit(1)
		}

		req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", down.position, down.position+down.size-1))

		resp, err := client.Do(req)
		if err != nil || (resp.StatusCode != 206 && resp.StatusCode != 200) {
			if err == nil {
				resp.Body.Close() // bad status: don't leak the connection
			}
			backlog <- down
			updates <- ProgressUpdate{threadnum: threadnum, downloaded: -errorCount}
			// Exponential backoff: 10ms, 20ms, 40ms, ...
			time.Sleep(time.Duration(1<<uint64(errorCount)) * 10 * time.Millisecond)
			errorCount++
			continue
		}

		errorCount = 0

		read := int64(0)

		for read < down.size {
			n, err := resp.Body.Read(buf[:])
			if n == 0 && err != nil {
				break
			}

			// A failed write would silently corrupt the download; bail out.
			if _, err := file.WriteAt(buf[:n], down.position+read); err != nil {
				fmt.Fprintln(os.Stderr, "Failed to write to file:", err)
				os.Exit(1)
			}
			read += int64(n)
			total += int64(n)
			updates <- ProgressUpdate{threadnum: threadnum, downloaded: total}
		}
		resp.Body.Close()

		if read >= down.size {
			buf[0] = finishedSegment
			progressFile.WriteAt(buf[:1], baseOffset+int64(down.segmentNum))
		} else {
			down.position += read
			down.size -= read
			backlog <- down
		}
	}
}
Example #3
func download(partDownloadInfosChan <-chan PartDownloadInfo, downloadFile *os.File) {
	for partDownloadInfo := range partDownloadInfosChan {

		client := &http.Client{}
		req, err := http.NewRequest("GET", partDownloadInfo.Url, nil)
		if err != nil {
			log.Fatal(err)
		}
		range_header := "bytes=" + strconv.Itoa(partDownloadInfo.OffsetMin) + "-" + strconv.Itoa(partDownloadInfo.OffsetMax-1)
		req.Header.Add("Range", range_header)
		log.Printf("正在下载第%d块大小为%.3fMB文件,请求头为 %s \n", partDownloadInfo.Index, float64(partDownloadInfo.OffsetMax-partDownloadInfo.OffsetMin)/MB, range_header)
		resp, err := client.Do(req)
		if err != nil {
			log.Fatal(err)
		}

		log.Println("=========>> ", resp.ContentLength, resp.StatusCode, resp.Proto)
		actual_part_size := partDownloadInfo.OffsetMax - partDownloadInfo.OffsetMin
		if resp.ContentLength == int64(actual_part_size) {
			body, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				log.Fatal(err)
			}
			downloadFile.WriteAt(body, int64(partDownloadInfo.OffsetMin))
		} else {
			// Reserve capacity only; a non-zero length here would prepend
			// actual_part_size zero bytes to everything read from the body.
			buf := bytes.NewBuffer(make([]byte, 0, actual_part_size))
			n, err := buf.ReadFrom(resp.Body)
			if err != nil && err != io.EOF {
				log.Fatal(err)
			}

			log.Printf("read %d bytes\n", n)
			downloadFile.WriteAt(buf.Bytes(), int64(partDownloadInfo.OffsetMin))
		}
		resp.Body.Close() // close per iteration; a defer here would pile up until the function returns

		wg.Done()
	}
}
Example #4
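// appendWriteFile appends buf by writing at the file's current size. Note
// that stat-then-write is not atomic; concurrent appenders should open the
// file with O_APPEND instead.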
func appendWriteFile(fp *os.File, buf []byte) (int, error) {
	stat, err := fp.Stat()
	if err != nil {
		return -1, err
	}

	return fp.WriteAt(buf, stat.Size())
}
Example #5
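// writeMarshaledData overwrites the file from offset 0 with the JSON
// encoding of v. It does not truncate, so data shorter than the previous
// contents leaves stale trailing bytes behind.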
func writeMarshaledData(f *os.File, v interface{}) error {
	data, err := json.Marshal(&v)
	if err != nil {
		return err
	}
	_, err = f.WriteAt(data, 0)
	return err
}
Example #6
// Downloads the given range. In case of an error, sleeps for 10s and tries again.
func DownloadRange(file *File, fp *os.File, offset int64, size int64, rangeWg *sync.WaitGroup, chunkIndex bitField, reportCh chan Report) {
	defer rangeWg.Done()
	reportCh <- Report{ToDownload: size}
	newOffset := offset
	lastByte := offset + size           // The byte we won't be getting
	lastIndex := lastByte/ChunkSize - 1 // The last index we'll fill

	// Creating a custom request because it will have Range header in it
	req, _ := http.NewRequest("GET", file.DownloadUrl(), nil)

	rangeHeader := fmt.Sprintf("bytes=%d-%d", offset, lastByte-1)
	req.Header.Add("Range", rangeHeader)

	// http.DefaultClient does not copy headers while following redirects
	client := &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			req.Header.Add("Range", rangeHeader)
			return nil
		},
	}

	resp, err := client.Do(req)
	if err != nil {
		log.Println(err)
		return
	}
	defer resp.Body.Close()

	buffer := make([]byte, ChunkSize)
	for {
		nr, er := io.ReadFull(resp.Body, buffer)
		if nr > 0 {
			nw, ew := fp.WriteAt(buffer[0:nr], newOffset)
			nWritten := int64(nw)
			newOffset += nWritten
			currentIndex := newOffset/ChunkSize - 1
			if currentIndex == lastIndex && newOffset != lastByte {
				// don't mark the last chunk done until the whole range is finished
			} else {
				chunkIndex.Set(currentIndex)
				fp.WriteAt(chunkIndex, file.Size)
			}
			reportCh <- Report{Downloaded: nWritten}
			if ew != nil {
				log.Println(ew)
				return
			}
		}
		if er == io.EOF || er == io.ErrUnexpectedEOF {
			return
		}
		if er != nil {
			log.Println(er)
			return
		}
	}
}
Example #7
func (b *Block) writeNode(node *Node, file *os.File, offset uint64,
	buffer *bytes.Buffer, bufferSize uint32, compressedSize uint32) uint64 {

	if b.used && b.nodeID != node.id {
		logger.Fatal("Block Is Used")
	}

	if buffer == nil {
		logger.Fatal("buffer is nil")
	}

	if compressedSize < 1 {
		logger.Fatal("compressedSize < 1")
	}

	if !b.available(compressedSize) {
		logger.Fatal("block unavailable")
	}

	if b.offset > 0 {
		offset = b.offset
	}

	n, err := file.WriteAt(buffer.Bytes(), int64(offset))

	file.Sync()

	if err != nil {
		logger.Fatal(err)
	}

	if n != int(compressedSize) {
		logger.Fatal("Write File Error")
	}

	if b.blockSize == 0 {
		b.blockSize = compressedSize
	}

	if b.blockSize < MinOneBlockDiskSize {
		b.blockSize = MinOneBlockDiskSize
	}

	crcValue := checksum(buffer)

	b.nodeSize = bufferSize
	b.compressedSize = compressedSize
	b.nodeID = node.id
	b.offset = offset
	b.crcValue = crcValue
	b.used = true

	return offset + uint64(b.blockSize)
}
Example #8
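// RangeWrite downloads the byte range [start, end) of the remote file and
// writes it into dest at the matching offsets. It reports progress over the
// websocket and sends the byte count written (or -1 on failure) on chMulDow.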
func (file *File) RangeWrite(dest *os.File, start int64, end int64, chMulDow chan int64, partNum int64) {
	var written int64
	var p float32
	var flag = map[int]bool{}
	ioReader, err := file.ReqHttpRange(start, end-1)
	reqRangeSize := end - start
	file.WsRespData.Status = "keep"
	if err != nil {
		return
	}
	buf := make([]byte, 32*1024)
	for {
		nr, er := ioReader.Read(buf)
		if nr > 0 {
			nw, ew := dest.WriteAt(buf[0:nr], start)
			start = int64(nw) + start
			if nw > 0 {
				written += int64(nw)
			}
			if ew != nil {
				fmt.Printf("part%d write error : %s\n", partNum, ew.Error())
				chMulDow <- -1
				return
			}
			if nr != nw {
				fmt.Printf("part%d short write\n", partNum)
				chMulDow <- -1
				return
			}

			p = float32(written) / float32(reqRangeSize) * 100
			pp := int(p)
			if pp >= 20 && pp%20 == 0 {
				if !flag[pp] {
					file.WsRespData.Progress = pp / int(MulSectionDowCount)
					file.WsRespData.PartNum = int(partNum)
					websocket.JSON.Send(file.Ws, file.WsRespData)
					fmt.Printf("%s part%d progress: %v%%\n", file.Name, partNum, int(p))
				}
				flag[pp] = true
			}
		}
		if er != nil {
			if er.Error() == "EOF" {
				//Successfully finish downloading
				if reqRangeSize == written {
					fmt.Printf("%s part%d written  %d\n", file.Name, partNum, written)
					chMulDow <- written
				} else {
					fmt.Printf("%s part%d written  %d (unfinished)\n", file.Name, partNum, written)
					chMulDow <- -1
				}
				break
			}
			fmt.Printf("part%d downloading error : %s\n", partNum, er.Error())
			chMulDow <- -1
			break
		}
	}
}
Example #9
// WriteAt writes from an unaligned data buffer via direct I/O.
// Use AllocateAligned to avoid the extra data buffer copy.
func WriteAt(file *os.File, data []byte, offset int64) (int, error) {
	if alignmentShift(data) == 0 {
		return file.WriteAt(data, offset)
	}
	// Write unaligned
	buf := AllocateAligned(len(data))
	copy(buf, data)
	n, err := file.WriteAt(buf, offset)
	return n, err
}
Example #10
func (c Client) Get(
	location *os.File,
	contentURL string,
	progressWriter io.Writer,
) error {
	req, err := http.NewRequest("HEAD", contentURL, nil)
	if err != nil {
		return fmt.Errorf("failed to construct HEAD request: %s", err)
	}

	resp, err := c.httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("failed to make HEAD request: %s", err)
	}
	defer resp.Body.Close()

	contentURL = resp.Request.URL.String()

	ranges, err := c.ranger.BuildRange(resp.ContentLength)
	if err != nil {
		return fmt.Errorf("failed to construct range: %s", err)
	}

	c.bar.SetOutput(progressWriter)
	c.bar.SetTotal(resp.ContentLength)
	c.bar.Kickoff()

	defer c.bar.Finish()

	var g errgroup.Group
	for _, r := range ranges {
		byteRange := r
		g.Go(func() error {
			respBytes, err := c.retryableRequest(contentURL, byteRange.HTTPHeader)
			if err != nil {
				return fmt.Errorf("failed during retryable request: %s", err)
			}

			bytesWritten, err := location.WriteAt(respBytes, byteRange.Lower)
			if err != nil {
				return fmt.Errorf("failed to write file: %s", err)
			}

			c.bar.Add(bytesWritten)

			return nil
		})
	}

	if err := g.Wait(); err != nil {
		return err
	}

	return nil
}
Example #11
func (h *regionFileHeader) SetOffset(chunkLoc ChunkXz, offset chunkOffset, file *os.File) error {
	index := indexForChunkLoc(chunkLoc)
	h[index] = offset

	// Write that part of the index.
	var offsetBytes [4]byte
	binary.BigEndian.PutUint32(offsetBytes[:], uint32(offset))
	_, err := file.WriteAt(offsetBytes[:], int64(index)*4)

	return err
}
Example #12
func (m *fileMonitor) copyRemoteBlocks(cc <-chan content, outFile *os.File, writeWg *sync.WaitGroup) {
	defer writeWg.Done()

	for content := range cc {
		_, err := outFile.WriteAt(content.data, content.offset)
		buffers.Put(content.data)
		if err != nil {
			m.writeError = err
			return
		}
	}
}
Example #13
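// WriteAt splits p across fixed-size cache files ("boxes") on disk, creating
// boxes on demand and tracking which bytes of each box have been filled.
// If the cache has grown past its capacity, it trims boxes and returns their chunks.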
func (r *HdCache) WriteAt(p []byte, off int64) []chunk {
	boxI := int(off / int64(r.pieceSize))
	boxOff := int(off % int64(r.pieceSize))

	for i := 0; i < len(p); {
		var box *os.File
		var err error
		if !r.boxExists.IsSet(boxI) { //box doesn't exist, so we'll make one.
			box, err = os.Create(r.boxPrefix + strconv.Itoa(boxI))
			if err != nil {
				log.Panicln("Couldn't create cache file:", err)
				return nil
			}
			r.boxExists.Set(boxI)
			box.Truncate(int64(r.pieceSize))
			r.actualUsage++
		} else { //box exists, so we'll open it
			box, err = os.OpenFile(r.boxPrefix+strconv.Itoa(boxI), os.O_WRONLY, 0777)
			if err != nil {
				log.Println("Error opening cache item we thought we had:", r.boxPrefix+strconv.Itoa(boxI), "error:", err)
				r.removeBox(boxI)
				continue //loop around without incrementing 'i', forget this ever happened
			}
		}
		end := i + r.pieceSize - boxOff // index into p just past this box's share
		if end > len(p) {
			end = len(p)
		}
		copied, err := box.WriteAt(p[i:end], int64(boxOff))
		if err != nil {
			log.Panicln("Error at write cache box:", box.Name(), "error:", err)
		}
		i += copied
		box.Close()
		r.atimes[boxI] = time.Now()
		if copied == r.pieceSize {
			r.isBoxFull.Set(boxI)
		} else {
			if r.isByteSet[boxI].n == 0 {
				r.isByteSet[boxI] = *NewBitset(r.pieceSize)
			}
			for j := boxOff; j < boxOff+copied; j++ {
				r.isByteSet[boxI].Set(j)
			}
		}
		boxI++
		boxOff = 0
	}
	if r.actualUsage > r.getCapacity() {
		return r.trim()
	}
	return nil
}
Example #14
// Creates a new chunk - extends file
func createChunk(h *HANDLE, chunkIdx int64, buf []byte, off int, sz int) (int, error) {
	var chFile *os.File
	var err error
	var wrote int

	cacheName := h.f.cacheName + "." + strconv.FormatInt(chunkIdx, 10)
	log.WithFields(log.Fields{
		"CacheName": cacheName,
		"ChunkIdx":  chunkIdx,
		"Offset":    off,
		"Size":      sz,
	}).Debug("Revelo::createChunk")

	err = os.MkdirAll(path.Dir(cacheName), 0700) //XXX revisit permission
	if err != nil {
		log.WithFields(log.Fields{
			"CacheName": cacheName,
			"Perm":      0700,
			"Error":     err,
		}).Error("Revelo::createChunk: Cannot MkdirAll")
		return 0, err
	}

	chFile, err = os.OpenFile(cacheName, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
	if err != nil {
		// Chunk exists - we should not be here
		log.WithFields(log.Fields{
			"CacheName": cacheName,
			"Size":      sz,
			"ChunkIdx":  chunkIdx,
			"Error":     err,
		}).Error("Revelo::createChunk: Open failed")
		return 0, err
	}
	defer chFile.Close()

	wrote, err = chFile.WriteAt(buf, int64(off))
	if err != nil {
		log.WithFields(log.Fields{
			"ChunkName": cacheName,
			"Off":       off,
			"Size":      sz,
			"Wrote":     wrote,
			"Error":     err,
		}).Error("Revelo::createChunk: WriteAt failed")
		return 0, err
	}

	return wrote, nil
}
Example #15
func shiftBytesBack(file *os.File, start, offset int64) error {
	stat, err := file.Stat()
	if err != nil {
		return err
	}
	end := stat.Size()

	wrBuf := make([]byte, offset)
	rdBuf := make([]byte, offset)

	wrOffset := offset
	rdOffset := start

	rn, err := file.ReadAt(wrBuf, rdOffset)
	if err != nil && err != io.EOF {
		return err
	}
	rdOffset += int64(rn)

	for {
		if rdOffset >= end {
			break
		}

		n, err := file.ReadAt(rdBuf, rdOffset)
		if err != nil && err != io.EOF {
			return err
		}

		if rdOffset+int64(n) > end {
			n = int(end - rdOffset)
		}

		if _, err := file.WriteAt(wrBuf[:rn], wrOffset); err != nil {
			return err
		}

		rdOffset += int64(n)
		wrOffset += int64(rn)
		copy(wrBuf, rdBuf)
		rn = n
	}

	if _, err := file.WriteAt(wrBuf[:rn], wrOffset); err != nil {
		return err
	}

	return nil
}
Example #16
// pwritev writes multiple buffers in one operation. Returns number of bytes
// written and any error happened during the write.
func (this *WriteAheadLog) pwritev(file *os.File, offset int64,
	bufferList [][]byte) (int64, error) {

	totalWrote := 0
	for _, buffer := range bufferList {
		numWrote, errWrite := file.WriteAt(buffer, offset)
		totalWrote += numWrote
		offset += int64(numWrote)

		if errWrite != nil {
			this.Errorf("could not write to file at offset %d: %v", offset,
				errWrite)
			return int64(totalWrote), errWrite
		}
	}
	return int64(totalWrote), nil
}
Example #17
/*
If the file size matches the capacity, use the file as the bitmap.
Otherwise grow the file to the capacity, zero filled.
*/
func NewFileBacked(f *os.File, capacity int) *FileBackedBitSet {
	fi, err := f.Stat()
	if err != nil {
		panic(err)
	}
	b := &FileBackedBitSet{c: capacity, changes: make(map[int]map[int]bool)}
	size := b.FileByteLength()
	fileSize := fi.Size()
	if fileSize > size {
		panic("unexpected: file too big") //f.Truncate(0)
	}
	}
	if fileSize < size {
		_, err := f.WriteAt(make([]byte, size), 0)
		if err != nil {
			panic(err)
		}
	}
	b.f = f
	return b
}
Example #18
func (m *fileMonitor) copyLocalBlocks(inFile, outFile *os.File, writeWg *sync.WaitGroup) {
	defer inFile.Close()
	defer writeWg.Done()

	var buf = buffers.Get(BlockSize)
	defer buffers.Put(buf)

	for _, lb := range m.localBlocks {
		buf = buf[:lb.Size]
		_, err := inFile.ReadAt(buf, lb.Offset)
		if err != nil {
			m.copyError = err
			return
		}
		_, err = outFile.WriteAt(buf, lb.Offset)
		if err != nil {
			m.copyError = err
			return
		}
	}
}
Example #19
func (backend *BlobsFileBackend) Delete(hash string) error {
	if !backend.loaded {
		panic("backend BlobsFileBackend not loaded")
	}
	if backend.writeOnly {
		return nil
		//panic("backend is in write-only mode")
	}
	blobPos, err := backend.index.GetPos(hash)
	if err != nil {
		return fmt.Errorf("Error fetching GetPos: %v", err)
	}
	if blobPos == nil {
		return fmt.Errorf("blob %v not found in index", hash)
	}
	var f *os.File
	// check if the file is already open for writing
	if blobPos.n == backend.n {
		f = backend.current
	} else {
		f, err = os.OpenFile(backend.filename(blobPos.n), os.O_RDWR, 0666)
		if err != nil {
			return fmt.Errorf("failed to open blobsfile %v", backend.filename(blobPos.n), err)
		}
		defer f.Close()
	}
	// Add Deleted to the flag
	if _, err := f.WriteAt([]byte{Deleted}, int64(blobPos.offset+hashSize)); err != nil {
		return err
	}
	// Delete the index entry
	if err := backend.index.DeletePos(hash); err != nil {
		return err
	}
	// Punch a hole in the file if possible
	if err := fileutil.PunchHole(f, int64(blobPos.offset+Overhead), int64(blobPos.size)); err != nil {
		return fmt.Errorf("failed to punch hole: %v", err)
	}
	return nil
}
Example #20
func removeLines(fn string, start, n int) (err error) {
	if start < 1 {
		return errors.New("invalid request: line numbers start at 1")
	}
	if n < 0 {
		return errors.New("invalid request: cannot remove a negative number of lines")
	}
	var f *os.File
	if f, err = os.OpenFile(fn, os.O_RDWR, 0); err != nil {
		return
	}
	defer func() {
		if cErr := f.Close(); err == nil {
			err = cErr
		}
	}()
	var b []byte
	if b, err = ioutil.ReadAll(f); err != nil {
		return
	}
	cut, ok := skip(b, start-1)
	if !ok {
		return fmt.Errorf("less than %d lines", start)
	}
	if n == 0 {
		return nil
	}
	tail, ok := skip(cut, n)
	if !ok {
		return fmt.Errorf("less than %d lines after line %d", n, start)
	}
	t := int64(len(b) - len(cut))
	if err = f.Truncate(t); err != nil {
		return
	}
	if len(tail) > 0 {
		_, err = f.WriteAt(tail, t)
	}
	return
}
Example #21
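// WriteNode serializes node's keys, children, and next pointer and writes
// the buffer to the file at the offset encoded by pointer.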
func (pointer BPlusPointer) WriteNode(f *os.File, node *BPlusNode) error {
	if pointer == InvalidPointer || pointer > IndexPointer {
		return ErrInvalidPointer
	}

	var buf = make([]byte, 0, NodeSize)

	for i := 0; i < Order; i++ {
		buf = append(buf, SerializeKey(node.Keys[i])...)
	}
	for i := 0; i < Order; i++ {
		buf = append(buf, SerializePointer(node.Children[i])...)
	}
	buf = append(buf, SerializePointer(node.Next)...)

	_, err := f.WriteAt(buf, int64(pointer))
	if err != nil {
		return err
	}

	return nil
}
Example #22
// Save only new artists
func (is *IndexSaver) Save(pathfile string, trunc bool) {
	path := filepath.Join(pathfile)
	// TRUNC or NOT
	var f *os.File
	var err error
	if trunc {
		f, err = os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_RDWR, os.ModePerm)
		if err != nil {
			return
		}
		f.Write(getInt32AsByte(int32(len(is.values))))
	} else {
		f, err = os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_RDWR, os.ModePerm)
		if err == nil {
			// New, write size
			f.Write(getInt32AsByte(int32(len(is.values))))
		} else {
			f, _ = os.OpenFile(path, os.O_RDWR, os.ModePerm)
			f.WriteAt(getInt32AsByte(int32(len(is.values))), 0)
			f.Seek(0, 2)
		}
	}
	is.current = 0
	io.Copy(f, is)
	f.Close()
}
Example #23
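// worker copies Defb-sized blocks from f to t, at the offsets it receives
// on offchan, and reports success or failure on the fail channel.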
func worker(f, t *os.File, from, to string, fail chan bool) {
	var buf [Defb]byte
	var bp []byte

	l := len(buf)
	bp = buf[0:]
	o := <-offchan
	for {
		n, err := f.ReadAt(bp, o)
		if err != nil && err != io.EOF {
			fmt.Printf("reading %s at %v: %v\n", from, o, err)
			fail <- true
			return
		}
		if n == 0 {
			break
		}

		nb := bp[0:n]
		n, err = t.WriteAt(nb, o)
		if err != nil {
			fmt.Printf("writing %s: %v\n", to, err)
			fail <- true
			return
		}
		bp = buf[n:]
		o += int64(n)
		l -= n
		if l == 0 {
			l = len(buf)
			bp = buf[0:]
			o = <-offchan
		}
	}
	fail <- false
}
Example #24
func gRepFile(oldb, newb []byte, fn string) (err error) {
	var f *os.File
	if f, err = os.OpenFile(fn, os.O_RDWR, 0); err != nil {
		return
	}
	defer func() {
		if cErr := f.Close(); err == nil {
			err = cErr
		}
	}()
	var b []byte
	if b, err = ioutil.ReadAll(f); err != nil {
		return
	}
	if !bytes.Contains(b, oldb) {
		return
	}
	r := bytes.Replace(b, oldb, newb, -1)
	if err = f.Truncate(0); err != nil {
		return
	}
	_, err = f.WriteAt(r, 0)
	return
}
Example #25
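// express fetches the byte ranges received on pipe with HTTP Range requests,
// writes each range into file at its start offset, and reports the number of
// bytes written on repipe.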
func express(file *os.File, urlStr string, pipe chan [2]int64, repipe chan int64, id int) {
	for {
		task := <-pipe
		start := task[0]
		end := task[1]

		client := &http.Client{}
		req, err := http.NewRequest("GET", urlStr, nil)
		if nil != err {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		req.Header.Add("range", strconv.FormatInt(start, 10)+"-"+strconv.FormatInt(end, 10))
		resp, err := client.Do(req)
		if nil != err {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		if 200 == resp.StatusCode && 0 != id {
			// A 200 (not 206) means the server ignored the Range request;
			// non-primary workers give up.
			resp.Body.Close()
			break
		}
		body, err := ioutil.ReadAll(resp.Body)
		if nil != err {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		resp.Body.Close()
		leng, err := file.WriteAt(body, start)
		if nil != err {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		repipe <- int64(leng)
	}
	return
}
Example #26
/******************************************************************************
 @brief
 	Write the file info header.
 @author
 	chenzhiguo
 @param
	fp					file handle
	offset				offset at which writing starts
 @return
 	int					number of bytes written to the file
 	error				nil on success; otherwise the reason for the failure
 @history
 	2015-05-16_09:23 	chenzhiguo		created
*******************************************************************************/
func (this *FileInfo) Write(fp *os.File, offset int64) (int, error) {

	buf, err := this.EncodeFileInfo(this)
	if err != nil {
		return 0, err
	}

	// write the gob length
	l := uint16(len(buf))
	temp := make([]byte, 2)
	binary.BigEndian.PutUint16(temp, l)
	if _, err := fp.WriteAt(temp, offset); err != nil {
		return 0, err
	}

	// write the gob data
	n, err := fp.WriteAt(buf, offset+2)
	if err != nil {
		return 0, err
	}

	if n != int(l) {
		return 0, errors.New("short write of gob data")
	}

	// write the success marker
	temp = make([]byte, 2)
	binary.BigEndian.PutUint16(temp, FHE)
	n, err = fp.WriteAt(temp, int64(offset)+2+int64(l))
	if err != nil {
		return 0, err
	}

	if n != 2 {
		return 0, errors.New("short write of end marker")
	}

	return (2 + int(l) + 2), nil
}
Example #27
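// updateCursor persists offset as decimal text at the start of the file.
// Note: writing a shorter number than before leaves stale trailing digits,
// and the WriteAt error is ignored.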
func updateCursor(offset int64, writer *os.File) {
	writer.WriteAt([]byte(strconv.FormatInt(offset, 10)), 0)
}
Example #28
func writeData(data []byte, offset int, fo *os.File) {
	_, err := fo.WriteAt(data, int64(offset))
	if err != nil {
		log.Println("Error writing to file: " + err.Error())
	}
}
Example #29
// DownloadFile downloads the file src from Mega into dstpath.
func (m Mega) DownloadFile(src *Node, dstpath string, progress *chan int) error {
	m.FS.mutex.Lock()
	defer m.FS.mutex.Unlock()

	defer func() {
		if progress != nil {
			close(*progress)
		}
	}()

	if src == nil {
		return EARGS
	}

	var msg [1]DownloadMsg
	var res [1]DownloadResp
	var outfile *os.File
	var mutex sync.Mutex

	// Stat returns a nil error when the file exists; os.IsExist never
	// matches a nil error, so the old check removed nothing.
	_, err := os.Stat(dstpath)
	if err == nil {
		os.Remove(dstpath)
	}

	outfile, err = os.OpenFile(dstpath, os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return err
	}

	msg[0].Cmd = "g"
	msg[0].G = 1
	msg[0].N = src.hash

	request, _ := json.Marshal(msg)
	result, err := m.api_request(request)
	if err != nil {
		return err
	}

	err = json.Unmarshal(result, &res)
	if err != nil {
		return err
	}
	resourceUrl := res[0].G

	_, err = decryptAttr(src.meta.key, []byte(res[0].Attr))
	if err != nil {
		return err
	}

	aes_block, _ := aes.NewCipher(src.meta.key)

	mac_data := a32_to_bytes([]uint32{0, 0, 0, 0})
	mac_enc := cipher.NewCBCEncrypter(aes_block, mac_data)
	t := bytes_to_a32(src.meta.iv)
	iv := a32_to_bytes([]uint32{t[0], t[1], t[0], t[1]})

	sorted_chunks := []int{}
	chunks := getChunkSizes(int(res[0].Size))
	chunk_macs := make([][]byte, len(chunks))

	for k := range chunks {
		sorted_chunks = append(sorted_chunks, k)
	}
	sort.Ints(sorted_chunks)

	workch := make(chan int)
	errch := make(chan error, 1)
	wg := sync.WaitGroup{}

	// Fire chunk download workers
	for w := 0; w < m.dl_workers; w++ {
		wg.Add(1)

		go func() {
			defer wg.Done()
			var id int
			var ok bool
			var err error // shadow the enclosing err: workers must not race on it

			for {
				// Wait for work blocked on channel
				select {
				case err := <-errch:
					errch <- err
					return
				case id, ok = <-workch:
					if !ok {
						return
					}
				}

				var resource *http.Response
				mutex.Lock()
				chk_start := sorted_chunks[id]
				chk_size := chunks[chk_start]
				mutex.Unlock()
				chunk_url := fmt.Sprintf("%s/%d-%d", resourceUrl, chk_start, chk_start+chk_size-1)
				for retry := 0; retry < m.retries+1; retry++ {
					resource, err = client.Get(chunk_url)
					if err == nil {
						break
					}
				}

				var ctr_iv []uint32
				var ctr_aes cipher.Stream
				var chunk []byte

				if err == nil {
					ctr_iv = bytes_to_a32(src.meta.iv)
					ctr_iv[2] = uint32(uint64(chk_start) / 0x1000000000)
					ctr_iv[3] = uint32(chk_start / 0x10)
					ctr_aes = cipher.NewCTR(aes_block, a32_to_bytes(ctr_iv))
					chunk, err = ioutil.ReadAll(resource.Body)
				}

				if err != nil {
					errch <- err
					return
				}
				resource.Body.Close()
				ctr_aes.XORKeyStream(chunk, chunk)
				if _, err = outfile.WriteAt(chunk, int64(chk_start)); err != nil {
					errch <- err
					return
				}

				enc := cipher.NewCBCEncrypter(aes_block, iv)
				i := 0
				block := []byte{}
				chunk = paddnull(chunk, 16)
				for i = 0; i < len(chunk); i += 16 {
					block = chunk[i : i+16]
					enc.CryptBlocks(block, block)
				}

				mutex.Lock()
				if len(chunk_macs) > 0 {
					chunk_macs[id] = make([]byte, 16)
					copy(chunk_macs[id], block)
				}
				mutex.Unlock()

				if progress != nil {
					*progress <- chk_size
				}
			}
		}()
	}

	// Place chunk download jobs on the channel. A bare break inside select
	// only exits the select, so label the loop.
feed:
	for id := 0; id < len(chunks); {
		select {
		case workch <- id:
			id++
			if id == len(chunks) {
				close(workch)
				break feed
			}
		case err := <-errch:
			errch <- err
			break feed
		}
	}

	wg.Wait()

	select {
	case err = <-errch:
	default:
	}

	if err != nil {
		os.Remove(dstpath)
		return err
	}

	for _, v := range chunk_macs {
		mac_enc.CryptBlocks(mac_data, v)
	}

	outfile.Close()
	tmac := bytes_to_a32(mac_data)
	if !bytes.Equal(a32_to_bytes([]uint32{tmac[0] ^ tmac[1], tmac[2] ^ tmac[3]}), src.meta.mac) {
		return EMACMISMATCH
	}

	return nil
}
Example #30
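// SyncGenieCopy copies new_file.from to new_file.to, resuming from the
// destination's current size if it already exists, and reports progress on
// ch at 25% intervals. When the copy completes it runs the configured
// run_when_done command and records the file in the history file.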
func SyncGenieCopy(ch chan SyncGenieCopyProgress, new_file SyncGenieCopyItem) {
	var sf *os.File
	var df *os.File
	var err error

	sf, err = os.Open(new_file.from)
	if err != nil {
		log.Println("Copy: error: failed to open", new_file.from, "got error:", err)
		return
	}
	defer sf.Close()

	ss, err := os.Stat(new_file.from)

	if err != nil {
		log.Println("Copy: failed stat file to be copied, ", new_file.from, "; canceling copy")
		return
	}

	var ss_size = ss.Size()

	ds, err := os.Stat(new_file.to)

	var read_start int64 = 0
	var offset int64 = 0
	var wrote int64 = 0

	if err == nil {
		// Destination exists: resume from its current size. WriteAt ignores
		// the file offset, so no Seek is needed.
		df, err = os.OpenFile(new_file.to, os.O_WRONLY, 0644)
		read_start = ds.Size()
		wrote = ds.Size()

	} else {
		df, err = os.Create(new_file.to)
	}

	if err != nil {
		log.Println("Copy: error: failed to open", new_file.to, "got error:", err)
		return
	}

	defer df.Close()

	buf := make([]byte, sync_genie_copy_buffer_size)

	sr := io.NewSectionReader(sf, read_start, ss.Size()-read_start)

	progress_completion_states := [...]int{0, 0, 0, 0, 0}

	for {
		read, e := sr.ReadAt(buf, offset)

		if read == 0 {
			break
		}

		if e != nil && e != io.EOF {
			log.Println("Copy: failed read at offset", offset, "; read start:", read_start, "; read", read, "bytes; on file", new_file.from, "; deferring copy; error:", e)
			return
		}

		w, e := df.WriteAt(buf[0:read], read_start+offset)

		// log.Println("Copy: read", read, "bytes; wrote", w, "bytes", "; offset", offset)

		if int64(read) != int64(w) {
			log.Println("Copy: error: failed to write at offset", offset, "; error:", e)
			break
		}

		if e != nil {
			log.Println("Copy: error: failed to write with error:", e)
			break
		}

		wrote += int64(w)
		new_file.remaining -= int64(w)

		if ss_size-offset < sync_genie_copy_buffer_size {
			buf = make([]byte, ss_size-offset)
		}

		offset = offset + sync_genie_copy_buffer_size

		var completed int = int(float64(wrote) / float64(ss.Size()) * 100)

		if completed%25 == 0 && progress_completion_states[int(math.Floor(float64(completed)/25))] != 1 {
			var progress SyncGenieCopyProgress
			progress.file = filepath.Base(new_file.to)
			progress.progress = completed
			progress_completion_states[int(math.Floor(float64(completed)/25))] = 1
			ch <- progress
		}

		sync_genie_currently_copying[new_file.to] = wrote
	}

	switch size := wrote; {
	case size == 0:
		log.Println("Copy: error:", new_file.to, "copied 0 bytes, should have copied", new_file.remaining, "bytes")
	case new_file.remaining == 0:
		log.Println("Copy: done copying", new_file.from, "to", new_file.to, "(read", size, "bytes)")

		if sync_genie_config.run_when_done != "" {
			filtered := sync_genie_config.run_when_done

			filtered = strings.Replace(filtered, "{filename}", filepath.Base(new_file.to), -1)

			parts := strings.Split(filtered, " ")

			log.Println("Exec: running", parts)

			c := exec.Command(parts[0], parts[1:]...)

			err := c.Start()

			if err != nil {
				log.Println("Exec: failed to run command", parts)
			}

			err = c.Wait()

			if err != nil {
				log.Println("Exec: command,", filtered, "finished with error:", err)
			}
		}
		delete(sync_genie_currently_copying, new_file.to)

		sync_genie_history = append(sync_genie_history, new_file.to)

		b, err := json.Marshal(&sync_genie_history)

		if err == nil {
			e := ioutil.WriteFile(sync_genie_history_file, b, 0644)

			if e == nil {
				log.Println("History: updated history")
			} else {
				log.Println("History: failed to write history file,", sync_genie_history_file, "; error:", e)
			}
		} else {
			log.Println("History: failed to update history; error:", err)
		}

	}
}