Example #1
func getTarFileBytes(file *os.File, path string) ([]byte, error) {
	_, err := file.Seek(0, 0)
	if err != nil {
		return nil, fmt.Errorf("error seeking file: %v", err)
	}

	var fileBytes []byte
	fileWalker := func(t *tarball.TarFile) error {
		if filepath.Clean(t.Name()) == path {
			fileBytes, err = ioutil.ReadAll(t.TarStream)
			if err != nil {
				return err
			}
		}

		return nil
	}

	tr := tar.NewReader(file)
	if err := tarball.Walk(*tr, fileWalker); err != nil {
		return nil, err
	}

	if fileBytes == nil {
		return nil, fmt.Errorf("file %q not found", path)
	}

	return fileBytes, nil
}
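A minimal usage sketch (not from the original project; the archive path and entry name are placeholders, and it assumes getTarFileBytes above is in the same package):
func readManifest() ([]byte, error) {
	// Open the tar archive and pull a single entry out of it using the
	// example function above. Both names here are hypothetical.
	f, err := os.Open("image.tar")
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return getTarFileBytes(f, "manifest.json")
}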
Example #2
// readTombstoneV2 reads the second version of tombstone files, which store
// keys along with the time range over which points for each key were deleted.
// The format is binary.
func (t *Tombstoner) readTombstoneV2(f *os.File) ([]Tombstone, error) {
	// Skip header, already checked earlier
	if _, err := f.Seek(4, os.SEEK_SET); err != nil {
		return nil, err
	}
	n := int64(4)

	fi, err := f.Stat()
	if err != nil {
		return nil, err
	}
	size := fi.Size()

	tombstones := []Tombstone{}
	var (
		min, max int64
		key      string
	)
	b := make([]byte, 4096)
	for {
		if n >= size {
			return tombstones, nil
		}

		if _, err = f.Read(b[:4]); err != nil {
			return nil, err
		}
		n += 4

		keyLen := int(binary.BigEndian.Uint32(b[:4]))
		if keyLen > len(b) {
			b = make([]byte, keyLen)
		}

		if _, err := f.Read(b[:keyLen]); err != nil {
			return nil, err
		}
		key = string(b[:keyLen])
		n += int64(keyLen)

		if _, err := f.Read(b[:8]); err != nil {
			return nil, err
		}
		n += 8

		min = int64(binary.BigEndian.Uint64(b[:8]))

		if _, err := f.Read(b[:8]); err != nil {
			return nil, err
		}
		n += 8
		max = int64(binary.BigEndian.Uint64(b[:8]))

		tombstones = append(tombstones, Tombstone{
			Key: key,
			Min: min,
			Max: max,
		})
	}
}
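The binary layout readTombstoneV2 expects can be sketched from the reads above: a 4-byte big-endian key length, the key bytes, then min and max as 8-byte big-endian values. A hypothetical encoder for one such record, for illustration only:
// appendTombstoneV2Record mirrors the fields readTombstoneV2 decodes; it is an
// illustrative helper, not part of the original package.
func appendTombstoneV2Record(buf *bytes.Buffer, key string, min, max int64) {
	var b [8]byte
	binary.BigEndian.PutUint32(b[:4], uint32(len(key)))
	buf.Write(b[:4])     // key length
	buf.WriteString(key) // key bytes
	binary.BigEndian.PutUint64(b[:], uint64(min))
	buf.Write(b[:]) // min time
	binary.BigEndian.PutUint64(b[:], uint64(max))
	buf.Write(b[:]) // max time
}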
Example #3
func getLogReader(logfile string, logf *os.File) (*bufio.Reader, error) {
	var rdr *bufio.Reader
	// Is this a gzip file?
	if path.Ext(logfile) == gzipext {
		gzrdr, err := gzip.NewReader(logf)
		if err != nil {
			return nil, err
		}
		rdr = bufio.NewReader(gzrdr)
	} else {
		// See if the file has shrunk. If so, read from the beginning.
		fi, err := logf.Stat()
		if err != nil {
			return nil, err
		}
		if fi.Size() < pos {
			pos = 0
		}
		logf.Seek(pos, os.SEEK_SET)
		fmt.Printf("Starting read at offset %d\n", pos)
		rdr = bufio.NewReader(logf)
	}

	return rdr, nil
}
Example #4
func fileWriter(t *testing.T, file *os.File, logs []string) {
	filename := file.Name()
	time.Sleep(1 * time.Second) // wait for start Tail...

	for _, line := range logs {
		if strings.Index(line, RotateMarker) != -1 {
			log.Println("fileWriter: rename file => file.old")
			os.Rename(filename, filename+".old")
			file.Close()
			file, _ = os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0644)
			log.Println("fileWriter: re-opened file")
		} else if strings.Index(line, TruncateMarker) != -1 {
			time.Sleep(1 * time.Second)
			log.Println("fileWriter: truncate(file, 0)")
			os.Truncate(filename, 0)
			file.Seek(int64(0), os.SEEK_SET)
		}
		_, err := file.WriteString(line)
		log.Print("fileWriter: wrote ", line)
		if err != nil {
			log.Println("write failed", err)
		}
		time.Sleep(1 * time.Millisecond)
	}
	file.Close()
}
Example #5
func testAccCreateZipFromFiles(files map[string]string, zipFile *os.File) error {
	zipFile.Truncate(0)
	zipFile.Seek(0, 0)

	w := zip.NewWriter(zipFile)

	for source, destination := range files {
		f, err := w.Create(destination)
		if err != nil {
			return err
		}

		fileContent, err := ioutil.ReadFile(source)
		if err != nil {
			return err
		}

		_, err = f.Write(fileContent)
		if err != nil {
			return err
		}
	}

	err := w.Close()
	if err != nil {
		return err
	}

	return w.Flush()
}
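A hedged usage sketch for the helper above; the temp-file pattern and source path are placeholders:
func exampleZipUsage() error {
	zipFile, err := ioutil.TempFile("", "example-zip")
	if err != nil {
		return err
	}
	defer os.Remove(zipFile.Name())
	defer zipFile.Close()

	// Map of source path on disk -> entry name inside the archive.
	files := map[string]string{
		"testdata/main.tf": "main.tf",
	}
	return testAccCreateZipFromFiles(files, zipFile)
}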
Example #6
func (c *client) startDownload(id uint64, outPath string, detachment *pond.Message_Detachment) (cancel func()) {
	killChan := make(chan bool, 1)
	go func() {
		var tmp *os.File
		var err error
		if tmp, err = ioutil.TempFile("" /* default tmp dir */, "pond-download-"); err != nil {
			err = errors.New("failed to create temp file: " + err.Error())
		} else {
			os.Remove(tmp.Name())
			defer tmp.Close()
			err = c.downloadDetachment(c.backgroundChan, tmp, id, *detachment.Url, killChan)
			if err == nil {
				_, err := tmp.Seek(0, 0 /* from start */)
				if err == nil {
					err = saveDecrypted(c.backgroundChan, outPath, id, tmp, detachment, killChan)
				}
			}
		}
		if err == nil {
			c.backgroundChan <- DetachmentComplete{id, nil}
		} else {
			c.backgroundChan <- DetachmentError{id, err}
		}
		tmp.Close()
	}()
	return func() {
		killChan <- true
	}
}
Example #7
// Skip advances the read offset by length bytes from the current position.
func (t *TTFParser) Skip(fd *os.File, length int) error {
	_, err := fd.Seek(int64(length), 1)
	if err != nil {
		return err
	}
	return nil
}
Example #8
func (fm *FileMonitor) updateJournal(bytes_read int64) (ok bool) {
	var msg string
	var seekJournal *os.File
	var file_err error

	if bytes_read == 0 || fm.seekJournalPath == "." {
		return true
	}

	if seekJournal, file_err = os.OpenFile(fm.seekJournalPath,
		os.O_CREATE|os.O_RDWR|os.O_APPEND,
		0660); file_err != nil {
		msg = fmt.Sprintf("Error opening seek recovery log for append: %s", file_err.Error())
		fm.LogError(msg)
		return false
	}
	defer seekJournal.Close()
	seekJournal.Seek(0, os.SEEK_END)

	var filemon_bytes []byte
	filemon_bytes, _ = json.Marshal(fm)

	msg = string(filemon_bytes) + "\n"
	seekJournal.WriteString(msg)

	return true
}
Example #9
func (r *Runner) uploadToS3(file *os.File, b *Build, boundary string) string {
	name := fmt.Sprintf("%s-build-%s-%s.txt", b.ID, b.Commit, time.Now().Format("2006-01-02-15-04-05"))
	url := fmt.Sprintf("https://s3.amazonaws.com/%s/%s", logBucket, name)

	if _, err := file.Seek(0, os.SEEK_SET); err != nil {
		log.Printf("failed to seek log file: %s\n", err)
		return ""
	}

	stat, err := file.Stat()
	if err != nil {
		log.Printf("failed to get log file size: %s\n", err)
		return ""
	}

	log.Printf("uploading build log to S3: %s\n", url)
	if err := s3attempts.Run(func() error {
		contentType := "multipart/mixed; boundary=" + boundary
		acl := "public-read"
		_, err := r.s3.PutObject(&s3.PutObjectRequest{
			Key:           &name,
			Body:          file,
			Bucket:        &logBucket,
			ACL:           &acl,
			ContentType:   &contentType,
			ContentLength: typeconv.Int64Ptr(stat.Size()),
		})
		return err
	}); err != nil {
		log.Printf("failed to upload build output to S3: %s\n", err)
	}
	return url
}
Example #10
func NewSlowLogParser(file *os.File, stopChan <-chan bool, opt Options) *SlowLogParser {
	// Seek to the offset, if any.
	// @todo error if start off > file size
	if opt.StartOffset > 0 {
		// @todo handle error
		file.Seek(int64(opt.StartOffset), os.SEEK_SET)
	}

	if opt.Debug {
		l.SetFlags(l.Ltime | l.Lmicroseconds)
		fmt.Println()
		l.Println("parsing " + file.Name())
	}

	p := &SlowLogParser{
		stopChan:    stopChan,
		opt:         opt,
		file:        file,
		EventChan:   make(chan *log.Event),
		inHeader:    false,
		inQuery:     false,
		headerLines: 0,
		queryLines:  0,
		bytesRead:   opt.StartOffset,
		lineOffset:  0,
		event:       log.NewEvent(),
	}
	return p
}
Example #11
func MakePrimitiveBlockReader(file *os.File) <-chan blockData {
	retval := make(chan blockData)

	go func() {
		file.Seek(0, os.SEEK_SET)
		for {
			filePosition, err := file.Seek(0, os.SEEK_CUR)

			blobHeader, err := ReadNextBlobHeader(file)
			if err == io.EOF {
				break
			} else if err != nil {
				println("Blob header read error:", err.Error())
				os.Exit(2)
			}

			blobBytes, err := readBlock(file, *blobHeader.Datasize)
			if err != nil {
				println("Blob read error:", err.Error())
				os.Exit(3)
			}

			retval <- blockData{blobHeader, blobBytes, filePosition}
		}
		close(retval)
	}()

	return retval
}
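A small usage sketch, assuming MakePrimitiveBlockReader above is in scope; the channel is closed when the reader hits EOF, so ranging over it terminates:
// countBlobs is a hypothetical caller that simply drains the channel.
func countBlobs(file *os.File) int {
	count := 0
	for range MakePrimitiveBlockReader(file) {
		count++
	}
	return count
}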
Example #12
func WriteLobbyInfo(file *os.File, lobby *models.Lobby) {
	if !config.Constants.ChatLogsEnabled {
		return
	}
	file.Seek(0, os.SEEK_SET)
	//TODO: write lobby info to file
}
Example #13
// returns string property prop
func (proc *SProcess) GetProperty(prop string) string {
	var (
		file *os.File
		err  error
	)

	// file exists
	if proc.files[prop] != nil {
		file = proc.files[prop]
		file.Seek(0, 0)

		// doesn't exist; create
	} else {
		file, err = os.Create("/system/process/" + strconv.Itoa(proc.pid) + "/" + prop)
		file.Chmod(0755)
	}

	// read up to 1024 bytes
	b := make([]byte, 1024)
	_, err = file.Read(b)

	// an error occurred, and it was not an EOF
	if err != nil && err != io.EOF {
		return "(undefined)"
	}

	// no EOF was returned, so the property may exceed the 1024-byte buffer
	if err != io.EOF {
		return "(maxed out)"
	}

	return string(b)
}
Example #14
func FindAndReplaceFd(fd *os.File, oldPattern, newPattern string) error {
	fbuf, err := ioutil.ReadAll(fd)
	if err != nil {
		return err
	}
	// Rewind to the start and truncate before rewriting the file in place.
	fd.Seek(0, 0)
	fd.Truncate(0)
	expr, err := regexp.Compile(oldPattern)
	if err != nil {
		return err
	}
	buffer := bytes.NewBuffer(fbuf)
	for {
		line, err := buffer.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		if expr.MatchString(line) {
			line = expr.ReplaceAllString(line, newPattern)
		}
		if _, err := fd.WriteString(line); err != nil {
			return err
		}
	}
	return nil
}
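A usage sketch, assuming FindAndReplaceFd above is in scope; the file name and patterns are placeholders. The file has to be opened read-write, since the function reads it fully, truncates it, and rewrites it in place:
func rewriteConfig() error {
	fd, err := os.OpenFile("config.txt", os.O_RDWR, 0644)
	if err != nil {
		return err
	}
	defer fd.Close()
	return FindAndReplaceFd(fd, `listen_port=\d+`, "listen_port=8080")
}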
Example #15
func (zipper ApplicationZipper) Zip(dirOrZipFilePath string, targetFile *os.File) error {
	if zipper.IsZipFile(dirOrZipFilePath) {
		zipFile, err := os.Open(dirOrZipFilePath)
		if err != nil {
			return err
		}
		defer zipFile.Close()

		_, err = io.Copy(targetFile, zipFile)
		if err != nil {
			return err
		}
	} else {
		err := writeZipFile(dirOrZipFilePath, targetFile)
		if err != nil {
			return err
		}
	}

	_, err := targetFile.Seek(0, os.SEEK_SET)
	if err != nil {
		return err
	}

	return nil
}
Example #16
func mergeCoverprofile(file *os.File, out io.Writer) error {
	_, err := file.Seek(0, 0)
	if err != nil {
		return err
	}

	rd := bufio.NewReader(file)
	_, err = rd.ReadString('\n')
	if err == io.EOF {
		return nil
	}

	if err != nil {
		return err
	}

	_, err = io.Copy(out, rd)
	if err != nil {
		return err
	}

	err = file.Close()
	if err != nil {
		return err
	}

	return err
}
Example #17
func dataLookup(fh *os.File, offset int64) []byte {
	_, err := fh.Seek(offset, os.SEEK_SET)
	if err != nil {
		log.Fatal(err)
	}

	buffer := make([]byte, BUFFSIZE) // initial size of the buffer is 3kb
	line := make([]byte, 0, BUFFSIZE)
	prevLen := 0
	for {
		prevLen = len(line)
		n, err := fh.Read(buffer) // we read the next 3kb (or less)
		if err != nil && err != io.EOF {
			log.Fatal(err)
		}
		line = append(line, buffer[:n]...) // only keep the bytes actually read
		until := bytes.IndexByte(buffer[:n], '\n')
		if until > 0 { // We have a full line
			return line[:prevLen+until]
		}
		if err == io.EOF || n < BUFFSIZE {
			return line
		}
	}
	log.Fatal("We can't be here")
	return nil
}
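A usage sketch for dataLookup above; the path is a placeholder and the offset is assumed to point at the start of a line:
// lineAtOffset is a hypothetical caller that returns the line beginning at
// the given byte offset.
func lineAtOffset(path string, offset int64) string {
	fh, err := os.Open(path)
	if err != nil {
		log.Fatal(err)
	}
	defer fh.Close()
	return string(dataLookup(fh, offset))
}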
Example #18
func RewindFile(fileHandle *os.File) {
	_, rewindErr := fileHandle.Seek(0, 0)

	if rewindErr != nil {
		log.Fatalln("Unable to rewind file")
	}
}
Example #19
func maxIpInPart(part *os.File) (ip3 int, max int) {
	ip3Array := make([]int, 1<<24, 1<<24)
	b := make([]byte, 1<<16*3)
	part.Seek(0, 0)
	for {
		n, err := part.Read(b)
		if n > 0 {
			for i := 0; i < n; i += 3 {
				ip3Array[int(Ip3BytesToUint32(b[i:i+3]))]++
			}

		}
		if err != nil {
			break
		}
	}

	for i := 0; i < len(ip3Array); i++ {
		if ip3Array[i] > max {
			max = ip3Array[i]
			ip3 = i
		}
	}
	return
}
Example #20
func NewDraw(fp *os.File, labels []string) *Draw {
	w, h := termbox.Size()
	tab := INPROCESS

	inproc, arch, err := ReadCSV(fp)
	if err != nil {
		panic(err)
	}
	fp.Seek(0, 0)

	data := &Data{Labels: &LabelList{}}
	data.InProcess = inproc
	data.Archive = arch
	*data.Labels = labels

	view := mode.NewView(w, h)
	view.Reset()
	drawer := &mode.NormalDraw{
		View: *view,
		Tab:  tab,
	}
	drawer.SetLister(data.InProcess)

	return &Draw{
		Drawer: drawer,
		data:   data,
		tab:    tab,
		file:   fp,
	}
}
Example #21
func (self *Reader) savePosition(file *os.File) {
	var err error
	self.position, err = file.Seek(0, os.SEEK_CUR)
	if err != nil {
		log.WithField("error", err).Error("Get position error")
	}
}
Example #22
func (h *Harvester) initFileOffset(file *os.File) error {
	offset, err := file.Seek(0, os.SEEK_CUR)

	if h.getOffset() > 0 {
		// continue from last known offset

		logp.Debug("harvester",
			"harvest: %q position:%d (offset snapshot:%d)", h.Path, h.getOffset(), offset)
		_, err = file.Seek(h.getOffset(), os.SEEK_SET)
	} else if h.Config.TailFiles {
		// tail file if file is new and tail_files config is set

		logp.Debug("harvester",
			"harvest: (tailing) %q (offset snapshot:%d)", h.Path, offset)
		offset, err = file.Seek(0, os.SEEK_END)
		h.SetOffset(offset)

	} else {
		// get offset from file in case of encoding factory was
		// required to read some data.
		logp.Debug("harvester", "harvest: %q (offset snapshot:%d)", h.Path, offset)
		h.SetOffset(offset)
	}

	return err
}
Example #23
func (self *Reader) getPosition(file *os.File) int64 {
	position, err := file.Seek(0, os.SEEK_CUR)
	if err != nil {
		log.WithField("error", err).Error("Get position error")
	}
	return position
}
Example #24
func (wndb *WordNetDb) dataLookup(fh *os.File, offset int64) ([]byte, error) {
	_, err := fh.Seek(offset, os.SEEK_SET)
	if err != nil {
		return nil, err
	}

	buffer := make([]byte, BUFFSIZE) // initial size of the buffer is 3kb
	line := make([]byte, 0, BUFFSIZE)
	prevLen := 0
	for {
		prevLen = len(line)
		n, err := fh.Read(buffer) // we read the next 3kb (or less)
		if err != nil && err != io.EOF {
			return nil, err
		}
		line = append(line, buffer[:n]...) // only keep the bytes actually read
		until := bytes.IndexByte(buffer[:n], '\n')
		if until > 0 { // We have a full line
			return line[:prevLen+until], nil
		}
		if err == io.EOF || n < BUFFSIZE {
			return line, nil
		}
	}
	return nil, ERR_MSG(UNREACHABLE_CODE)
}
Example #25
func bundleReset(w *os.File, size int64) error {
	pending_offset := size - ENDSIZE
	if size == 0 {
		pending_offset = 0
	}
	offset, err := w.Seek(pending_offset, 0)
	if err != nil {
		return err
	}
	if offset != pending_offset {
		return errors.New("Failed to seek!")
	}
	err = w.Truncate(size)
	if err != nil {
		return err
	}
	if size == 0 {
		return nil
	}
	n, err := w.Write(zeroEnd)
	if err != nil || n != len(zeroEnd) {
		return errors.New("Failed to write end block")
	}
	return nil
}
Example #26
// newFileEncoder creates a new encoder with current file offset for the page writer.
func newFileEncoder(f *os.File, prevCrc uint32) (*encoder, error) {
	offset, err := f.Seek(0, os.SEEK_CUR)
	if err != nil {
		return nil, err
	}
	return newEncoder(f, prevCrc, int(offset)), nil
}
Example #27
File: file.go Project: nhlfr/rkt
func extractEmbeddedLayer(file *os.File, layerTarPath string, outputPath string) (*os.File, error) {
	log.Info("Extracting ", layerTarPath, "\n")
	_, err := file.Seek(0, 0)
	if err != nil {
		return nil, fmt.Errorf("error seeking file: %v", err)
	}

	var layerFile *os.File
	fileWalker := func(t *tarball.TarFile) error {
		if filepath.Clean(t.Name()) == layerTarPath {
			layerFile, err = os.Create(outputPath)
			if err != nil {
				return fmt.Errorf("error creating layer: %v", err)
			}

			_, err = io.Copy(layerFile, t.TarStream)
			if err != nil {
				return fmt.Errorf("error getting layer: %v", err)
			}
		}

		return nil
	}

	tr := tar.NewReader(file)
	if err := tarball.Walk(*tr, fileWalker); err != nil {
		return nil, err
	}

	if layerFile == nil {
		return nil, fmt.Errorf("file %q not found", layerTarPath)
	}

	return layerFile, nil
}
Example #28
func getParent(file *os.File, imgID string) (string, error) {
	var parent string

	_, err := file.Seek(0, 0)
	if err != nil {
		return "", fmt.Errorf("error seeking file: %v", err)
	}

	jsonPath := filepath.Join(imgID, "json")
	parentWalker := func(t *tarball.TarFile) error {
		if filepath.Clean(t.Name()) == jsonPath {
			jsonb, err := ioutil.ReadAll(t.TarStream)
			if err != nil {
				return fmt.Errorf("error reading layer json: %v", err)
			}

			var dockerData types.DockerImageData
			if err := json.Unmarshal(jsonb, &dockerData); err != nil {
				return fmt.Errorf("error unmarshaling layer data: %v", err)
			}

			parent = dockerData.Parent
		}

		return nil
	}

	tr := tar.NewReader(file)
	if err := tarball.Walk(*tr, parentWalker); err != nil {
		return "", err
	}

	return parent, nil
}
Example #29
func (h *Harvester) open() *os.File {
	var file *os.File

	// Special handling: "-" means read from standard input
	if h.Path == "-" {
		return os.Stdin
	}

	for {
		var err error
		file, err = os.Open(h.Path)

		if err != nil {
			// retry on failure.
			fmt.Printf("Failed opening %s: %s\n", h.Path, err)
			time.Sleep(5 * time.Second)
		} else {
			break
		}
	}

	// TODO(sissel): In the future, use the registry to determine where to seek.
	// TODO(sissel): Only seek if the file is a file, not a pipe or socket.
	file.Seek(0, os.SEEK_END)

	return file
}
Example #30
func ringOrBuilder(fileName string) (r ring.Ring, b *ring.Builder, err error) {
	var f *os.File
	if f, err = os.Open(fileName); err != nil {
		return
	}
	var gf *gzip.Reader
	if gf, err = gzip.NewReader(f); err != nil {
		return
	}
	header := make([]byte, 16)
	if _, err = io.ReadFull(gf, header); err != nil {
		return
	}
	if string(header[:5]) == "RINGv" {
		gf.Close()
		if _, err = f.Seek(0, 0); err != nil {
			return
		}
		r, err = ring.LoadRing(f)
	} else if string(header[:12]) == "RINGBUILDERv" {
		gf.Close()
		if _, err = f.Seek(0, 0); err != nil {
			return
		}
		b, err = ring.LoadBuilder(f)
	}
	return
}