// this reads from socketRead to fill up the given buffer unless an error is encountered or
// a workSize-aligned run of workSize zero bytes is discovered, causing a flush
func readUntilNullWorkSizeBatch(socketRead io.ReadCloser,
	batch []byte, workSize int) (size int, err error) {
	err = nil
	size = 0
	if workSize == 0 {
		size, err = io.ReadFull(socketRead, batch)
	} else {
		lastCheckedForNull := 0
		for err == nil {
			var offset int
			offset, err = socketRead.Read(batch[size:])
			size += offset
			if err == nil && size < len(batch) {
				endCheck := size - (size % workSize)
				if zeroWorkItem(batch[lastCheckedForNull:endCheck], workSize) {
					if size%workSize != 0 {
						rem := workSize - (size % workSize)
						offset, err = io.ReadFull(socketRead, batch[size:size+rem])
						size += offset
					}
					return
				}
				lastCheckedForNull = endCheck // need to check partial work items
			} else {
				return
			}
		}
	}
	return
}
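zeroWorkItem is referenced above but not included in the snippet; a minimal sketch of what it might look like, assuming a "null" work item is a workSize-aligned slot made up entirely of zero bytes (an assumption, not the original implementation):
// zeroWorkItem reports whether chunk (whose length is a multiple of workSize)
// contains at least one work item consisting only of zero bytes.
// Hypothetical helper, sketched from how readUntilNullWorkSizeBatch calls it.
func zeroWorkItem(chunk []byte, workSize int) bool {
	if workSize <= 0 {
		return false
	}
	for start := 0; start+workSize <= len(chunk); start += workSize {
		allZero := true
		for _, b := range chunk[start : start+workSize] {
			if b != 0 {
				allZero = false
				break
			}
		}
		if allZero {
			return true
		}
	}
	return false
}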
func (mfsm *MyFsm) Restore(inp io.ReadCloser) error {
	defer inp.Close()
	fmt.Printf("Restore......................\n")
	mfsm.mutex.Lock()
	defer mfsm.mutex.Unlock()
	var buffer bytes.Buffer
	readdata := make([]byte, 1024)
	for {
		n, err := inp.Read(readdata)
		if n > 0 {
			buffer.Write(readdata[:n]) // keep whatever arrived; a short read does not mean the stream is done
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
	}
	dec := gob.NewDecoder(&buffer)
	err := dec.Decode(&mfsm.data)
	errorOnExit(err)
	return nil
}
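Since gob.NewDecoder accepts any io.Reader, the manual read loop and intermediate buffer above are not strictly required; a shorter sketch of the same restore, assuming the same MyFsm fields and errorOnExit helper:
func (mfsm *MyFsm) Restore(inp io.ReadCloser) error {
	defer inp.Close()
	mfsm.mutex.Lock()
	defer mfsm.mutex.Unlock()
	// gob reads from the snapshot stream directly, so no bytes.Buffer is needed
	err := gob.NewDecoder(inp).Decode(&mfsm.data)
	errorOnExit(err)
	return nil
}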
// this reads from socketRead in a loop, sending batches of up to batchSize bytes to copyTo until
// socketRead is exhausted. It always blocks until a whole number of workSize units has been read
func readBuffer(copyTo chan<- []byte, socketRead io.ReadCloser, batchSize int, workSize int) {
	defer close(copyTo)
	for {
		batch := make([]byte, batchSize)
		size, err := socketRead.Read(batch)
		if err == nil && workSize != 0 && size%workSize != 0 {
			var lsize int
			lsize, err = io.ReadFull(
				socketRead,
				batch[size:size+workSize-(size%workSize)])
			size += lsize
		}
		if size > 0 {
			if err != nil && workSize != 0 {
				size -= (size % workSize)
			}
			copyTo <- batch[:size]
		}
		if err != nil {
			if err != io.EOF {
				log.Print("Error encountered in readBuffer:", err)
			}
			return
		}
	}
}
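A typical consumer simply drains the channel until readBuffer closes it; a minimal usage sketch (consumeFromSocket and the batch/work sizes are illustrative, not from the original code):
func consumeFromSocket(socketRead io.ReadCloser, process func([]byte)) {
	work := make(chan []byte, 4)
	go readBuffer(work, socketRead, 64*1024, 512) // batchSize and workSize chosen for illustration
	for batch := range work {
		process(batch) // readBuffer closes the channel once socketRead is drained
	}
}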
Example #4
func (pbp *PBP) Read(rc io.ReadCloser) error {
	binary.Read(rc, binary.LittleEndian, &pbp.cookie)
	if pbp.cookie == 0x464C457f {
		fmt.Printf("File is an elf, converting to empty PBP")
		bytes, _ := ioutil.ReadAll(rc)
		pbp.data[6] = append([]byte{0x7f, 0x45, 0x4c, 0x46}[:], bytes...)
		pbp.cookie = 0x50425000
		pbp.version = 0x00010000
		return nil
	}
	if pbp.cookie != 0x50425000 {
		return errors.New("bad cookie")
	}
	binary.Read(rc, binary.LittleEndian, &pbp.version)
	for i := 0; i < 8; i++ {
		binary.Read(rc, binary.LittleEndian, &pbp.offsets[i])
	}

	for i := 0; i < 7; i++ {
		pbp.data[i] = make([]byte, pbp.offsets[i+1]-pbp.offsets[i])
		if len(pbp.data[i]) > 0 {
			// a bare Read may return fewer bytes than the section size; io.ReadFull reads it completely
			if _, err := io.ReadFull(rc, pbp.data[i]); err != nil {
				return err
			}
		}
	}
	var err error
	pbp.data[7], err = ioutil.ReadAll(rc)
	return err
}
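The PBP type itself is not part of the snippet; judging from the fields Read touches, it presumably resembles the following (an assumption, not the original definition):
// assumed shape of PBP, inferred from the fields used in Read
type PBP struct {
	cookie  uint32
	version uint32
	offsets [8]uint32
	data    [8][]byte
}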
Example #5
func (c *Client) runLoop(r io.ReadCloser, m ReadOnlyMetadata) {
	defer c.conn.Close()
	defer r.Close()

	// if we have mp3 and metadata to handle we use a specialized loop
	if c.meta {
		c.mp3Loop(r, m)
		return
	}

	var n, wn int
	var err error
	var p = make([]byte, 16384)

	log.Println("icecast.client: using non-mp3 loop")
	for {
		n, err = r.Read(p)
		if err != nil {
			return
		}

		wn, err = c.bufconn.Write(p[:n])
		if wn != n || err != nil {
			return
		}
	}
}
Example #6
func outForward(wg *sync.WaitGroup, msgType byte, out chan *command, input io.ReadCloser) {
	defer wg.Done()

	for {
		data := make([]byte, 4096*2-3) // do not share

		n, err := input.Read(data)

		if debug {
			logger.Println("forward from", msgType, n, err)
		}

		if n > 0 {
			out <- &command{msgType, data[0:n]}
		}

		if err == io.EOF {
			break
		}

		if err != nil {
			if debug {
				logger.Println(msgType, err)
			}
			fatal_if(err)
		}
	}
}
Example #7
func pumpSplitReader(source io.ReadCloser, bufSize uint64, readers []*splitReader) {
	bufs := [][]byte{make([]byte, bufSize), make([]byte, bufSize)}
	for i := 0; ; i = (i + 1) % 2 {
		buf := bufs[i]
		total := 0

		n, err := source.Read(buf)

		for _, rd := range readers {
			if rd.closed {
				continue
			}
			total += 1

			select {
			case rd.ch <- splitReaderChunk{buf[0:n], err}:
			case <-rd.close:
				rd.closed = true
				close(rd.ch)
			}
		}

		if total == 0 || err == io.EOF {
			break
		}
	}
	source.Close()
	for _, rd := range readers {
		if !rd.closed {
			close(rd.ch)
		}
	}
}
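The splitReader and splitReaderChunk types are not shown; from the fields pumpSplitReader uses, they presumably look roughly like this (an assumption, not the original definitions):
// assumed shapes, inferred from pumpSplitReader
type splitReaderChunk struct {
	data []byte
	err  error
}
type splitReader struct {
	ch     chan splitReaderChunk // chunks delivered to this reader
	close  chan struct{}         // signalled when the reader is abandoned
	closed bool                  // set by the pump once it stops serving this reader
}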
func saveToFile(body io.ReadCloser, path string) int64 {
	if body == nil {
		return 0
	}

	defer body.Close()

	f, err := os.Create(path)

	if err != nil {
		return 0
	}

	defer f.Close()

	buffer := make([]byte, 1024)
	var count int64 = 0

	var n int

	for {
		n, err = body.Read(buffer)
		if n <= 0 || (err != nil && err != io.EOF) {
			break
		}

		count += int64(n)
		f.Write(buffer[:n])
	}

	return count
}
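Since the loop above is essentially a copy that ignores write errors, io.Copy can express the same thing more compactly; a sketch of that variant (saveToFileCopy is a hypothetical name):
func saveToFileCopy(body io.ReadCloser, path string) int64 {
	if body == nil {
		return 0
	}
	defer body.Close()
	f, err := os.Create(path)
	if err != nil {
		return 0
	}
	defer f.Close()
	// io.Copy runs the Read/Write loop internally and returns the byte count
	count, _ := io.Copy(f, body)
	return count
}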
Example #9
// read will decode the file
func (decoder *vorbisDecoder) read() error {
	var err error
	var channels, sampleRate int
	var reader io.ReadCloser
	reader, channels, sampleRate, err = vorbis.Decode(decoder.src)
	if err != nil {
		return err
	}
	defer reader.Close() // only safe to defer once Decode has succeeded; reader may be nil on error

	decoder.channels = int16(channels)
	decoder.sampleRate = int32(sampleRate)
	decoder.bitDepth = 16
	decoder.format = getFormat(decoder.channels, decoder.bitDepth)

	data := []byte{}
	for {
		tmp_buffer := make([]byte, 4097)
		n, err := reader.Read(tmp_buffer)
		data = append(data, tmp_buffer[:n]...) // keep the bytes delivered alongside a final io.EOF
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
	}

	decoder.dataSize = int32(len(data))
	decoder.src = bytes.NewReader(data)
	decoder.duration = decoder.ByteOffsetToDur(int32(len(data)) / formatBytes[decoder.GetFormat()])

	return err
}
Example #10
// loadWav reads a valid wave file into a header and a bunch of audio data into bytes.
// Invalid files return a nil header and an empty data slice.
// FUTURE: Handle the info block.
func (l *loader) loadWav(file io.ReadCloser) (wh *WavHdr, bytes []byte, err error) {
	wh = &WavHdr{}
	if err = binary.Read(file, binary.LittleEndian, wh); err != nil {
		return nil, []byte{}, fmt.Errorf("Invalid .wav audio file: %s", err)
	}

	// check that it really is a WAVE file.
	riff, wave := string(wh.RiffId[:]), string(wh.WaveId[:])
	if riff != "RIFF" || wave != "WAVE" {
		return nil, []byte{}, fmt.Errorf("Invalid .wav audio file")
	}

	// read the audio data.
	bytesRead := uint32(0)
	data := []byte{}
	inbuff := make([]byte, wh.DataSize)
	for bytesRead < wh.DataSize {
		inbytes, readErr := file.Read(inbuff)
		if readErr != nil {
			return nil, []byte{}, fmt.Errorf("Corrupt .wav audio file")
		}
		data = append(data, inbuff[:inbytes]...) // only append the bytes actually read
		bytesRead += uint32(inbytes)
	}
	if bytesRead != wh.DataSize {
		return nil, []byte{}, fmt.Errorf("Invalid .wav audio file %d %d", bytesRead, wh.DataSize)
	}
	return wh, data, nil
}
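Because the byte count is known from the header, the read loop can also be collapsed into a single io.ReadFull call; a sketch of that variant (readWavData is a hypothetical helper, not part of the original loader):
// readWavData reads exactly size bytes of audio data, or fails
func readWavData(file io.ReadCloser, size uint32) ([]byte, error) {
	data := make([]byte, size)
	if _, err := io.ReadFull(file, data); err != nil {
		return nil, fmt.Errorf("Corrupt .wav audio file: %s", err)
	}
	return data, nil
}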
Example #11
func createTorrent(fileName string, f io.ReadCloser, r *http.Request) ([]byte, error) {
	chunk := make([]byte, TORRENT_PIECE_LENGTH)

	torrent := Torrent{
		Encoding: "UTF-8",
		Info: TorrentInfo{
			PieceLength: TORRENT_PIECE_LENGTH,
			Name:        fileName,
		},
		UrlList: []string{fmt.Sprintf("%sselif/%s", getSiteURL(r), fileName)},
	}

	for {
		// every piece except the last must be exactly TORRENT_PIECE_LENGTH bytes,
		// so read full chunks rather than whatever a single Read happens to return
		n, err := io.ReadFull(f, chunk)
		if n > 0 {
			torrent.Info.Length += n
			torrent.Info.Pieces += string(hashPiece(chunk[:n]))
		}
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			break
		} else if err != nil {
			return []byte{}, err
		}
	}

	data, err := bencode.EncodeBytes(&torrent)
	if err != nil {
		return []byte{}, err
	}

	return data, nil
}
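hashPiece is not shown; in the BitTorrent format each entry in the "pieces" string is the SHA-1 digest of one piece, so it is presumably something along these lines (an assumption, not the original code):
// assumed helper: SHA-1 digest of a single piece (needs "crypto/sha1")
func hashPiece(piece []byte) []byte {
	sum := sha1.Sum(piece)
	return sum[:]
}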
Example #12
// loadWav reads a valid wave file into a header and a bunch of audio data into bytes.
// Invalid files return a nil header and an empty data slice.
func (l loader) loadWav(file io.ReadCloser) (wh *waveHeader, bytes []byte, err error) {
	wh = &waveHeader{}
	if err = binary.Read(file, binary.LittleEndian, wh); err != nil {
		return nil, []byte{}, fmt.Errorf("Invalid .wav audio file: %s", err)
	}

	// check that it really is a WAVE file.
	riff, wave := string(wh.RiffId[:]), string(wh.WaveId[:])
	if riff != "RIFF" || wave != "WAVE" {
		return nil, []byte{}, fmt.Errorf("Invalid .wav audio file")
	}

	// read the audio data.
	bytesRead := uint32(0)
	data := make([]byte, wh.DataSize)
	for bytesRead < wh.DataSize {
		// read directly into the remaining part of data; no temporary buffer or copy loop needed
		inbytes, readErr := file.Read(data[bytesRead:])
		if readErr != nil {
			return nil, []byte{}, fmt.Errorf("Corrupt .wav audio file")
		}
		bytesRead += uint32(inbytes)
	}
	if bytesRead != wh.DataSize {
		return nil, []byte{}, fmt.Errorf("Invalid .wav audio file")
	}
	return wh, data, nil
}
Example #13
// copy everything from the pty master to the websocket
// using base64 encoding for now due to limitations in term.js
func sendPtyOutputToConnection(conn *websocket.Conn, reader io.ReadCloser, finalizer *ReadWriteRoutingFinalizer) {
	defer closeConn(conn, finalizer)

	buf := make([]byte, 8192)
	var buffer bytes.Buffer

	for {
		n, err := reader.Read(buf)
		if err != nil {
			if !isNormalPtyError(err) {
				log.Printf("Failed to read from pty: %s", err)
			}
			return
		}

		i, err := normalizeBuffer(&buffer, buf, n)
		if err != nil {
			log.Printf("Cound't normalize byte buffer to UTF-8 sequence, due to an error: %s", err.Error())
			return
		}
		if err = conn.WriteMessage(websocket.TextMessage, buffer.Bytes()); err != nil {
			log.Printf("Failed to send websocket message: %s, due to occurred error %s", string(buffer.Bytes()), err.Error())
			return
		}
		buffer.Reset()
		if i < n {
			buffer.Write(buf[i:n])
		}
	}
}
Example #14
// ReadPopulate reads from jsonReader in order to fill in target
func ReadPopulate(jsonReader io.ReadCloser, target interface{}) (err error) {
	data := make([]byte, 0, 1024)
	chunk := make([]byte, 1024)
	for {
		var count int
		count, err = jsonReader.Read(chunk)
		data = append(data, chunk[:count]...)

		if err == io.EOF {
			jsonReader.Close()
			break
		}
		if err != nil {
			return
		}
	}

	if len(data) == 0 {
		err = nil
		return
	}

	err = json.Unmarshal(data, target)
	return
}
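The read-accumulate-unmarshal pattern above matches what encoding/json's streaming decoder already does; a shorter sketch of the same idea (ReadPopulateSketch is a hypothetical name, and empty input is mapped back to nil to match the original behaviour):
func ReadPopulateSketch(jsonReader io.ReadCloser, target interface{}) error {
	defer jsonReader.Close()
	err := json.NewDecoder(jsonReader).Decode(target)
	if err == io.EOF { // empty input: leave target untouched, as ReadPopulate does
		return nil
	}
	return err
}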
Example #15
func (accu *Accumulator) ReadFrom(reader io.ReadCloser) error {
	defer reader.Close()
	for {
		n, err := reader.Read(accu.buf[accu.writerIndex:])
		if err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}

		if n == 0 {
			continue
		}

		accu.writerIndex += n

		for readableBytes := accu.ReadableBytes(); readableBytes >= accu.lengthFieldLength; {

			// only a 2-byte length field is handled here; note that a bare "break"
			// inside a switch would only leave the switch, not this loop
			if accu.lengthFieldLength != 2 {
				break
			}
			frameLength := accu.getShort(accu.readerIndex)

			if frameLength > accu.maxPacketLength {
				return ErrTooLongFrame
			}

			if readableBytes >= frameLength {
				accu.handler.OnMessage(NewReadBuffer(accu.buf[accu.readerIndex+accu.lengthFieldLength : accu.readerIndex+frameLength]))
				accu.readerIndex += frameLength
				readableBytes -= frameLength
			} else {
				break
			}
		}

		if len(accu.buf)-accu.writerIndex <= 100 {
			// new buffer
			readableBytes := accu.ReadableBytes()
			if readableBytes == 0 {
				accu.buf = make([]byte, defaultBufferSize)
				accu.readerIndex = 0
				accu.writerIndex = 0
			} else {
				newLength := readableBytes + defaultBufferSize
				newBuf := make([]byte, newLength)
				copy(newBuf, accu.buf[accu.readerIndex:accu.writerIndex])
				accu.buf = newBuf
				accu.readerIndex = 0
				accu.writerIndex = readableBytes
			}
		}
	}
	return nil
}
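getShort is not included in the snippet; assuming the 2-byte length field is big-endian, it might be implemented along these lines (an assumption, the original could just as well use little-endian):
// assumed helper: decode the 2-byte, big-endian length field at offset (needs "encoding/binary")
func (accu *Accumulator) getShort(offset int) int {
	return int(binary.BigEndian.Uint16(accu.buf[offset : offset+2]))
}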
Example #16
func encode(out *io.PipeWriter, in io.ReadCloser, enc string, level int) {
	var (
		e   encoder
		err error
	)

	defer func() {
		if e != nil {
			e.Close()
		}

		if err == nil {
			err = io.EOF
		}

		out.CloseWithError(err)
		in.Close()
	}()

	if level == flate.BestSpeed {
		pool := encoderPool(enc)
		pe := pool.Get()
		if pe != nil {
			e = pe.(encoder)
			defer pool.Put(pe)
		}
	}

	if e == nil {
		e, err = newEncoder(enc, level)
		if err != nil {
			return
		}
	}

	e.Reset(out)

	b := make([]byte, bufferSize)
	for {
		n, rerr := in.Read(b)
		if n > 0 {
			_, err = e.Write(b[:n])
			if err != nil {
				break
			}

			err = e.Flush()
			if err != nil {
				break
			}
		}

		if rerr != nil {
			err = rerr
			break
		}
	}
}
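The encoder type used above is not shown; from the calls made (Write, Flush, Reset, Close) it is presumably an interface along these lines, which *gzip.Writer and *flate.Writer both satisfy (an assumption, not the original declaration):
// assumed shape of the encoder interface, inferred from how encode uses it
type encoder interface {
	io.WriteCloser
	Flush() error
	Reset(w io.Writer)
}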
Example #17
func (self *rawxService) doGetChunk(rep http.ResponseWriter, req *http.Request) {
	inChunk, err := self.repo.Get(req.URL.Path)
	if inChunk != nil {
		defer inChunk.Close()
	}
	if err != nil {
		setError(rep, err)
		rep.WriteHeader(http.StatusInternalServerError)
		return
	}

	// Check if there is some compression
	var v []byte
	var in io.ReadCloser
	v, err = inChunk.GetAttr(AttrPrefix + AttrNameCompression)
	if err != nil {
		in = ioutil.NopCloser(inChunk)
		err = nil
	} else if bytes.Equal(v, AttrValueZLib) {
		in, err = zlib.NewReader(inChunk) // decompress directly from the chunk; "in" is still nil at this point
	} else {
		in = nil
		err = ErrCompressionNotManaged
	}
	if in != nil {
		defer in.Close()
	}
	if err != nil {
		setError(rep, err)
		rep.WriteHeader(http.StatusInternalServerError)
		return
	}

	// Now transmit the clear data to the client
	length := inChunk.Size()
	for _, pair := range AttrMap {
		v, err := inChunk.GetAttr(AttrPrefix + pair.attr)
		if err == nil { // only expose attributes that were read successfully
			rep.Header().Set(pair.header, string(v))
		}
	}
	rep.Header().Set("Content-Length", fmt.Sprintf("%v", length))
	rep.WriteHeader(200)
	buf := make([]byte, bufSize)
	for {
		n, err := in.Read(buf)
		if n > 0 {
			rep.Write(buf[:n])
		}
		if err != nil {
			if err != io.EOF {
				log.Printf("Write() error : %v", err)
			}
			break
		}
	}
}
Example #18
func readToCh(rc io.ReadCloser, out chan<- byte) {
	var b [1]byte
	n, _ := rc.Read(b[:])
	if n == 1 {
		out <- b[0]
	}
	rc.Close()
	close(out)
}
Example #19
func (c *consulFSM) Restore(old io.ReadCloser) error {
	defer old.Close()

	// Create a new state store
	state, err := NewStateStore(c.logOutput)
	if err != nil {
		return err
	}
	c.state.Close()
	c.state = state

	// Create a decoder
	var handle codec.MsgpackHandle
	dec := codec.NewDecoder(old, &handle)

	// Read in the header
	var header snapshotHeader
	if err := dec.Decode(&header); err != nil {
		return err
	}

	// Populate the new state
	msgType := make([]byte, 1)
	for {
		// Read the message type
		_, err := old.Read(msgType)
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}

		// Decode
		switch structs.MessageType(msgType[0]) {
		case structs.RegisterRequestType:
			var req structs.RegisterRequest
			if err := dec.Decode(&req); err != nil {
				return err
			}
			c.applyRegister(&req, header.LastIndex)

		case structs.KVSRequestType:
			var req structs.DirEntry
			if err := dec.Decode(&req); err != nil {
				return err
			}
			if err := c.state.KVSRestore(&req); err != nil {
				return err
			}

		default:
			return fmt.Errorf("Unrecognized msg type: %v", msgType)
		}
	}

	return nil
}
Example #20
func send(conn net.Conn, r io.ReadCloser) {
	var b [256]byte
	for {
		n, err := r.Read(b[0:100])
		if err != nil {
			break
		}
		conn.Write(b[0:n])
	}
}
Example #21
func (client *Client) readBiomeData(r io.ReadCloser, column *Column) (err error) {
	data := make([]byte, 256)
	_, err = io.ReadFull(r, data) // the biome array is exactly 256 bytes; a bare Read may return fewer
	if err != nil {
		return err
	}

	column.Biomes = data
	return nil
}
Example #22
func (c *FakeClient) Put(path string, content io.ReadCloser, contentLength int64) error {
	c.PutPath = path
	contentBytes := make([]byte, contentLength)
	io.ReadFull(content, contentBytes) // a single Read may not deliver contentLength bytes
	defer content.Close()
	c.PutContents = string(contentBytes)
	c.PutContentLength = contentLength

	return c.PutErr
}
Example #23
// Code c/c from io.Copy() modified to handle escape sequence
func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) {
	if len(keys) == 0 {
		// Default keys : ctrl-p ctrl-q
		keys = []byte{16, 17}
	}
	buf := make([]byte, 32*1024)
	for {
		nr, er := src.Read(buf)
		if nr > 0 {
			// ---- Docker addition
			preservBuf := []byte{}
			for i, key := range keys {
				preservBuf = append(preservBuf, buf[0:nr]...)
				if nr != 1 || buf[0] != key {
					break
				}
				if i == len(keys)-1 {
					if err := src.Close(); err != nil {
						return 0, err
					}
					return 0, nil
				}
				nr, er = src.Read(buf)
			}
			var nw int
			var ew error
			if len(preservBuf) > 0 {
				nw, ew = dst.Write(preservBuf)
				nr = len(preservBuf)
			} else {
				// ---- End of docker
				nw, ew = dst.Write(buf[0:nr])
			}
			if nw > 0 {
				written += int64(nw)
			}
			if ew != nil {
				err = ew
				break
			}
			if nr != nw {
				err = io.ErrShortWrite
				break
			}
		}
		if er == io.EOF {
			break
		}
		if er != nil {
			err = er
			break
		}
	}
	return written, err
}
Example #24
func pipe(wg *sync.WaitGroup, buf *[]byte, p io.ReadCloser) {
	defer wg.Done()
	b := make([]byte, 4096)
	for {
		n, err := p.Read(b)
		*buf = append(*buf, b[:n]...)
		if err != nil {
			return
		}
	}
}
Example #25
func readToChan(fp io.ReadCloser, ch chan []byte) {
	for {
		buf := make([]byte, 1024)
		n, err := fp.Read(buf)
		if n > 0 {
			ch <- buf[:n]
		} else if err != nil {
			return
		}
	}
}
Example #26
func (s *ExportedStreams) Read(_ context.Context, a keybase1.ReadArg) (buf []byte, err error) {
	var r io.ReadCloser
	if r, err = s.GetReader(a.S); err != nil {
		return
	}
	var n int
	buf = make([]byte, a.Sz)
	n, err = r.Read(buf)
	buf = buf[0:n]
	return
}
/**
 * Helper function to get the next line from our output. Currently this works
 * on the complete output rather than just the stdout pipe. TODO: use stdout
 * instead of the complete output - might be more efficient (memory-wise?).
 */
func process_lines(stdout io.ReadCloser, out chan string, done *bool) {
	defer close(out)
	// Param: done
	//		this switch is necessary to let the lines processing goroutine
	// 		know that it has to stop, furthermore we need to empty the channel to
	//		avoid having a stuck lines processing goroutine (producer starvation)
	var (
		i    int
		a    int
		s    []byte = make([]byte, 0x1000)
		lbuf []byte = make([]byte, 0)
		nbuf []byte
		size int
		err  error
	)
	for {
		size, err = stdout.Read(s)
		if err != nil {
			break
		}
		a = 0
		for i = 0; i < size && !*done; i++ {
			if s[i] == '\n' {
				if i > a || len(lbuf) > 0 { // skip empty lines (e.g. a=0, i=0), but then flush a non-empty lbuf
					if len(lbuf) > 0 {
						out <- string(lbuf) + string(s[a:i])
						lbuf = make([]byte, 0)
					} else {
						out <- string(s[a:i])
					}
				}
				a = i + 1
			}
		}
		// append remaining unprocessed output to lbuf
		if i > a && !*done {
			nbuf = make([]byte, len(lbuf)+i-a)
			copy(nbuf, lbuf)
			copy(nbuf[len(lbuf):], s[a:i])
			lbuf = nbuf
		}
	}
	// process remaining output
	if len(lbuf) > 0 && !*done {
		out <- string(lbuf)
	}
	// check error sequence (EOF is expected, anything else is unexpected)
	if err != nil {
		if err != io.EOF {
			infoLogger.Println("Error splitting lines:", err)
		}
	}
}
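The done flag described in the comment is owned by the caller, which must keep draining the channel after setting it; a hypothetical usage sketch (consumeUntil and its arguments are placeholders, not part of the original code):
func consumeUntil(stdout io.ReadCloser, stop func(string) bool) {
	done := false
	lines := make(chan string, 64)
	go process_lines(stdout, lines, &done)
	for line := range lines {
		if done {
			continue // keep draining so process_lines never blocks on a full channel
		}
		if stop(line) {
			done = true // in real code this shared flag would need proper synchronization
		}
	}
}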
Example #28
func grab_feedback(stream io.ReadCloser, results chan [][]byte) {
	// TODO: Somehow, we need to return the err from this procedure, should
	// one occur.  It should be included in the resulting test run log.
	list := make([][]byte, 0)
	buf := make([]byte, 4096)
	for {
		n, err := stream.Read(buf)
		if n > 0 {
			list = append(list, buf[:n])
			buf = make([]byte, 4096) // fresh buffer: the previous one is now owned by list
		}
		if err != nil { // io.EOF or a real error both end the loop; see the TODO above
			break
		}
	}
	results <- list
}
Example #29
func getResult(out io.ReadCloser) (result string, err error) {
	n := 0
	buf := make([]byte, 1<<16)
	if runtime.GOOS == "windows" {
		gbk := mahonia.NewDecoder("gbk")
		reader := gbk.NewReader(out)
		n, err = reader.Read(buf)
	} else {
		n, err = out.Read(buf)
	}

	return string(buf[:n]), err
}
Example #30
// TODO: check output file size and rotate.
func flushStream(stdoutPipe io.ReadCloser, writer *bufio.Writer) {
	buffer := make([]byte, 1000)
	for {
		n, err := stdoutPipe.Read(buffer)
		if n > 0 {
			writer.Write(buffer[:n]) // write only the bytes just read; don't shrink the buffer for later iterations
			writer.Flush()
		}
		if err != nil { // io.EOF or any other read error ends the stream
			stdoutPipe.Close()
			break
		}
	}
}