Example #1
File: common.go Project: zooniverse/gonx
func main() {
	flag.Parse()

	// Read given file or from STDIN
	var file io.Reader
	if logFile == "dummy" {
		file = strings.NewReader(`89.234.89.123 [08/Nov/2013:13:39:18 +0000] "GET /api/foo/bar HTTP/1.1"`)
	} else if logFile == "-" {
		file = os.Stdin
	} else {
		// Assign to the outer file; a := declaration here would shadow it and leave it nil.
		f, err := os.Open(logFile)
		if err != nil {
			panic(err)
		}
		defer f.Close()
		file = f
	}

	// Create reader and call Read method until EOF
	reader := gonx.NewReader(file, format)
	for {
		rec, err := reader.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		// Process the record... e.g.
		fmt.Printf("%+v\n", rec)
	}
}
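For context, the snippet relies on package-level variables wired up with the flag package elsewhere in the project. A minimal sketch of what those declarations could look like (names and defaults are illustrative; the dummy format here matches the sample log line above):

package main

import "flag"

var (
	logFile string
	format  string
)

func init() {
	flag.StringVar(&logFile, "log", "dummy", "Log file name; '-' reads from STDIN, 'dummy' uses a built-in sample line")
	flag.StringVar(&format, "format", `$remote_addr [$time_local] "$request"`, "Log record format")
}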
Example #2
func main() {
	flag.Parse()

	// Create a parser based on given format
	parser := gonx.NewParser(format)

	// Read given file or from STDIN
	var file io.Reader
	if logFile == "dummy" {
		file = strings.NewReader(`89.234.89.123 [08/Nov/2013:13:39:18 +0000] "GET /t/100x100/foo/bar.jpeg HTTP/1.1" 200 1027 2430 0.014 "100x100" 10 1`)
	} else if logFile == "-" {
		file = os.Stdin
	} else {
		// Assign to the outer file; a := declaration here would shadow it and leave it nil.
		f, err := os.Open(logFile)
		if err != nil {
			panic(err)
		}
		defer f.Close()
		file = f
	}

	// Make a chain of reducers to get some stats from log file
	reducer := gonx.NewChain(
		&gonx.Avg{[]string{"request_time", "read_time", "gen_time"}},
		&gonx.Sum{[]string{"body_bytes_sent"}},
		&gonx.Count{})
	output := gonx.MapReduce(file, parser, reducer)
	for res := range output {
		// Process the record... e.g.
		fmt.Printf("Parsed entry: %+v\n", res)
	}
}
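As in Example #1, logFile and format are assumed to be package-level flag variables. For this variant the format string has to define the fields the reducers reference (request_time, read_time, gen_time, body_bytes_sent). A purely illustrative declaration; the project's actual field order may differ:

func init() {
	// Hypothetical format naming the fields consumed by the Avg and Sum reducers.
	flag.StringVar(&format, "format",
		`$remote_addr [$time_local] "$request" $status $body_bytes_sent $read_time $gen_time $request_time`,
		"Log record format")
}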
Example #3
func readLGTM() (image.Image, error) {
	var lgtmFile io.Reader
	fmt.Print("reading LGTM png...")

	switch LGTM.(type) {
	case string:
		lgtmPath := LGTM.(string)

		if err := checkPNG(lgtmPath); err != nil {
			fmt.Println()
			return nil, err
		}

		// Assign to the outer lgtmFile; a := declaration here would shadow it and leave it nil.
		f, err := os.Open(lgtmPath)
		if err != nil {
			fmt.Println()
			return nil, err
		}
		defer f.Close()
		lgtmFile = f
	case []byte:
		lgtmFile = bytes.NewReader(LGTM.([]byte))
	}

	lgtm, err := png.Decode(lgtmFile)
	if err != nil {
		fmt.Println()
		return nil, err
	}

	fmt.Println("done")
	return lgtm, nil
}
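The snippet assumes a package-level value roughly like the following (a hypothetical declaration; checkPNG is a project helper not shown here):

// LGTM holds either a path to a PNG file (string) or raw PNG data ([]byte),
// which is why readLGTM switches on its dynamic type.
var LGTM interface{}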
Example #4
File: reader.go Project: go-nosql/golang
// Decode reads a TIFF image from r and returns it as an image.Image.
// The type of Image returned depends on the contents of the TIFF.
func Decode(r io.Reader) (img image.Image, err os.Error) {
	d, err := newDecoder(r)
	if err != nil {
		return
	}

	// Check if we have the right number of strips, offsets and counts.
	rps := int(d.firstVal(tRowsPerStrip))
	if rps == 0 {
		// A zero RowsPerStrip would cause a divide-by-zero below; treat it as a bad header.
		return nil, FormatError("inconsistent header")
	}
	numStrips := (d.config.Height + rps - 1) / rps
	if len(d.features[tStripOffsets]) < numStrips || len(d.features[tStripByteCounts]) < numStrips {
		return nil, FormatError("inconsistent header")
	}

	switch d.mode {
	case mGray, mGrayInvert:
		img = image.NewGray(d.config.Width, d.config.Height)
	case mPaletted:
		img = image.NewPaletted(d.config.Width, d.config.Height, d.palette)
	case mNRGBA:
		img = image.NewNRGBA(d.config.Width, d.config.Height)
	case mRGB, mRGBA:
		img = image.NewRGBA(d.config.Width, d.config.Height)
	}

	var p []byte
	for i := 0; i < numStrips; i++ {
		ymin := i * rps
		// The last strip may be shorter.
		if i == numStrips-1 && d.config.Height%rps != 0 {
			rps = d.config.Height % rps
		}
		offset := int64(d.features[tStripOffsets][i])
		n := int64(d.features[tStripByteCounts][i])
		switch d.firstVal(tCompression) {
		case cNone:
			// TODO(bsiegert): Avoid copy if r is a tiff.buffer.
			// ReadAt fills len(p) bytes, so the slice needs length n, not just capacity n.
			p = make([]byte, n)
			_, err = d.r.ReadAt(p, offset)
		case cLZW:
			r := lzw.NewReader(io.NewSectionReader(d.r, offset, n), lzw.MSB, 8)
			p, err = ioutil.ReadAll(r)
			r.Close()
		case cDeflate, cDeflateOld:
			// Declare r separately so a ReadAll failure is reported via the outer err.
			var r io.ReadCloser
			r, err = zlib.NewReader(io.NewSectionReader(d.r, offset, n))
			if err != nil {
				return nil, err
			}
			p, err = ioutil.ReadAll(r)
			r.Close()
		default:
			err = UnsupportedError("compression")
		}
		if err != nil {
			return
		}
		err = d.decode(img, p, ymin, ymin+rps)
	}
	return
}
Example #5
// putTargets writes the data from reader to each of the target URLs.
func putTargets(targetURLs []string, length int64, reader io.Reader) error {
	var tgtReaders []io.ReadCloser
	var tgtWriters []io.WriteCloser
	var tgtClients []client.Client

	for _, targetURL := range targetURLs {
		tgtClient, err := target2Client(targetURL)
		if err != nil {
			return iodine.New(err, nil)
		}
		tgtClients = append(tgtClients, tgtClient)
		tgtReader, tgtWriter := io.Pipe()
		tgtReaders = append(tgtReaders, tgtReader)
		tgtWriters = append(tgtWriters, tgtWriter)
	}

	go func() {
		var writers []io.Writer
		for _, tgtWriter := range tgtWriters {
			defer tgtWriter.Close()
			writers = append(writers, io.Writer(tgtWriter))
		}
		multiTgtWriter := io.MultiWriter(writers...)
		io.CopyN(multiTgtWriter, reader, length)
	}()

	var wg sync.WaitGroup
	errorCh := make(chan error, len(tgtClients))

	func() { // Parallel putObject
		defer close(errorCh) // Each routine gets to return one err status.

		for i := range tgtClients {
			wg.Add(1)
			// make local copy for go routine
			tgtClient := tgtClients[i]
			tgtReader := tgtReaders[i]

			go func(targetClient client.Client, reader io.ReadCloser, errorCh chan<- error) {
				defer wg.Done()
				err := targetClient.PutObject(length, reader)
				if err != nil {
					errorCh <- iodine.New(err, map[string]string{"failedURL": targetClient.URL().String()})
					reader.Close()
					return
				}
				errorCh <- err // return nil error = success.
			}(tgtClient, tgtReader, errorCh)
		}
		wg.Wait()
	}()

	for err := range errorCh {
		if err != nil { // Return on first error encounter.
			return err
		}
	}
	return nil // success.
}
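The pattern here, one source fanned out to several targets through pipes and a MultiWriter, can be reduced to a standard-library-only sketch. client.Client, iodine, and target2Client are project-internal, so the sketch below replaces the targets with in-memory buffers; the fanOut name and everything else in it are illustrative:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
	"sync"
)

// fanOut copies src into n in-memory "targets" concurrently, mirroring the
// io.Pipe + io.MultiWriter fan-out used by putTargets.
func fanOut(src io.Reader, n int) ([]*bytes.Buffer, error) {
	prs := make([]*io.PipeReader, n)
	pws := make([]*io.PipeWriter, n)
	ws := make([]io.Writer, n)
	for i := range prs {
		prs[i], pws[i] = io.Pipe()
		ws[i] = pws[i]
	}

	// Feed every pipe from the single source, then close the write ends,
	// propagating any copy error to the readers (a nil error closes them cleanly).
	go func() {
		_, err := io.Copy(io.MultiWriter(ws...), src)
		for _, pw := range pws {
			pw.CloseWithError(err)
		}
	}()

	// Each "target" drains its own pipe concurrently.
	bufs := make([]*bytes.Buffer, n)
	errCh := make(chan error, n)
	var wg sync.WaitGroup
	for i, pr := range prs {
		bufs[i] = new(bytes.Buffer)
		wg.Add(1)
		go func(buf *bytes.Buffer, pr *io.PipeReader) {
			defer wg.Done()
			defer pr.Close()
			if _, err := io.Copy(buf, pr); err != nil {
				errCh <- err
			}
		}(bufs[i], pr)
	}
	wg.Wait()
	close(errCh)
	if err := <-errCh; err != nil { // nil when the channel is empty and closed
		return nil, err
	}
	return bufs, nil
}

func main() {
	bufs, err := fanOut(strings.NewReader("hello targets"), 3)
	if err != nil {
		panic(err)
	}
	for i, b := range bufs {
		fmt.Printf("target %d received %q\n", i, b.String())
	}
}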
Example #6
File: nginx.go Project: zooniverse/gonx
func main() {
	flag.Parse()

	// Read given file or from STDIN
	var file io.Reader
	if logFile == "dummy" {
		file = strings.NewReader(`89.234.89.123 [08/Nov/2013:13:39:18 +0000] "GET /api/foo/bar HTTP/1.1"`)
	} else if logFile == "-" {
		file = os.Stdin
	} else {
		// Assign to the outer file; a := declaration here would shadow it and leave it nil.
		f, err := os.Open(logFile)
		if err != nil {
			panic(err)
		}
		defer f.Close()
		file = f
	}

	// Use nginx config file to extract format by the name
	var nginxConfig io.Reader
	if conf == "dummy" {
		nginxConfig = strings.NewReader(`
            http {
                log_format   main  '$remote_addr [$time_local] "$request"';
            }
        `)
	} else {
		// Same pitfall: assign to the outer nginxConfig rather than shadowing it.
		cf, err := os.Open(conf)
		if err != nil {
			panic(err)
		}
		defer cf.Close()
		nginxConfig = cf
	}

	// Read from STDIN and use log_format to parse log records
	reader, err := gonx.NewNginxReader(file, nginxConfig, format)
	if err != nil {
		panic(err)
	}
	for {
		rec, err := reader.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		// Process the record... e.g.
		fmt.Printf("%+v\n", rec)
	}
}
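In this variant, format is the name of a log_format in the nginx config ("main" in the dummy config above) rather than a format string, and conf points at that config file. Hypothetical flag declarations to go with the sketch under Example #1:

var conf string

func init() {
	flag.StringVar(&conf, "conf", "dummy", "nginx config file; 'dummy' uses the built-in sample above")
	flag.StringVar(&format, "format", "main", "Name of the log_format entry to extract from the config")
}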
Example #7
// compressionReader returns a reader that decompresses the page data according to the scanner's codec.
func (s *scanner) compressionReader(r io.Reader, header *thrift.PageHeader) (io.Reader, error) {
	switch s.codec {
	case thrift.CompressionCodec_GZIP:
		r, err := gzip.NewReader(r)
		if err != nil {
			return nil, fmt.Errorf("could not create gzip reader:%s", err)
		}
		b, err := ioutil.ReadAll(r)
		if err != nil {
			return nil, fmt.Errorf("could not read gzip reader:%s", err)
		}
		if err := r.Close(); err != nil {
			log.Printf("WARNING: error closing gzip reader: %s", err)
		}
		return bytes.NewReader(b), nil

	case thrift.CompressionCodec_LZO:
		// Not implemented yet; see https://github.com/rasky/go-lzo/blob/master/decompress.go#L149
		return nil, fmt.Errorf("LZO compression is not yet implemented")

	case thrift.CompressionCodec_SNAPPY:
		src, err := ioutil.ReadAll(r)
		if err != nil {
			return nil, fmt.Errorf("could not read snappy compressed data:%s", err)
		}

		// snappy.Decode uses out as the destination buffer when it is large
		// enough and returns the slice that holds the decoded bytes.
		out := make([]byte, int(header.GetUncompressedPageSize()))
		out, err = snappy.Decode(out, src)
		if err != nil {
			return nil, fmt.Errorf("could not decode snappy data:%s", err)
		}

		return bytes.NewReader(out), nil

	case thrift.CompressionCodec_UNCOMPRESSED:
		// use the same reader
		return r, nil

	default:
		return nil, fmt.Errorf("unknown compression format %s", s.codec)
	}
}
Example #8
// putTargets writes the data from reader to each of the target URLs. If length is 0, it reads until EOF.
func putTargets(targetURLs []string, length int64, reader io.Reader) *probe.Error {
	var tgtReaders []*io.PipeReader
	var tgtWriters []*io.PipeWriter
	var tgtClients []client.Client

	for _, targetURL := range targetURLs {
		tgtClient, err := url2Client(targetURL)
		if err != nil {
			return err.Trace(targetURL)
		}
		tgtClients = append(tgtClients, tgtClient)
		tgtReader, tgtWriter := io.Pipe()
		tgtReaders = append(tgtReaders, tgtReader)
		tgtWriters = append(tgtWriters, tgtWriter)
	}

	go func() {
		var writers []io.Writer
		for _, tgtWriter := range tgtWriters {
			writers = append(writers, io.Writer(tgtWriter))
		}

		multiTgtWriter := io.MultiWriter(writers...)
		var e error
		switch length {
		case 0:
			_, e = io.Copy(multiTgtWriter, reader)
		default:
			_, e = io.CopyN(multiTgtWriter, reader, length)
		}
		for _, tgtWriter := range tgtWriters {
			if e != nil {
				tgtWriter.CloseWithError(e)
			}
			tgtWriter.Close()
		}
	}()

	var wg sync.WaitGroup
	errorCh := make(chan *probe.Error, len(tgtClients))

	func() { // Parallel putObject
		defer close(errorCh) // Each routine gets to return one err status.
		for i := range tgtClients {
			wg.Add(1)
			// make local copy for go routine
			tgtClient := tgtClients[i]
			tgtReader := tgtReaders[i]

			go func(targetClient client.Client, reader io.ReadCloser, errorCh chan<- *probe.Error) {
				defer wg.Done()
				defer reader.Close()
				err := targetClient.PutObject(length, reader)
				if err != nil {
					errorCh <- err.Trace()
					return
				}
			}(tgtClient, tgtReader, errorCh)
		}
		wg.Wait()
	}()

	// Return on first error encounter.
	err := <-errorCh
	if err != nil {
		return err.Trace()
	}

	return nil // success.
}
Example #9
// Decode reads a TIFF image from r and returns it as an image.Image.
// The type of Image returned depends on the contents of the TIFF.
func Decode(r io.Reader) (img image.Image, err error) {
	d, err := newDecoder(r)
	if err != nil {
		return
	}

	blockPadding := false
	blockWidth := d.config.Width
	blockHeight := d.config.Height
	blocksAcross := 1
	blocksDown := 1

	if d.config.Width == 0 {
		blocksAcross = 0
	}
	if d.config.Height == 0 {
		blocksDown = 0
	}

	var blockOffsets, blockCounts []uint

	if int(d.firstVal(tTileWidth)) != 0 {
		blockPadding = true

		blockWidth = int(d.firstVal(tTileWidth))
		blockHeight = int(d.firstVal(tTileLength))

		if blockWidth != 0 {
			blocksAcross = (d.config.Width + blockWidth - 1) / blockWidth
		}
		if blockHeight != 0 {
			blocksDown = (d.config.Height + blockHeight - 1) / blockHeight
		}

		blockCounts = d.features[tTileByteCounts]
		blockOffsets = d.features[tTileOffsets]

	} else {
		if int(d.firstVal(tRowsPerStrip)) != 0 {
			blockHeight = int(d.firstVal(tRowsPerStrip))
		}

		if blockHeight != 0 {
			blocksDown = (d.config.Height + blockHeight - 1) / blockHeight
		}

		blockOffsets = d.features[tStripOffsets]
		blockCounts = d.features[tStripByteCounts]
	}

	// Check if we have the right number of strips/tiles, offsets and counts.
	if n := blocksAcross * blocksDown; len(blockOffsets) < n || len(blockCounts) < n {
		return nil, FormatError("inconsistent header")
	}

	imgRect := image.Rect(0, 0, d.config.Width, d.config.Height)
	switch d.mode {
	case mGray, mGrayInvert:
		if d.bpp == 16 {
			img = image.NewGray16(imgRect)
		} else {
			img = image.NewGray(imgRect)
		}
	case mPaletted:
		img = image.NewPaletted(imgRect, d.palette)
	case mNRGBA:
		if d.bpp == 16 {
			img = image.NewNRGBA64(imgRect)
		} else {
			img = image.NewNRGBA(imgRect)
		}
	case mRGB, mRGBA:
		if d.bpp == 16 {
			img = image.NewRGBA64(imgRect)
		} else {
			img = image.NewRGBA(imgRect)
		}
	}

	for i := 0; i < blocksAcross; i++ {
		blkW := blockWidth
		if !blockPadding && i == blocksAcross-1 && d.config.Width%blockWidth != 0 {
			blkW = d.config.Width % blockWidth
		}
		for j := 0; j < blocksDown; j++ {
			blkH := blockHeight
			if !blockPadding && j == blocksDown-1 && d.config.Height%blockHeight != 0 {
				blkH = d.config.Height % blockHeight
			}
			offset := int64(blockOffsets[j*blocksAcross+i])
			n := int64(blockCounts[j*blocksAcross+i])
			switch d.firstVal(tCompression) {

			// According to the spec, Compression does not have a default value,
			// but some tools interpret a missing Compression value as none so we do
			// the same.
			case cNone, 0:
				if b, ok := d.r.(*buffer); ok {
					d.buf, err = b.Slice(int(offset), int(n))
				} else {
					d.buf = make([]byte, n)
					_, err = d.r.ReadAt(d.buf, offset)
				}
			case cLZW:
				r := lzw.NewReader(io.NewSectionReader(d.r, offset, n), lzw.MSB, 8)
				d.buf, err = ioutil.ReadAll(r)
				r.Close()
			case cDeflate, cDeflateOld:
				var r io.ReadCloser
				r, err = zlib.NewReader(io.NewSectionReader(d.r, offset, n))
				if err != nil {
					return nil, err
				}
				d.buf, err = ioutil.ReadAll(r)
				r.Close()
			case cPackBits:
				d.buf, err = unpackBits(io.NewSectionReader(d.r, offset, n))
			default:
				err = UnsupportedError(fmt.Sprintf("compression value %d", d.firstVal(tCompression)))
			}
			if err != nil {
				return nil, err
			}

			xmin := i * blockWidth
			ymin := j * blockHeight
			xmax := xmin + blkW
			ymax := ymin + blkH
			err = d.decode(img, xmin, ymin, xmax, ymax)
			if err != nil {
				return nil, err
			}
		}
	}
	return
}
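A minimal usage sketch, assuming this Decode lives in a TIFF package imported under the usual path (e.g. golang.org/x/image/tiff); the file name is illustrative:

package main

import (
	"fmt"
	"os"

	"golang.org/x/image/tiff"
)

func main() {
	f, err := os.Open("example.tiff") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	img, err := tiff.Decode(f)
	if err != nil {
		panic(err)
	}
	fmt.Println("decoded image bounds:", img.Bounds())
}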
Example #10
// Decode reads a TIFF image from r and returns it as an image.Image.
// The type of Image returned depends on the contents of the TIFF.
func Decode(r io.Reader) (img image.Image, err error) {
	d, err := newDecoder(r)
	if err != nil {
		return
	}

	// Check if we have the right number of strips, offsets and counts.
	rps := int(d.firstVal(tRowsPerStrip))
	if rps == 0 {
		// Assume only one strip.
		rps = d.config.Height
	}
	if rps == 0 {
		// d.config.Height is zero here too; bail out instead of dividing by zero below.
		return nil, FormatError("inconsistent header")
	}
	numStrips := (d.config.Height + rps - 1) / rps
	if len(d.features[tStripOffsets]) < numStrips || len(d.features[tStripByteCounts]) < numStrips {
		return nil, FormatError("inconsistent header")
	}

	switch d.mode {
	case mGray, mGrayInvert:
		img = image.NewGray(image.Rect(0, 0, d.config.Width, d.config.Height))
	case mPaletted:
		img = image.NewPaletted(image.Rect(0, 0, d.config.Width, d.config.Height), d.palette)
	case mNRGBA:
		img = image.NewNRGBA(image.Rect(0, 0, d.config.Width, d.config.Height))
	case mRGB, mRGBA:
		img = image.NewRGBA(image.Rect(0, 0, d.config.Width, d.config.Height))
	}

	for i := 0; i < numStrips; i++ {
		ymin := i * rps
		// The last strip may be shorter.
		if i == numStrips-1 && d.config.Height%rps != 0 {
			rps = d.config.Height % rps
		}
		offset := int64(d.features[tStripOffsets][i])
		n := int64(d.features[tStripByteCounts][i])
		switch d.firstVal(tCompression) {
		case cNone:
			if b, ok := d.r.(*buffer); ok {
				d.buf, err = b.Slice(int(offset), int(n))
			} else {
				d.buf = make([]byte, n)
				_, err = d.r.ReadAt(d.buf, offset)
			}
		case cLZW:
			r := lzw.NewReader(io.NewSectionReader(d.r, offset, n), lzw.MSB, 8)
			d.buf, err = ioutil.ReadAll(r)
			r.Close()
		case cDeflate, cDeflateOld:
			// Declare r separately so a ReadAll failure is reported via the outer err.
			var r io.ReadCloser
			r, err = zlib.NewReader(io.NewSectionReader(d.r, offset, n))
			if err != nil {
				return nil, err
			}
			d.buf, err = ioutil.ReadAll(r)
			r.Close()
		case cPackBits:
			d.buf, err = unpackBits(io.NewSectionReader(d.r, offset, n))
		default:
			err = UnsupportedError("compression")
		}
		if err != nil {
			return
		}
		err = d.decode(img, ymin, ymin+rps)
	}
	return
}