Example #1
1
// decode reads an image from r and modifies the image as defined by opts.
// swapDimensions indicates the decoded image will be rotated after being
// returned, and when interpreting opts, the post-rotation dimensions should
// be considered.
// The decoded image is returned in im. The registered name of the decoder
// used is returned in format. If the image was not successfully decoded, err
// will be non-nil.  If the decoded image was made smaller, needRescale will
// be true.
func decode(r io.Reader, opts *DecodeOpts, swapDimensions bool) (im image.Image, format string, err error, needRescale bool) {
	if opts == nil {
		// Fall-back to normal decode.
		im, format, err = image.Decode(r)
		return im, format, err, false
	}

	var buf bytes.Buffer
	tr := io.TeeReader(r, &buf)
	ic, format, err := image.DecodeConfig(tr)
	if err != nil {
		return nil, "", err, false
	}

	mr := io.MultiReader(&buf, r)
	b := image.Rect(0, 0, ic.Width, ic.Height)
	sw, sh, needRescale := opts.rescaleDimensions(b, swapDimensions)
	if !needRescale {
		im, format, err = image.Decode(mr)
		return im, format, err, false
	}

	imageDebug(fmt.Sprintf("Resizing from %dx%d -> %dx%d", ic.Width, ic.Height, sw, sh))
	if format == "cr2" {
		// Replace mr with an io.Reader to the JPEG thumbnail embedded in a
		// CR2 image.
		if mr, err = cr2.NewReader(mr); err != nil {
			return nil, "", err, false
		}
		format = "jpeg"
	}

	if format == "jpeg" && fastjpeg.Available() {
		factor := fastjpeg.Factor(ic.Width, ic.Height, sw, sh)
		if factor > 1 {
			var buf bytes.Buffer
			tr := io.TeeReader(mr, &buf)
			im, err = fastjpeg.DecodeDownsample(tr, factor)
			switch err.(type) {
			case fastjpeg.DjpegFailedError:
				log.Printf("Retrying with jpeg.Decode, because djpeg failed with: %v", err)
				im, err = jpeg.Decode(io.MultiReader(&buf, mr))
			case nil:
				// fallthrough to rescale() below.
			default:
				return nil, format, err, false
			}
			return rescale(im, sw, sh), format, err, true
		}
	}

	// Fall-back to normal decode.
	im, format, err = image.Decode(mr)
	if err != nil {
		return nil, "", err, false
	}
	return rescale(im, sw, sh), format, err, needRescale
}
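The pattern worth noting above: image.DecodeConfig consumes part of the stream, so those bytes are teed into buf and later replayed with io.MultiReader(&buf, r) before the full decode. A minimal, standalone sketch of that peek-then-rewind idiom (the function name is illustrative; it uses only bytes, image, and io, and assumes the relevant format packages are registered, e.g. via a blank import of image/jpeg):

// peekConfig reads just the image header from r to learn its dimensions,
// then returns a reader that replays the consumed bytes followed by the
// rest of r, so image.Decode can still be run on the result.
func peekConfig(r io.Reader) (image.Config, io.Reader, error) {
	var buf bytes.Buffer
	cfg, _, err := image.DecodeConfig(io.TeeReader(r, &buf))
	if err != nil {
		return image.Config{}, nil, err
	}
	return cfg, io.MultiReader(&buf, r), nil
}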
Example #2
0
File: main.go Project: rjeczalik/cmd
func prepend(dst string) (err error) {
	tmp, err := ioutil.TempFile(filepath.Split(dst))
	if err != nil {
		return err
	}
	rdst, err := os.Open(dst)
	if err != nil {
		return nonil(err, tmp.Close(), os.Remove(tmp.Name()))
	}
	var errCleanup error
	defer func() {
		switch errCleanup {
		case nil:
			if err = nonil(tmp.Close(), rdst.Close()); err != nil {
				os.Remove(tmp.Name())
			}
			// os.Rename fails under Windows if destination file exists.
			if err = os.Remove(dst); err != nil {
				os.Remove(tmp.Name())
			}
			if err = os.Rename(tmp.Name(), dst); err != nil {
				err = errors.New(err.Error() + " (prepended content is safe under " + tmp.Name() + ")")
			}
		default:
			nonil(tmp.Close(), rdst.Close(), os.Remove(tmp.Name()))
			if errCleanup != errNop {
				err = errCleanup
			}
		}
	}()
	var r io.Reader
	fi, err := os.Stdin.Stat()
	if err != nil {
		errCleanup = err
		return
	}
	switch {
	case src != "":
		f, err := os.Open(src)
		if err != nil {
			errCleanup = err
			return err
		}
		defer f.Close()
		r = f
	case fi.Mode()&os.ModeCharDevice == 0: // stackoverflow.com/questions/22744443
		r = io.MultiReader(bytes.NewReader(stdin.Bytes()), io.TeeReader(os.Stdin, &stdin))
	default:
		errCleanup = errNop
	}
	if unique {
		r = multiunique(nop(r), rdst)
	} else {
		r = io.MultiReader(nop(r), rdst)
	}
	_, errCleanup = io.Copy(tmp, r)
	return err
}
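Stripped of the cleanup machinery (nonil, errNop, and the package-level src/stdin/unique flags belong to that program), the core idea is: write the new content, then the old file, into a temp file in the same directory and rename it over the original. A hedged sketch under those assumptions, using only the standard library:

// prependSimple writes newContent followed by the current contents of path
// into a temporary file next to it, then renames the temp file over path.
// Note: on Windows, os.Rename fails if the destination exists, which is why
// the code above removes dst first.
func prependSimple(path string, newContent io.Reader) error {
	tmp, err := ioutil.TempFile(filepath.Dir(path), ".prepend-")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // best effort; harmless after a successful rename
	orig, err := os.Open(path)
	if err != nil {
		tmp.Close()
		return err
	}
	defer orig.Close()
	if _, err := io.Copy(tmp, io.MultiReader(newContent, orig)); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}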
Example #3
0
// MIMETypeFromReader takes a reader, sniffs the beginning of it,
// and returns the mime (if sniffed, else "") and a new reader
// that's the concatenation of the bytes sniffed and the remaining
// reader.
func MIMETypeFromReader(r io.Reader) (mime string, reader io.Reader) {
	var buf bytes.Buffer
	_, err := io.Copy(&buf, io.LimitReader(r, 1024))
	mime = MIMEType(buf.Bytes())
	if err != nil {
		return mime, io.MultiReader(&buf, errReader{err})
	}
	return mime, io.MultiReader(&buf, r)
}
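A usage sketch, assuming MIMETypeFromReader lives in the same package as the caller: the caller must keep reading from the returned reader rather than the original one, because the sniffed bytes have already been consumed from r.

// saveUpload sniffs the MIME type of r, then copies the complete stream
// (sniffed prefix included) into w.
func saveUpload(r io.Reader, w io.Writer) (string, error) {
	mime, body := MIMETypeFromReader(r)
	if mime == "" {
		mime = "application/octet-stream"
	}
	_, err := io.Copy(w, body) // body replays the sniffed bytes before the rest of r
	return mime, err
}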
Example #4
0
File: v1.go Project: nawawi/drive
// encrypter returns an encrypting reader based on the keys and IV provided.
func encrypter(r io.Reader, aesKey, hmacKey, iv, header []byte) (io.Reader, error) {
	b, err := aes.NewCipher(aesKey)
	if err != nil {
		return nil, err
	}
	h := hmac.New(hashFunc, hmacKey)
	hr := &hashReadWriter{hash: h}
	sr := &cipher.StreamReader{R: r, S: cipher.NewCTR(b, iv)}
	return io.MultiReader(io.TeeReader(io.MultiReader(bytes.NewReader(header), sr), hr), hr), nil
}
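This composes three layers: the plaintext header and the CTR-encrypted payload are teed into an HMAC as they are read, and the MAC is appended at the end by placing the same hashReadWriter last in the outer io.MultiReader. The hashReadWriter type itself is not shown; a minimal sketch of how such a helper could look, inferred from its use above rather than taken from the project:

// hashReadWriter feeds everything written to it into the wrapped hash.
// Once it is read from, it stops accepting writes and yields hash.Sum(nil),
// so reading it after the teed data appends the MAC to the stream.
type hashReadWriter struct {
	hash hash.Hash
	done bool
	sum  io.Reader
}

func (h *hashReadWriter) Write(p []byte) (int, error) {
	if h.done {
		return 0, errors.New("hashReadWriter: write after read")
	}
	return h.hash.Write(p)
}

func (h *hashReadWriter) Read(p []byte) (int, error) {
	if !h.done {
		h.done = true
		h.sum = bytes.NewReader(h.hash.Sum(nil))
	}
	return h.sum.Read(p)
}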
Example #5
0
File: main.go Project: rjeczalik/fs
func gotree(root string, printroot bool, spy memfs.FS, w io.Writer) (err error) {
	var (
		r      io.Reader
		pr, pw = io.Pipe()
		ch     = make(chan error, 1)
		ndir   int
		nfile  int
		fn     filepath.WalkFunc
	)
	if dir {
		fn = countdirdelfile(&ndir, spy)
	} else {
		fn = countdirfile(&ndir, &nfile)
	}
	if err = spy.Walk(string(os.PathSeparator), fn); err != nil {
		return
	}
	go func() {
		ch <- nonnil(memfs.Unix.Encode(spy, pw), pw.Close())
	}()
	switch {
	case dir && printroot:
		r = io.MultiReader(
			strings.NewReader(fmt.Sprintf("%s%c", root, os.PathSeparator)),
			pr,
			strings.NewReader(fmt.Sprintf("\n%d directories\n", ndir-1)),
		)
	case dir:
		r = io.MultiReader(
			pr,
			strings.NewReader(fmt.Sprintf("\n%d directories\n", ndir-1)),
		)
	case printroot:
		r = io.MultiReader(
			strings.NewReader(fmt.Sprintf("%s%c", root, os.PathSeparator)),
			pr,
			strings.NewReader(fmt.Sprintf("\n%d directories, %d files\n", ndir-1, nfile)),
		)
	default:
		r = io.MultiReader(
			pr,
			strings.NewReader(fmt.Sprintf("\n%d directories, %d files\n", ndir-1, nfile)),
		)
	}
	_, err = io.Copy(w, r)
	if e := <-ch; e != nil && err == nil {
		err = e
	}
	return
}
Example #6
0
func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) {
	archive, err := TarWithOptions(origin, options)
	if err != nil {
		t.Fatal(err)
	}
	defer archive.Close()

	buf := make([]byte, 10)
	if _, err := archive.Read(buf); err != nil {
		return nil, err
	}
	wrap := io.MultiReader(bytes.NewReader(buf), archive)

	detectedCompression := DetectCompression(buf)
	compression := options.Compression
	if detectedCompression.Extension() != compression.Extension() {
		return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension())
	}

	tmp, err := ioutil.TempDir("", "docker-test-untar")
	if err != nil {
		return nil, err
	}
	defer os.RemoveAll(tmp)
	if err := Untar(wrap, tmp, nil); err != nil {
		return nil, err
	}
	if _, err := os.Stat(tmp); err != nil {
		return nil, err
	}

	return ChangesDirs(origin, tmp)
}
Example #7
0
// Join the shards and write the data segment to dst.
//
// Only the data shards are considered.
//
// You must supply the exact output size you want.
// If there are too few shards given, ErrTooFewShards will be returned.
// If the total data size is less than outSize, ErrShortData will be returned.
func (r rsStream) Join(dst io.Writer, shards []io.Reader, outSize int64) error {
	// Do we have enough shards?
	if len(shards) < r.r.DataShards {
		return ErrTooFewShards
	}

	// Trim off parity shards if any
	shards = shards[:r.r.DataShards]
	for i := range shards {
		if shards[i] == nil {
			return StreamReadError{Err: ErrShardNoData, Stream: i}
		}
	}
	// Join all shards
	src := io.MultiReader(shards...)

	// Copy data to dst
	n, err := io.CopyN(dst, src, outSize)
	if err == io.EOF {
		return ErrShortData
	}
	if err != nil {
		return err
	}
	if n != outSize {
		return ErrShortData
	}
	return nil
}
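The same joining idea, written against plain files instead of the reedsolomon stream API (a standalone illustration; these names are not part of that package): concatenate the data shards with io.MultiReader and copy exactly outSize bytes so any zero padding on the last shard is dropped.

// joinFiles concatenates the shard files in order and writes exactly
// outSize bytes of the result to dst.
func joinFiles(dst io.Writer, paths []string, outSize int64) error {
	readers := make([]io.Reader, 0, len(paths))
	for _, p := range paths {
		f, err := os.Open(p)
		if err != nil {
			return err
		}
		defer f.Close() // all shards stay open until the copy finishes
		readers = append(readers, f)
	}
	n, err := io.CopyN(dst, io.MultiReader(readers...), outSize)
	if err == io.EOF {
		return fmt.Errorf("short data: got %d of %d bytes", n, outSize)
	}
	return err
}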
Example #8
0
func smudgeCommand(cmd *cobra.Command, args []string) {
	requireStdin("This command should be run by the Git 'smudge' filter")
	lfs.InstallHooks(false)

	// keeps the initial buffer from lfs.DecodePointer
	b := &bytes.Buffer{}
	r := io.TeeReader(os.Stdin, b)

	ptr, err := lfs.DecodePointer(r)
	if err != nil {
		mr := io.MultiReader(b, os.Stdin)
		_, err := io.Copy(os.Stdout, mr)
		if err != nil {
			Panic(err, "Error writing data to stdout:")
		}
		return
	}

	if smudgeInfo {
		localPath, err := lfs.LocalMediaPath(ptr.Oid)
		if err != nil {
			Exit(err.Error())
		}

		stat, err := os.Stat(localPath)
		if err != nil {
			Print("%d --", ptr.Size)
		} else {
			Print("%d %s", stat.Size(), localPath)
		}
		return
	}

	filename := smudgeFilename(args, err)
	cb, file, err := lfs.CopyCallbackFile("smudge", filename, 1, 1)
	if err != nil {
		Error(err.Error())
	}

	cfg := lfs.Config
	download := lfs.FilenamePassesIncludeExcludeFilter(filename, cfg.FetchIncludePaths(), cfg.FetchExcludePaths())

	if smudgeSkip || lfs.Config.GetenvBool("GIT_LFS_SKIP_SMUDGE", false) {
		download = false
	}

	err = ptr.Smudge(os.Stdout, filename, download, cb)
	if file != nil {
		file.Close()
	}

	if err != nil {
		ptr.Encode(os.Stdout)
		// Download declined error is ok to skip if we weren't requesting download
		if !(lfs.IsDownloadDeclinedError(err) && !download) {
			LoggedError(err, "Error accessing media: %s (%s)", filename, ptr.Oid)
			os.Exit(2)
		}
	}
}
Example #9
0
// Stream returns one channel that combines the stdout and stderr of the command
// as it is run on the remote machine, and another that sends true when the
// command is done. The sessions and channels will then be closed.
func (ssh_conf *ClientSSH) Stream(command string) (output chan string, done chan bool, err error) {
	// connect to remote host
	session, err := ssh_conf.connect()
	if err != nil {
		return output, done, err
	}
	// connect to both outputs (they are of type io.Reader)
	outReader, err := session.StdoutPipe()
	if err != nil {
		return output, done, err
	}
	errReader, err := session.StderrPipe()
	if err != nil {
		return output, done, err
	}
	// combine outputs, create a line-by-line scanner
	outputReader := io.MultiReader(outReader, errReader)
	err = session.Start(command)
	scanner := bufio.NewScanner(outputReader)
	// continuously send the command's output over the channel
	outputChan := make(chan string)
	done = make(chan bool)
	go func(scanner *bufio.Scanner, out chan string, done chan bool) {
		defer close(outputChan)
		defer close(done)
		for scanner.Scan() {
			outputChan <- scanner.Text()
		}
		// close all of our open resources
		done <- true
		session.Close()
	}(scanner, outputChan, done)
	return outputChan, done, err
}
Example #10
func (s *Command) Run() error {
	if s.client == nil {
		return errors.New("Not connected")
	}

	session, err := s.client.NewSession()
	if err != nil {
		return err
	}

	var envVariables bytes.Buffer
	for _, keyValue := range s.Environment {
		envVariables.WriteString("export " + strconv.Quote(keyValue) + "\n")
	}

	session.Stdin = io.MultiReader(
		&envVariables,
		bytes.NewBuffer(s.Stdin),
	)
	session.Stdout = s.Stdout
	session.Stderr = s.Stderr
	err = session.Run(s.Command)
	session.Close()
	return err
}
Example #11
0
File: main.go Project: saljam/dnsproxy
func tunnel(conn net.Conn) {
	defer conn.Close()

	// We start with a JSON header, which currently only has the dest addr.
	hdrdec := json.NewDecoder(conn)
	var hdr header
	err := hdrdec.Decode(&hdr)
	if err != nil {
		log.Printf("Couldn't parse tunnelled connection header: %v", err)
		return
	}

	destc, err := net.Dial("tcp", hdr.Destaddr)
	if err != nil {
		log.Printf("Couldn't dial destination $v: %v", hdr.Destaddr, err)
		return
	}
	defer destc.Close()

	log.Printf("Now tunnelling %v to %v", conn.RemoteAddr(), destc.RemoteAddr())
	done := make(chan struct{})
	go func() {
		io.Copy(destc, io.MultiReader(hdrdec.Buffered(), conn))
		done <- struct{}{}
	}()
	io.Copy(conn, destc)
	<-done
}
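json.Decoder reads ahead of the value it decodes, so the bytes it buffered must be stitched back in front of the connection before the raw byte copy; that is what io.MultiReader(hdrdec.Buffered(), conn) does. A small standalone sketch of the idiom (the header shape here is illustrative):

// readHeader decodes one JSON value from the front of conn and returns a
// reader positioned exactly after it: first the decoder's buffered bytes,
// then the rest of the connection.
func readHeader(conn io.Reader) (map[string]string, io.Reader, error) {
	dec := json.NewDecoder(conn)
	var hdr map[string]string
	if err := dec.Decode(&hdr); err != nil {
		return nil, nil, err
	}
	return hdr, io.MultiReader(dec.Buffered(), conn), nil
}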
Example #12
0
func copyToTemp(reader io.Reader, fileSize int64, cb progress.CopyCallback) (oid string, size int64, tmp *os.File, err error) {
	tmp, err = TempFile("")
	if err != nil {
		return
	}

	defer tmp.Close()

	oidHash := sha256.New()
	writer := io.MultiWriter(oidHash, tmp)

	if fileSize == 0 {
		cb = nil
	}

	by, ptr, err := DecodeFrom(reader)
	if err == nil && len(by) < 512 {
		err = errutil.NewCleanPointerError(err, ptr, by)
		return
	}

	multi := io.MultiReader(bytes.NewReader(by), reader)
	size, err = tools.CopyWithCallback(writer, multi, fileSize, cb)

	if err != nil {
		return
	}

	oid = hex.EncodeToString(oidHash.Sum(nil))
	return
}
Example #13
0
File: section.go Project: kourge/ggit
// Returns an io.Reader that yields the section name in square brackets as the
// first line. Subsequent lines are the underlying Entry structs serialized in
// order, each indented by a single horizontal tab rune '\t' and each separated
// by a new line rune '\n'.
func (section *Section) Reader() io.Reader {
	offset := 3
	readers := make([]io.Reader, len(section.Dict)*3+offset)
	readers[0] = bytes.NewReader([]byte{'['})
	readers[1] = strings.NewReader(section.Name)
	readers[2] = bytes.NewReader([]byte{']', '\n'})

	keys := make([]string, len(section.Dict))
	{
		i := 0
		for k, _ := range section.Dict {
			keys[i] = k
			i++
		}
	}
	sort.Strings(keys)

	for i, k := range keys {
		v := section.Dict[k]
		readers[i*3+offset+0] = bytes.NewReader([]byte{'\t'})
		readers[i*3+offset+1] = (&Entry{k, v}).Reader()
		readers[i*3+offset+2] = bytes.NewReader([]byte{'\n'})
	}

	return io.MultiReader(readers...)
}
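The same compose-from-pieces approach, independent of the ggit Section and Entry types (a standalone sketch with illustrative names): the serialized form is never built as one string, it is just a sequence of small readers.

// sectionReader yields an INI-style section: "[name]" on the first line,
// then one tab-indented "key = value" line per key, in the order given.
func sectionReader(name string, keys []string, values map[string]string) io.Reader {
	readers := []io.Reader{strings.NewReader("[" + name + "]\n")}
	for _, k := range keys {
		readers = append(readers, strings.NewReader("\t"+k+" = "+values[k]+"\n"))
	}
	return io.MultiReader(readers...)
}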
Example #14
0
File: temp.go Project: daozhao/s3weed
// TeeRead writes the data from the reader into the writer and returns a ReadCloser that replays the same data.
func TeeRead(w io.Writer, r io.Reader, maxMemory int64) (io.ReadCloser, error) {
	b := bytes.NewBuffer(nil)
	if maxMemory <= 0 {
		maxMemory = 1 << 20 // 1Mb
	}
	size, err := io.CopyN(io.MultiWriter(w, b), r, maxMemory+1)
	if err != nil && err != io.EOF {
		return nil, err
	}
	if size <= maxMemory {
		return ioutil.NopCloser(bytes.NewReader(b.Bytes())), nil
	}
	// too big, write to disk and flush buffer
	file, err := ioutil.TempFile("", "reader-")
	if err != nil {
		return nil, err
	}
	nm := file.Name()
	size, err = io.Copy(io.MultiWriter(w, file), io.MultiReader(b, r))
	if err != nil {
		file.Close()
		os.Remove(nm)
		return nil, err
	}
	file.Close()
	fh, err := os.Open(nm)
	return tempFile{File: fh}, err
}
Example #15
0
File: temp.go Project: daozhao/s3weed
// NewReadSeeker returns a copy of the io.Reader r that can be seeked and closed.
func NewReadSeeker(r io.Reader, maxMemory int64) (ReadSeekCloser, error) {
	b := bytes.NewBuffer(nil)
	if maxMemory <= 0 {
		maxMemory = 1 << 20 // 1Mb
	}
	size, err := io.CopyN(b, r, maxMemory+1)
	if err != nil && err != io.EOF {
		return nil, err
	}
	if size <= maxMemory {
		return &tempBuf{bytes.NewReader(b.Bytes())}, nil
	}
	// too big, write to disk and flush buffer
	file, err := ioutil.TempFile("", "reader-")
	if err != nil {
		return nil, err
	}
	nm := file.Name()
	size, err = io.Copy(file, io.MultiReader(b, r))
	if err != nil {
		file.Close()
		os.Remove(nm)
		return nil, err
	}
	file.Close()
	fh, err := os.Open(nm)
	return tempFile{File: fh}, err
}
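A hedged usage sketch for the helper above, assuming it sits in the same package and that the returned ReadSeekCloser's Close also removes any temp file it created: buffer a stream once, then consume it twice.

// hashThenRewind reads r fully to compute its SHA-256, then seeks back to
// the start so the same bytes can be consumed again (e.g. for an upload).
func hashThenRewind(r io.Reader) (ReadSeekCloser, []byte, error) {
	rs, err := NewReadSeeker(r, 1<<20) // keep up to 1 MB in memory, then spill to disk
	if err != nil {
		return nil, nil, err
	}
	h := sha256.New()
	if _, err := io.Copy(h, rs); err != nil {
		rs.Close()
		return nil, nil, err
	}
	if _, err := rs.Seek(0, io.SeekStart); err != nil {
		rs.Close()
		return nil, nil, err
	}
	return rs, h.Sum(nil), nil
}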
Example #16
0
func tarUntar(t *testing.T, origin string, compression Compression) error {
	archive, err := Tar(origin, compression)
	if err != nil {
		t.Fatal(err)
	}

	buf := make([]byte, 10)
	if _, err := archive.Read(buf); err != nil {
		return err
	}
	archive = io.MultiReader(bytes.NewReader(buf), archive)

	detectedCompression := DetectCompression(buf)
	if detectedCompression.Extension() != compression.Extension() {
		return fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension())
	}

	tmp, err := ioutil.TempDir("", "docker-test-untar")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmp)
	if err := Untar(archive, tmp, nil); err != nil {
		return err
	}
	if _, err := os.Stat(tmp); err != nil {
		return err
	}
	return nil
}
Example #17
0
// Returns an io.ReadCloser for the given file, such that the bytes read are
// ready for upload: specifically, if encryption is enabled, the contents
// are encrypted with the given key and the initialization vector is
// prepended to the returned bytes. Otherwise, the contents of the file are
// returned directly.
func getFileContentsReaderForUpload(path string, encrypt bool,
	iv []byte) (io.ReadCloser, int64, error) {
	f, err := os.Open(path)
	if err != nil {
		return f, 0, err
	}

	stat, err := os.Stat(path)
	if err != nil {
		return nil, 0, err
	}
	fileSize := stat.Size()

	if encrypt {
		if key == nil {
			key = decryptEncryptionKey()
		}

		r := makeEncrypterReader(key, iv, f)

		// Prepend the initialization vector to the returned bytes.
		r = io.MultiReader(bytes.NewReader(iv[:aes.BlockSize]), r)

		readCloser := struct {
			io.Reader
			io.Closer
		}{r, f}
		return readCloser, fileSize + aes.BlockSize, nil
	}
	return f, fileSize, nil
}
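The IV-prepending step can be shown with the standard library alone (makeEncrypterReader and the package-level key are specific to that program; this sketch is a generic equivalent): encrypt with AES-CTR via cipher.StreamReader and put the IV in front with io.MultiReader so the decrypting side can read it back first.

// encryptWithIV returns a reader yielding a random IV followed by the
// AES-CTR ciphertext of plaintext. key must be 16, 24 or 32 bytes long.
func encryptWithIV(key []byte, plaintext io.Reader) (io.Reader, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	iv := make([]byte, aes.BlockSize)
	if _, err := rand.Read(iv); err != nil { // crypto/rand
		return nil, err
	}
	enc := &cipher.StreamReader{S: cipher.NewCTR(block, iv), R: plaintext}
	return io.MultiReader(bytes.NewReader(iv), enc), nil
}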
Example #18
0
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
	buf := make([]byte, 10)
	totalN := 0
	for totalN < 10 {
		n, err := archive.Read(buf[totalN:])
		if err != nil {
			if err == io.EOF {
				return nil, fmt.Errorf("Tarball too short")
			}
			return nil, err
		}
		totalN += n
		utils.Debugf("[tar autodetect] n: %d", n)
	}
	compression := DetectCompression(buf)
	wrap := io.MultiReader(bytes.NewReader(buf), archive)

	switch compression {
	case Uncompressed:
		return ioutil.NopCloser(wrap), nil
	case Gzip:
		return gzip.NewReader(wrap)
	case Bzip2:
		return ioutil.NopCloser(bzip2.NewReader(wrap)), nil
	case Xz:
		return xzDecompress(wrap)
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}
Example #19
0
func runWithLogging(name string, args ...string) error {
	cmd := exec.Command(name, args...)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return err
	}

	if err := cmd.Start(); err != nil {
		return err
	}

	scanner := bufio.NewScanner(io.MultiReader(stdout, stderr))
	for scanner.Scan() {
		log.Printf("%s: %s\n", name, scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		log.Printf("error reading %s's stdout/stderr: %s\n", name, err)
	}

	if err := cmd.Wait(); err != nil {
		return err
	}
	return nil
}
Example #20
0
func (resource *S3Resource) Store() error {
	opts := &s3.PutOptions{
		ServerSideEncryption: true,
		ContentType:          resource.Request.Header.Get("Content-Type"),
	}
	buf := bytes.NewBuffer(make([]byte, 0, s3.MinPartSize))
	_, e := io.CopyN(buf, resource.Request.Body, s3.MinPartSize)
	if e == io.EOF {
		// less than min multipart size => direct upload
		return resource.Client.Put(resource.Bucket, resource.key(), buf.Bytes(), opts)
	} else if e != nil {
		return e
	}
	mr := io.MultiReader(buf, resource.Request.Body)

	mo := &s3.MultipartOptions{
		PartSize: 5 * 1024 * 1024,
		Callback: func(res *s3.UploadPartResult) {
			if res.Error != nil {
				logger.Print("ERROR: " + e.Error())
			} else if res.Part != nil {
				logger.Printf("uploaded: %03d (%s) %d", res.Part.PartNumber, res.Part.ETag, res.CurrentSize)
			}
		},
		PutOptions: opts,
	}
	_, e = resource.Client.PutMultipart(resource.Bucket, resource.key(), mr, mo)
	return e
}
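The io.CopyN probe is what decides between a single PUT and a multipart upload without ever buffering the whole body. A library-agnostic sketch of that decision; putSmall and putStreaming are hypothetical stand-ins, not the s3 package's API:

// uploadBody reads up to threshold bytes from body. If the body ends
// inside the probe, everything fits in memory and one request suffices;
// otherwise the probed bytes are stitched back in front of the remainder
// and the whole thing is streamed.
func uploadBody(body io.Reader, threshold int64) error {
	buf := bytes.NewBuffer(make([]byte, 0, threshold))
	_, err := io.CopyN(buf, body, threshold)
	if err == io.EOF {
		return putSmall(buf.Bytes()) // hypothetical: single-request upload
	}
	if err != nil {
		return err
	}
	return putStreaming(io.MultiReader(buf, body)) // hypothetical: multipart/streaming upload
}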
Example #21
0
func (r *Response) genericRequest(method, url string, cookies []http.Cookie, headers map[string][]string) Response {
	request, _ := http.NewRequest(
		method,
		r.Server.URL+url,
		io.MultiReader())

	// note: http://golang.org/pkg/net/http/#Request.AddCookie
	// todo: test with custom cookies
	if cookies != nil {
		for _, cookie := range cookies {
			request.AddCookie(&cookie)
		}
	}

	// todo: test with custom headers
	if headers != nil {
		request.Header = headers
	}

	client := http.Client{}
	response, _ := client.Do(request)
	body, _ := ioutil.ReadAll(response.Body)
	response.Body.Close()

	return Response{
		StatusCode:    response.StatusCode,
		Body:          string(body),
		Header:        response.Header,
		ContentLength: response.ContentLength,
	}
}
Example #22
0
File: bix.go Project: brentp/bix
func (tbx *Bix) Query(region interfaces.IPosition) (interfaces.RelatableIterator, error) {
	tbx2, err := newShort(tbx)
	if err != nil {
		return nil, err
	}
	if region == nil {
		var l string
		var err error
		buf := bufio.NewReader(tbx2.bgzf)
		l, err = buf.ReadString('\n')
		for i := 0; i < int(tbx2.Index.Skip) || rune(l[0]) == tbx2.Index.MetaChar; i++ {
			l, err = buf.ReadString('\n')
			if err != nil {
				return nil, err
			}
		}
		if tbx2.Index.Skip == 0 && rune(l[0]) != tbx2.Index.MetaChar {
			buf = bufio.NewReader(io.MultiReader(strings.NewReader(l), buf))
		}
		return bixerator{nil, buf, tbx2, region}, nil
	}

	cr, err := tbx2.ChunkedReader(region.Chrom(), int(region.Start()), int(region.End()))
	if err != nil {
		if cr != nil {
			tbx2.Close()
			cr.Close()
		}
		return nil, err
	}
	return bixerator{cr, bufio.NewReader(cr), tbx2, region}, nil
}
Example #23
0
func cmdExec(cwd string, args ...string) error {
	cmd := exec.Command(args[0], args[1:]...)
	cmd.Dir = cwd
	glog.V(6).Infoln("running", args)
	cmd.Stdin = os.Stdin
	if glog.V(8) {
		stdout, err := cmd.StdoutPipe()
		if err != nil {
			return err
		}
		stderr, err := cmd.StderrPipe()
		if err != nil {
			return err
		}
		multiReader := io.MultiReader(stdout, stderr)
		pipeLogger := func(rdr io.Reader) {
			scanner := bufio.NewScanner(rdr)
			for scanner.Scan() {
				glog.V(8).Infoln(scanner.Text())
			}
		}
		go pipeLogger(multiReader)
	}
	return cmd.Run()
}
Example #24
0
File: client.go Project: vito/gocloud
func (client *Client) PutStream(bucket, key string, r io.Reader, options *PutOptions) error {
	if options == nil {
		options = &PutOptions{ContentType: DEFAULT_CONTENT_TYPE}
	}
	theUrl := client.keyUrl(bucket, key)
	req, e := http.NewRequest("PUT", theUrl, r)
	if e != nil {
		return e
	}

	req.Header = client.putRequestHeaders(bucket, key, options)

	buf := bytes.NewBuffer(make([]byte, 0, MinPartSize))
	_, e = io.CopyN(buf, r, MinPartSize)
	if e == io.EOF {
		// less than min multipart size => direct upload
		return client.Put(bucket, key, buf.Bytes(), options)
	} else if e != nil {
		return e
	}
	mr := io.MultiReader(buf, r)

	mo := &MultipartOptions{
		PartSize: 5 * 1024 * 1024,
		Callback: func(res *UploadPartResult) {
			if res.Error != nil {
				logger.Print("ERROR: " + e.Error())
			}
		},
		PutOptions: options,
	}
	_, e = client.PutMultipart(bucket, key, mr, mo)
	return e
}
Example #25
0
func (c *Corpus) readIndex(filenames string) error {
	matches, err := filepath.Glob(filenames)
	if err != nil {
		return err
	} else if matches == nil {
		return fmt.Errorf("no index files match %q", filenames)
	}
	sort.Strings(matches) // make sure files are in the right order
	files := make([]io.Reader, 0, len(matches))
	for _, filename := range matches {
		f, err := os.Open(filename)
		if err != nil {
			return err
		}
		defer f.Close()
		files = append(files, f)
	}
	x := new(Index)
	if _, err := x.ReadFrom(io.MultiReader(files...)); err != nil {
		return err
	}
	if !x.CompatibleWith(c) {
		return fmt.Errorf("index file options are incompatible: %v", x.opts)
	}
	c.searchIndex.Set(x)
	return nil
}
Example #26
0
func TestReadFrom(t *testing.T) {
	t.Parallel()

	l := makeFakeListener("net.Listener")
	wl := Wrap(l, Manual)
	c := &readerConn{
		fakeConn{
			read:   make(chan struct{}),
			write:  make(chan struct{}),
			closed: make(chan struct{}),
			me:     fakeAddr{"tcp", "local"},
			you:    fakeAddr{"tcp", "remote"},
		},
	}

	go l.Enqueue(c)
	wc, err := wl.Accept()
	if err != nil {
		t.Fatalf("error accepting connection: %v", err)
	}

	// The io.MultiReader is a convenient hack to ensure that we're using
	// our ReadFrom, not strings.Reader's WriteTo.
	r := io.MultiReader(strings.NewReader("hello world"))
	if _, err := io.Copy(wc, r); err != nil {
		t.Fatalf("error copying: %v", err)
	}
}
Example #27
0
// Split splits an input stream into the number of shards given to the encoder.
//
// The data will be split into equally sized shards.
// If the data size isn't divisible by the number of shards,
// the last shard will contain extra zeros.
//
// You must supply the total size of your input.
// 'ErrShortData' will be returned if it is unable to retrieve the number of bytes
// indicated.
func (r rsStream) Split(data io.Reader, dst []io.Writer, size int64) error {
	if size < int64(r.r.DataShards) {
		return ErrShortData
	}

	if len(dst) != r.r.DataShards {
		return ErrInvShardNum
	}

	for i := range dst {
		if dst[i] == nil {
			return StreamWriteError{Err: ErrShardNoData, Stream: i}
		}
	}

	// Calculate number of bytes per shard.
	perShard := (size + int64(r.r.DataShards) - 1) / int64(r.r.DataShards)

	// Pad data to r.Shards*perShard.
	padding := make([]byte, (int64(r.r.Shards)*perShard)-size)
	data = io.MultiReader(data, bytes.NewBuffer(padding))

	// Split into equal-length shards and copy.
	for i := range dst {
		n, err := io.CopyN(dst[i], data, perShard)
		if err != io.EOF && err != nil {
			return err
		}
		if n != perShard {
			return ErrShortData
		}
	}

	return nil
}
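The padding step can be isolated: io.MultiReader makes the input appear to be exactly the padded length without materializing the padded data beyond the zero block itself. A standalone sketch, not part of the reedsolomon API:

// padTo returns a reader that yields exactly total bytes: the data first,
// then zeros. size must be the exact number of bytes data will produce.
func padTo(data io.Reader, size, total int64) io.Reader {
	if total <= size {
		return io.LimitReader(data, total)
	}
	return io.MultiReader(data, bytes.NewReader(make([]byte, total-size)))
}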
Example #28
0
File: s3.go Project: ably-forks/flynn
func (b *s3Backend) Put(tx *postgres.DBTx, info FileInfo, r io.Reader, append bool) error {
	if append {
		// This is a hack. If we need to handle upload resumption, the next
		// easiest thing to do is to finalize the multipart upload when the
		// client disconnects and, when the rest of the data arrives, start a
		// new multipart upload copying the existing object as the first part
		// (which is supported by S3 as a specific API call). This requires
		// replacing the simple uploader, so it was not done in the first pass.
		existing, err := b.Open(tx, info, false)
		if err != nil {
			return err
		}
		r = io.MultiReader(existing, r)
	}

	info.ExternalID = random.UUID()
	if err := tx.Exec("UPDATE files SET external_id = $2 WHERE file_id = $1", info.ID, info.ExternalID); err != nil {
		return err
	}

	u := s3manager.NewUploaderWithClient(b.client)
	_, err := u.Upload(&s3manager.UploadInput{
		Bucket:      &b.bucket,
		Key:         &info.ExternalID,
		ContentType: &info.Type,
		Body:        r,
	})

	return err
}
Example #29
0
File: watch.go Project: zmdroid/bee
func Start(appname string) {
	fmt.Println("start", appname)
	cmd = exec.Command(appname)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		fmt.Println("stdout:", err)
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		fmt.Println("stdin:", err)
	}
	r := io.MultiReader(stdout, stderr)
	err = cmd.Start()
	if err != nil {
		fmt.Println("cmd start:", err)
	}
	for {
		buf := make([]byte, 1024)
		count, err := r.Read(buf)
		if err != nil || count == 0 {
			fmt.Println("process exit")
			restart <- true
			return
		} else {
			fmt.Println("result:", string(buf))
		}
	}
}
Example #30
0
// PutObject uploads a Google Cloud Storage object.
func (gsa *Client) PutObject(obj *Object, content io.Reader) error {
	if err := obj.valid(); err != nil {
		return err
	}
	const maxSlurp = 2 << 20
	var buf bytes.Buffer
	n, err := io.CopyN(&buf, content, maxSlurp)
	if err != nil && err != io.EOF {
		return err
	}
	contentType := http.DetectContentType(buf.Bytes())
	if contentType == "application/octet-stream" && n < maxSlurp && utf8.Valid(buf.Bytes()) {
		contentType = "text/plain; charset=utf-8"
	}

	objURL := gsAccessURL + "/" + obj.Bucket + "/" + obj.Key
	var req *http.Request
	if req, err = http.NewRequest("PUT", objURL, ioutil.NopCloser(io.MultiReader(&buf, content))); err != nil {
		return err
	}
	req.Header.Set("x-goog-api-version", "2")
	req.Header.Set("Content-Type", contentType)

	var resp *http.Response
	if resp, err = gsa.client.Do(req); err != nil {
		return err
	}

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("Bad put response code: %v", resp.Status)
	}
	return nil
}
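The same slurp-and-reassemble idea shows up whenever a Content-Type has to be sniffed before the body is sent; http.DetectContentType only ever considers the first 512 bytes. A standalone sketch with a 512-byte probe (the URL and function name are illustrative):

// newSniffedPut builds a PUT request whose Content-Type is detected from
// the first 512 bytes of body while still sending the complete body.
func newSniffedPut(url string, body io.Reader) (*http.Request, error) {
	var head bytes.Buffer
	if _, err := io.CopyN(&head, body, 512); err != nil && err != io.EOF {
		return nil, err
	}
	req, err := http.NewRequest("PUT", url, io.MultiReader(bytes.NewReader(head.Bytes()), body))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", http.DetectContentType(head.Bytes()))
	return req, nil
}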