Example #1
// decode reads an image from r and modifies the image as defined by opts.
// swapDimensions indicates the decoded image will be rotated after being
// returned, and when interpreting opts, the post-rotation dimensions should
// be considered.
// The decoded image is returned in im. The registered name of the decoder
// used is returned in format. If the image was not successfully decoded, err
// will be non-nil.  If the decoded image was made smaller, needRescale will
// be true.
func decode(r io.Reader, opts *DecodeOpts, swapDimensions bool) (im image.Image, format string, err error, needRescale bool) {
	if opts == nil {
		// Fall-back to normal decode.
		im, format, err = image.Decode(r)
		return im, format, err, false
	}

	var buf bytes.Buffer
	tr := io.TeeReader(r, &buf)
	ic, format, err := image.DecodeConfig(tr)
	if err != nil {
		return nil, "", err, false
	}

	mr := io.MultiReader(&buf, r)
	b := image.Rect(0, 0, ic.Width, ic.Height)
	sw, sh, needRescale := opts.rescaleDimensions(b, swapDimensions)
	if !needRescale {
		im, format, err = image.Decode(mr)
		return im, format, err, false
	}

	imageDebug(fmt.Sprintf("Resizing from %dx%d -> %dx%d", ic.Width, ic.Height, sw, sh))
	if format == "cr2" {
		// Replace mr with an io.Reader to the JPEG thumbnail embedded in a
		// CR2 image.
		if mr, err = cr2.NewReader(mr); err != nil {
			return nil, "", err, false
		}
		format = "jpeg"
	}

	if format == "jpeg" && fastjpeg.Available() {
		factor := fastjpeg.Factor(ic.Width, ic.Height, sw, sh)
		if factor > 1 {
			var buf bytes.Buffer
			tr := io.TeeReader(mr, &buf)
			im, err = fastjpeg.DecodeDownsample(tr, factor)
			switch err.(type) {
			case fastjpeg.DjpegFailedError:
				log.Printf("Retrying with jpeg.Decode, because djpeg failed with: %v", err)
				im, err = jpeg.Decode(io.MultiReader(&buf, mr))
			case nil:
				// fallthrough to rescale() below.
			default:
				return nil, format, err, false
			}
			return rescale(im, sw, sh), format, err, true
		}
	}

	// Fall-back to normal decode.
	im, format, err = image.Decode(mr)
	if err != nil {
		return nil, "", err, false
	}
	return rescale(im, sw, sh), format, err, needRescale
}
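The buffering trick above (TeeReader copies whatever image.DecodeConfig consumes into buf, and MultiReader later glues that prefix back onto the unread remainder of r) is the standard way to peek at a stream that can only be read once. A minimal, self-contained sketch of the same pattern, using only the standard library and not taken from the project above:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	src := strings.NewReader("hello, tee reader")

	// Peek at the first five bytes; TeeReader copies exactly what we read into buf.
	var buf bytes.Buffer
	peek := make([]byte, 5)
	if _, err := io.ReadFull(io.TeeReader(src, &buf), peek); err != nil {
		panic(err)
	}
	fmt.Printf("peeked: %q\n", peek)

	// Stitch the consumed prefix back in front of the rest of the stream.
	full, err := ioutil.ReadAll(io.MultiReader(&buf, src))
	if err != nil {
		panic(err)
	}
	fmt.Printf("re-read: %q\n", full) // "hello, tee reader"
}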
Example #2
func TestInline(t *testing.T) {

	esc := `url("data:image/svg+xml;utf8,%3C%3Fxml%20version=%221.0%22%20encoding=%22utf-8%22%3F%3E%3C%21--%20Generator:%20Adobe%20Illustrator%2018.1.0,%20SVG%20Export%20Plug-In%20.%20SVG%20Version:%206.00%20Build%200%29%20%20--%3E%3Csvg%20version=%221.1%22%20id=%22Gopher%22%20xmlns=%22http://www.w3.org/2000/svg%22%20xmlns:xlink=%22http://www.w3.org/1999/xlink%22%20x=%220px%22%20y=%220px%22%09%20viewBox=%220%200%20215.6%20281.6%22%20enable-background=%22new%200%200%20215.6%20281.6%22%20xml:space=%22preserve%22%3E%3Cg%3E%09%3Cpath%20fill=%22%238CC5E7%22%20d=%22M207.3,44.6c-6.7-13.7-22.9-1.6-27-5.9c-21-21.6-46.4-27-66.3-28c0,0-9,0-11,0c-20,0.5-45.4,6.3-66.3,28%09%09c-4.1,4.3-20.4-7.8-27,5.9c-7.7,16,15.7,17.6,14.5,24.7c-2.3,12.8-0.8,31.8,1,50.5C28,151.5,4.3,227.4,53.6,257.9%09%09c9.3,5.8,34.4,9,56.2,9.5l0,0c0,0,0.1,0,0.1,0c0,0,0.1,0,0.1,0l0,0c21.8-0.5,43.9-3.7,53.2-9.5c49.4-30.5,25.7-106.4,28.6-138.1%09%09c1.7-18.7,3.2-37.7,1-50.5C191.6,62.2,215,60.5,207.3,44.6z%22/%3E%09%3Cg%3E%09%09%3Cpath%20fill=%22%23E0DEDC%22%20d=%22M143.2,54.3c-33.4,3.9-28.9,38.7-16,50c24,21,49,0,46.2-21.2C170.9,62.7,153.6,53.1,143.2,54.3z%22/%3E%09%09%3Ccircle%20fill=%22%23111212%22%20cx=%22145.5%22%20cy=%2284.3%22%20r=%2211.4%22/%3E%09%09%3Ccircle%20fill=%22%23FFFFFF%22%20cx=%22142.5%22%20cy=%2279.4%22%20r=%223.6%22/%3E%09%3C/g%3E%09%3Cg%3E%09%09%3Cpath%20fill=%22%23B8937F%22%20d=%22M108.5,107c-16,2.4-21.7,7-20.5,14.2c2,11.8,39.7,10.5,40.9,0.6C129.9,113.3,114.8,106.1,108.5,107z%22/%3E%09%09%3Cpath%20d=%22M98.2,111.8c-2.7,9.8,21.7,8.3,21.1,2c-0.3-3.7-3.6-8.4-12.3-8.2C103.6,105.7,99.4,107.2,98.2,111.8z%22/%3E%09%09%3Cpath%20fill=%22%23E0DEDC%22%20d=%22M99,127.7c-0.9,0.4-2.4,10.2,2.2,10.7c3.1,0.3,11.6,1.3,13.6,0c3.9-2.5,3.5-8.5,1.3-10%09%09%09C112.4,126,100,127.2,99,127.7z%22/%3E%09%3C/g%3E%09%3Cg%3E%09%09%3Cpath%20fill=%22%23E0DEDC%22%20d=%22M73.6,54.3c33.4,3.9,28.9,38.7,16,50c-24,21-49,0-46.2-21.2C46,62.7,63.3,53.1,73.6,54.3z%22/%3E%09%09%3Ccircle%20fill=%22%23111212%22%20cx=%2271.4%22%20cy=%2284.3%22%20r=%2211.4%22/%3E%09%09%3Ccircle%20fill=%22%23FFFFFF%22%20cx=%2274.4%22%20cy=%2279.4%22%20r=%223.6%22/%3E%09%3C/g%3E%09%3Cpath%20fill=%22%23B8937F%22%20d=%22M193.6,186.7c11,0.1,5.6-23.5-1.2-18.8c-3.3,2.3-3.9,7.6-3.9,12.1C188.5,182.5,190.5,186.6,193.6,186.7z%22/%3E%09%3Cpath%20fill=%22%23B8937F%22%20d=%22M23.3,186.7c-11,0.1-5.6-23.5,1.2-18.8c3.3,2.3,3.9,7.6,3.9,12.1C28.4,182.5,26.4,186.6,23.3,186.7z%22/%3E%09%3Cpath%20fill=%22%23B8937F%22%20d=%22M172.7,259.2c-6-8.9-11.4-2-20.1,2.4c-4.1,2.1,6.8,9.6,19,4C174.8,264.1,174.7,262.1,172.7,259.2z%22/%3E%09%3Cpath%20fill=%22%23B8937F%22%20d=%22M44.2,260.2c6-8.9,11.4-2,20.1,2.4c4.1,2.1-6.8,9.6-19,4C42.1,265.1,42.2,263.1,44.2,260.2z%22/%3E%09%3Cpath%20fill=%22%233C89BF%22%20d=%22M188.6,47c-0.6,2.1,2.1,1.8,3.1,8.3c0.4,2.4,9-3.5,5.5-7.8C194.3,43.9,189.1,44.9,188.6,47z%22/%3E%09%3Cpath%20fill=%22%233C89BF%22%20d=%22M28.3,47c0.6,2.1-2.1,1.8-3.1,8.3c-0.4,2.4-9-3.5-5.5-7.8C22.5,43.9,27.7,44.9,28.3,47z%22/%3E%3C/g%3E%3C/svg%3E")`

	b64 := `url("data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4NCjwhLS0gR2VuZXJhdG9yOiBBZG9iZSBJbGx1c3RyYXRvciAxOC4xLjAsIFNWRyBFeHBvcnQgUGx1Zy1JbiAuIFNWRyBWZXJzaW9uOiA2LjAwIEJ1aWxkIDApICAtLT4NCjxzdmcgdmVyc2lvbj0iMS4xIiBpZD0iR29waGVyIiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHhtbG5zOnhsaW5rPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5L3hsaW5rIiB4PSIwcHgiIHk9IjBweCINCgkgdmlld0JveD0iMCAwIDIxNS42IDI4MS42IiBlbmFibGUtYmFja2dyb3VuZD0ibmV3IDAgMCAyMTUuNiAyODEuNiIgeG1sOnNwYWNlPSJwcmVzZXJ2ZSI+DQo8Zz4NCgk8cGF0aCBmaWxsPSIjOENDNUU3IiBkPSJNMjA3LjMsNDQuNmMtNi43LTEzLjctMjIuOS0xLjYtMjctNS45Yy0yMS0yMS42LTQ2LjQtMjctNjYuMy0yOGMwLDAtOSwwLTExLDBjLTIwLDAuNS00NS40LDYuMy02Ni4zLDI4DQoJCWMtNC4xLDQuMy0yMC40LTcuOC0yNyw1LjljLTcuNywxNiwxNS43LDE3LjYsMTQuNSwyNC43Yy0yLjMsMTIuOC0wLjgsMzEuOCwxLDUwLjVDMjgsMTUxLjUsNC4zLDIyNy40LDUzLjYsMjU3LjkNCgkJYzkuMyw1LjgsMzQuNCw5LDU2LjIsOS41bDAsMGMwLDAsMC4xLDAsMC4xLDBjMCwwLDAuMSwwLDAuMSwwbDAsMGMyMS44LTAuNSw0My45LTMuNyw1My4yLTkuNWM0OS40LTMwLjUsMjUuNy0xMDYuNCwyOC42LTEzOC4xDQoJCWMxLjctMTguNywzLjItMzcuNywxLTUwLjVDMTkxLjYsNjIuMiwyMTUsNjAuNSwyMDcuMyw0NC42eiIvPg0KCTxnPg0KCQk8cGF0aCBmaWxsPSIjRTBERURDIiBkPSJNMTQzLjIsNTQuM2MtMzMuNCwzLjktMjguOSwzOC43LTE2LDUwYzI0LDIxLDQ5LDAsNDYuMi0yMS4yQzE3MC45LDYyLjcsMTUzLjYsNTMuMSwxNDMuMiw1NC4zeiIvPg0KCQk8Y2lyY2xlIGZpbGw9IiMxMTEyMTIiIGN4PSIxNDUuNSIgY3k9Ijg0LjMiIHI9IjExLjQiLz4NCgkJPGNpcmNsZSBmaWxsPSIjRkZGRkZGIiBjeD0iMTQyLjUiIGN5PSI3OS40IiByPSIzLjYiLz4NCgk8L2c+DQoJPGc+DQoJCTxwYXRoIGZpbGw9IiNCODkzN0YiIGQ9Ik0xMDguNSwxMDdjLTE2LDIuNC0yMS43LDctMjAuNSwxNC4yYzIsMTEuOCwzOS43LDEwLjUsNDAuOSwwLjZDMTI5LjksMTEzLjMsMTE0LjgsMTA2LjEsMTA4LjUsMTA3eiIvPg0KCQk8cGF0aCBkPSJNOTguMiwxMTEuOGMtMi43LDkuOCwyMS43LDguMywyMS4xLDJjLTAuMy0zLjctMy42LTguNC0xMi4zLTguMkMxMDMuNiwxMDUuNyw5OS40LDEwNy4yLDk4LjIsMTExLjh6Ii8+DQoJCTxwYXRoIGZpbGw9IiNFMERFREMiIGQ9Ik05OSwxMjcuN2MtMC45LDAuNC0yLjQsMTAuMiwyLjIsMTAuN2MzLjEsMC4zLDExLjYsMS4zLDEzLjYsMGMzLjktMi41LDMuNS04LjUsMS4zLTEwDQoJCQlDMTEyLjQsMTI2LDEwMCwxMjcuMiw5OSwxMjcuN3oiLz4NCgk8L2c+DQoJPGc+DQoJCTxwYXRoIGZpbGw9IiNFMERFREMiIGQ9Ik03My42LDU0LjNjMzMuNCwzLjksMjguOSwzOC43LDE2LDUwYy0yNCwyMS00OSwwLTQ2LjItMjEuMkM0Niw2Mi43LDYzLjMsNTMuMSw3My42LDU0LjN6Ii8+DQoJCTxjaXJjbGUgZmlsbD0iIzExMTIxMiIgY3g9IjcxLjQiIGN5PSI4NC4zIiByPSIxMS40Ii8+DQoJCTxjaXJjbGUgZmlsbD0iI0ZGRkZGRiIgY3g9Ijc0LjQiIGN5PSI3OS40IiByPSIzLjYiLz4NCgk8L2c+DQoJPHBhdGggZmlsbD0iI0I4OTM3RiIgZD0iTTE5My42LDE4Ni43YzExLDAuMSw1LjYtMjMuNS0xLjItMTguOGMtMy4zLDIuMy0zLjksNy42LTMuOSwxMi4xQzE4OC41LDE4Mi41LDE5MC41LDE4Ni42LDE5My42LDE4Ni43eiIvPg0KCTxwYXRoIGZpbGw9IiNCODkzN0YiIGQ9Ik0yMy4zLDE4Ni43Yy0xMSwwLjEtNS42LTIzLjUsMS4yLTE4LjhjMy4zLDIuMywzLjksNy42LDMuOSwxMi4xQzI4LjQsMTgyLjUsMjYuNCwxODYuNiwyMy4zLDE4Ni43eiIvPg0KCTxwYXRoIGZpbGw9IiNCODkzN0YiIGQ9Ik0xNzIuNywyNTkuMmMtNi04LjktMTEuNC0yLTIwLjEsMi40Yy00LjEsMi4xLDYuOCw5LjYsMTksNEMxNzQuOCwyNjQuMSwxNzQuNywyNjIuMSwxNzIuNywyNTkuMnoiLz4NCgk8cGF0aCBmaWxsPSIjQjg5MzdGIiBkPSJNNDQuMiwyNjAuMmM2LTguOSwxMS40LTIsMjAuMSwyLjRjNC4xLDIuMS02LjgsOS42LTE5LDRDNDIuMSwyNjUuMSw0Mi4yLDI2My4xLDQ0LjIsMjYwLjJ6Ii8+DQoJPHBhdGggZmlsbD0iIzNDODlCRiIgZD0iTTE4OC42LDQ3Yy0wLjYsMi4xLDIuMSwxLjgsMy4xLDguM2MwLjQsMi40LDktMy41LDUuNS03LjhDMTk0LjMsNDMuOSwxODkuMSw0NC45LDE4OC42LDQ3eiIvPg0KCTxwYXRoIGZpbGw9IiMzQzg5QkYiIGQ9Ik0yOC4zLDQ3YzAuNiwyLjEtMi4xLDEuOC0zLjEsOC4zYy0wLjQsMi40LTktMy41LTUuNS03LjhDMjIuNSw0My45LDI3LjcsNDQuOSwyOC4zLDQ3eiIvPg0KPC9nPg0KPC9zdmc+")`

	f, err := os.Open("test/gopher-front.svg")
	if err != nil {
		t.Error(err)
	}
	var fb, fbb, buf bytes.Buffer

	tr := io.TeeReader(f, &fb)
	Inline(tr, &buf)
	if esc != buf.String() {
		t.Errorf("got:\n%s\nwanted:\n%s", buf.String(), esc)
	}

	tr = io.TeeReader(&fb, &fbb)
	buf.Reset()
	Inline(tr, &buf, false)
	if esc != buf.String() {
		t.Errorf("got:\n%s\nwanted:\n%s", buf.String(), esc)
	}

	buf.Reset()
	Inline(&fbb, &buf, true)
	if b64 != buf.String() {
		t.Errorf("got:\n%s\nwanted:\n%s", buf.String(), b64)
	}

}
Example #3
func (s Shell) executeCommand() (err error) {
	outReader, err := s.Command.StdoutPipe()
	if err != nil {
		return
	}

	errReader, err := s.Command.StderrPipe()
	if err != nil {
		return
	}

	var bufout, buferr bytes.Buffer
	outReader2 := io.TeeReader(outReader, &bufout)
	errReader2 := io.TeeReader(errReader, &buferr)

	s.Command.Start()
	outScanner := bufio.NewScanner(outReader2)
	for outScanner.Scan() {
		fmt.Println(outScanner.Text())
	}

	errScanner := bufio.NewScanner(errReader2)
	for errScanner.Scan() {
		fmt.Println("[stderr]" + errScanner.Text())
	}

	s.Command.Wait()
	return
}
Example #4
func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
	for _, l := range layers {
		b, err := dm.blobStore.New()
		if err != nil {
			return initialRootFS, nil, err
		}
		defer b.Close()
		rc, _, err := l.Download(ctx, progressOutput)
		if err != nil {
			return initialRootFS, nil, errors.Wrap(err, "failed to download")
		}
		defer rc.Close()
		r := io.TeeReader(rc, b)
		inflatedLayerData, err := archive.DecompressStream(r)
		if err != nil {
			return initialRootFS, nil, err
		}
		digester := digest.Canonical.New()
		if _, err := archive.ApplyLayer(dm.tmpDir, io.TeeReader(inflatedLayerData, digester.Hash())); err != nil {
			return initialRootFS, nil, err
		}
		initialRootFS.Append(layer.DiffID(digester.Digest()))
		d, err := b.Commit()
		if err != nil {
			return initialRootFS, nil, err
		}
		dm.blobs = append(dm.blobs, d)
	}
	return initialRootFS, nil, nil
}
Example #5
func main() {
	src, err := os.Open("src.txt")
	if err != nil {
		panic(err)
	}
	defer src.Close()

	dst1, err := os.Create("dst1.txt")
	if err != nil {
		panic(err)
	}
	defer dst1.Close()

	dst2, err := os.Create("dst2.txt")
	if err != nil {
		panic(err)
	}
	defer dst2.Close()

	dst3, err := os.Create("dst3.txt")
	if err != nil {
		panic(err)
	}
	defer dst3.Close()

	rdr := io.TeeReader(src, dst1)
	rdr = io.TeeReader(rdr, os.Stdout)
	rdr = io.TeeReader(rdr, dst2)

	io.Copy(dst3, rdr)

}
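Chaining TeeReaders like this fans every byte out to dst1, os.Stdout and dst2 before it finally lands in dst3. When plain fan-out is all that is needed, io.MultiWriter expresses it more directly; a compact sketch of that alternative (one file plus stdout to keep it short, file name made up):

package main

import (
	"io"
	"os"
	"strings"
)

func main() {
	src := strings.NewReader("fan this line out to every destination\n")

	dst, err := os.Create("dst.txt")
	if err != nil {
		panic(err)
	}
	defer dst.Close()

	// Every byte read from src is written to each writer in turn.
	if _, err := io.Copy(io.MultiWriter(dst, os.Stdout), src); err != nil {
		panic(err)
	}
}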
Example #6
func multiunique(src, dst io.Reader) io.Reader {
	ur := &uniqueReader{}
	ur.src = io.TeeReader(src, &ur.bufsrc)
	ur.dst = io.TeeReader(dst, &ur.bufdst)
	ur.r = io.MultiReader(&ur.bufsrc, src, &ur.bufdst, dst)
	return ur
}
Example #7
// preprocessUpload pulls in all the data from the reader, storing it in a temp file and
// calculating the md5 and sha256 checksums.
// The caller is expected to remove the temporary file if and only if we return a nil error.
func (ms *managedStorage) preprocessUpload(r io.Reader, length int64) (
	f *os.File, md5hashHex, sha256hashHex string, err error,
) {
	sha256hash := sha256.New()
	md5hash := md5.New()
	// Set up a chain of readers to pull in the data and calculate the checksums.
	rdr := io.TeeReader(io.TeeReader(r, sha256hash), md5hash)
	f, err = ioutil.TempFile(os.TempDir(), "juju-resource")
	if err != nil {
		return nil, "", "", err
	}
	// Add a cleanup function to remove the data file if we exit with an error.
	defer func() {
		if err != nil {
			os.Remove(f.Name())
		}
	}()
	// Write the data to a temp file.
	_, err = io.CopyN(f, rdr, length)
	if err != nil {
		return nil, "", "", err
	}
	// Reset the file so when we return it, it can be read from to get the data.
	_, err = f.Seek(0, 0)
	if err != nil {
		return nil, "", "", err
	}
	md5hashHex = fmt.Sprintf("%x", md5hash.Sum(nil))
	sha256hashHex = fmt.Sprintf("%x", sha256hash.Sum(nil))
	return f, md5hashHex, sha256hashHex, nil
}
Example #8
func (sh *SyncHandler) copyBlob(sb blob.SizedRef) (err error) {
	cs := sh.newCopyStatus(sb)
	defer func() { cs.setError(err) }()
	br := sb.Ref

	sh.mu.Lock()
	sh.copying[br] = cs
	sh.mu.Unlock()

	if strings.Contains(storageDesc(sh.to), "bradfitz-camlistore-pt") {
		//sh.logf("LIES NOT ACTUALLY COPYING")
		//return nil
	}

	if sb.Size > constants.MaxBlobSize {
		return fmt.Errorf("blob size %d too large; max blob size is %d", sb.Size, constants.MaxBlobSize)
	}

	cs.setStatus(statusFetching)
	rc, fromSize, err := sh.from.FetchStreaming(br)
	if err != nil {
		return fmt.Errorf("source fetch: %v", err)
	}
	if fromSize != sb.Size {
		rc.Close()
		return fmt.Errorf("source fetch size mismatch: get=%d, enumerate=%d", fromSize, sb.Size)
	}

	buf := make([]byte, fromSize)
	hash := br.Hash()
	cs.setStatus(statusReading)
	n, err := io.ReadFull(io.TeeReader(rc,
		io.MultiWriter(
			incrWriter{cs, &cs.nread},
			hash,
		)), buf)
	rc.Close()
	if err != nil {
		return fmt.Errorf("Read error after %d/%d bytes: %v", n, fromSize, err)
	}
	if !br.HashMatches(hash) {
		return fmt.Errorf("Read data has unexpected digest %x", hash.Sum(nil))
	}

	cs.setStatus(statusWriting)
	newsb, err := sh.to.ReceiveBlob(br, io.TeeReader(bytes.NewReader(buf), incrWriter{cs, &cs.nwrite}))
	if err != nil {
		return fmt.Errorf("dest write: %v", err)
	}
	if newsb.Size != sb.Size {
		return fmt.Errorf("write size mismatch: source_read=%d but dest_write=%d", sb.Size, newsb.Size)
	}
	return nil
}
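The incrWriter values here are simple byte counters: because TeeReader hands every byte returned by Read to its writer, wrapping the source is enough to track read progress (and, for the second tee, write progress). A standalone version of that idea, with a made-up countingWriter standing in for incrWriter:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// countingWriter counts the bytes that pass through it; it is a made-up
// stand-in for the incrWriter used above.
type countingWriter struct{ n int64 }

func (c *countingWriter) Write(p []byte) (int, error) {
	c.n += int64(len(p))
	return len(p), nil
}

func main() {
	src := strings.NewReader("some blob contents")
	var count countingWriter

	// Every byte returned by Read is also handed to count.Write.
	if _, err := ioutil.ReadAll(io.TeeReader(src, &count)); err != nil {
		panic(err)
	}
	fmt.Println("bytes read:", count.n) // 18
}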
Example #9
func ParsingHandler(serverConn, clientConn net.Conn) error {
	log.Printf("Proxying new connection from %v", clientConn.RemoteAddr())

	s2cLog := util.NewLogger(fmt.Sprintf("%s->%s", serverConn.RemoteAddr(), clientConn.RemoteAddr()))
	c2sLog := util.NewLogger(fmt.Sprintf("%s->%s", clientConn.RemoteAddr(), serverConn.RemoteAddr()))

	go parser(io.TeeReader(serverConn, clientConn), serverConn, s2cLog)
	go parser(io.TeeReader(clientConn, serverConn), clientConn, c2sLog)

	return nil
}
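In this proxy the tee's writer is the opposite connection, so the parser both inspects the traffic and forwards it, and forwarding only advances as fast as the parser reads. A toy version of the same shape, with a bytes.Buffer standing in for the peer net.Conn:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strings"
)

// inspect logs each line read from src while TeeReader forwards the raw
// bytes to dst, mirroring the proxy's parser(io.TeeReader(a, b), ...) calls.
func inspect(src io.Reader, dst io.Writer) {
	s := bufio.NewScanner(io.TeeReader(src, dst))
	for s.Scan() {
		fmt.Println("parsed:", s.Text())
	}
}

func main() {
	var peer bytes.Buffer
	inspect(strings.NewReader("HELLO\nQUIT\n"), &peer)
	fmt.Printf("forwarded to peer: %q\n", peer.String())
}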
Example #10
func walk(path string, info os.FileInfo, inErr error) (err error) {
	if inErr != nil {
		return inErr
	}
	if !info.Mode().IsRegular() {
		return
	}

	shouldRemove := false

	file, err := os.Open(path)
	if err != nil {
		return
	}
	defer func() {
		file.Close()
		if shouldRemove {
			err = os.Remove(path)
		}
	}()

	crc32Hash.Reset()
	md5Hash.Reset()
	sha1Hash.Reset()

	md5TeeReader := io.TeeReader(file, md5Hash)
	sha1TeeReader := io.TeeReader(md5TeeReader, sha1Hash)
	io.CopyBuffer(crc32Hash, sha1TeeReader, copyBuffer)

	var key FileSum
	key.size = info.Size()
	key.crc32Sum = crc32Hash.Sum32()
	copy(key.md5Sum[:], md5Hash.Sum(nil))
	copy(key.sha1Sum[:], sha1Hash.Sum(nil))

	fmt.Printf("path: %s\r\nsize: %d bytes\r\ncrc32: %x\r\nmd5: %s\r\nsha1: %s\r\n\r\n",
		path,
		key.size,
		key.crc32Sum,
		hex.EncodeToString(key.md5Sum[:]),
		hex.EncodeToString(key.sha1Sum[:]),
	)

	if pathx, ok := FileSumSet[key]; ok {
		shouldRemove = true
		fmt.Printf("%s is a duplicate of %s and will be removed\r\n", path, pathx)
	} else {
		FileSumSet[key] = path
	}

	return
}
Example #11
func main() {
	flag.Parse()
	args := flag.Args()

	conn, err := net.Dial("tcp", config.Addr)
	fatalOnError(err)
	defer conn.Close()

	var tee io.Reader
	var timeoutCh <-chan time.Time
	if output {
		if wc, ok := conn.(WriteCloser); ok {
			err := wc.CloseWrite()
			fatalOnError(err)
		} else {
			log.Fatalln("conn does not implement WriteCloser interface")
		}

		tee = io.TeeReader(conn, os.Stdout)
		timeoutCh = time.After(config.Timeout)
	} else {
		var r io.Reader
		if len(args) == 0 {
			r = os.Stdin
		} else {
			var readers []io.Reader
			for _, filename := range args {
				file, err := os.Open(filename)
				fatalOnError(err)
				defer file.Close()
				readers = append(readers, file)
			}
			r = io.MultiReader(readers...)
		}

		tee = io.TeeReader(r, conn)
	}

	errCh := make(chan error)
	go func() {
		_, err := ioutil.ReadAll(tee)
		errCh <- err
	}()

	select {
	case err = <-errCh:
		break
	case <-timeoutCh:
		err = errors.New("Timed out")
	}
	fatalOnError(err)
}
Example #12
// runRoutine executes the routine's command. When store is set, the standard
// in/out/err streams are captured on the RoutineRun struct; when tee is set,
// they are additionally wired to the OS standard in/out/err.
func runRoutine(r *RoutineRun, store bool, tee bool) error {
	startRoutine(r)

	// Construct the command to be executed
	c, err := r.Command()

	if err != nil {
		endRoutine(r, err)
		return err
	}

	// Buffers for the subcommand's standard streams. They are needed when
	// storing the streams on the routine run; note they may go unused below.
	rstdin := new(bytes.Buffer)
	rstdout := new(bytes.Buffer)
	rstderr := new(bytes.Buffer)

	if store && tee {
		// Reads from os.Stdin and sends it to c.Stdin as well as writing
		// it to the stdin buffer
		c.Stdin = io.TeeReader(os.Stdin, rstdin)
		// Writes the commands stdout to os.Stdout and the buffer
		c.Stdout = io.MultiWriter(os.Stdout, rstdout)
		c.Stderr = io.MultiWriter(os.Stderr, rstderr)
	} else if store {
		// stdin must always come from the OS
		c.Stdin = io.TeeReader(os.Stdin, rstdin)
		c.Stdout = rstdout
		c.Stderr = rstderr
	} else if tee {
		c.Stdin = os.Stdin
		c.Stdout = os.Stdout
		c.Stderr = os.Stderr
	}

	// Run command
	log.Printf("Running routine `%s`\n", r.CommandString())
	err = c.Run()

	if store {
		// Set flags, store std interfaces
		r.Stdin = rstdin.String()
		r.Stdout = rstdout.String()
		r.Stderr = rstderr.String()
	}

	endRoutine(r, err)

	return err
}
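The store-and-tee branch can be exercised on its own: the child process still talks to the terminal while copies of its stdin and stdout accumulate in buffers. A small sketch of that wiring, assuming a Unix cat binary is available (the command is only an illustration):

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"os/exec"
)

func main() {
	var stdinCopy, stdoutCopy bytes.Buffer

	// "cat" is only an example command; any interactive program works the same way.
	cmd := exec.Command("cat")
	cmd.Stdin = io.TeeReader(os.Stdin, &stdinCopy)      // child still reads the terminal; a copy is kept
	cmd.Stdout = io.MultiWriter(os.Stdout, &stdoutCopy) // child still writes the terminal; a copy is kept
	cmd.Stderr = os.Stderr

	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	fmt.Fprintf(os.Stderr, "captured %d bytes of stdin and %d bytes of stdout\n",
		stdinCopy.Len(), stdoutCopy.Len())
}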
Example #13
func (sa solarAPIAccept) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	Log.Info().Log("msg", r.Method, "url", r.URL, "header", fmt.Sprintf("%#v", r.Header))
	if r.Body != nil {
		defer r.Body.Close()
	}
	var buf bytes.Buffer
	var data solarV1CurrentInverter
	if err := json.NewDecoder(io.TeeReader(r.Body, &buf)).Decode(&data); err != nil {
		Log.Error().Log("msg", "decode", "message", buf.String(), "error", err)
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	Log.Debug().Log("msg", "decoded", "data", fmt.Sprintf("%#v", data))
	w.WriteHeader(200)

	if err := sa.influxClient.Put("fronius energy",
		[]dataPoint{
			{Name: "pac", Time: data.Head.Timestamp,
				Unit:  data.Body.Pac.Unit,
				Value: data.Body.Pac.Values["1"]},
			{Name: "day", Time: data.Head.Timestamp,
				Unit:  data.Body.Day.Unit,
				Value: data.Body.Day.Values["1"]},
			{Name: "year", Time: data.Head.Timestamp,
				Unit:  data.Body.Year.Unit,
				Value: data.Body.Year.Values["1"]},
			{Name: "total", Time: data.Head.Timestamp,
				Unit:  data.Body.Total.Unit,
				Value: data.Body.Total.Values["1"]},
		}...); err != nil {
		Log.Error().Log("msg", "write batch to db", "error", err)
	}
}
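The TeeReader here exists purely for diagnostics: if the JSON decode fails, buf still holds the bytes the decoder consumed, so the error log can show the offending payload. A self-contained sketch of the same trick using net/http/httptest; the handler and payload type are invented for the illustration:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"net/http/httptest"
	"strings"
)

type payload struct {
	Name string `json:"name"`
}

// handler decodes JSON from the request body; thanks to the TeeReader the
// raw bytes are still available for the error log when decoding fails.
func handler(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()

	var buf bytes.Buffer
	var p payload
	if err := json.NewDecoder(io.TeeReader(r.Body, &buf)).Decode(&p); err != nil {
		log.Printf("decode failed, raw body %q: %v", buf.String(), err)
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	fmt.Fprintf(w, "hello, %s", p.Name)
}

func main() {
	// The wrong value type triggers the error path on purpose.
	req := httptest.NewRequest("POST", "/", strings.NewReader(`{"name": 42}`))
	rec := httptest.NewRecorder()
	handler(rec, req)
	fmt.Println(rec.Code, rec.Body.String())
}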
Example #14
File: twty.go Project: mattn/twty
func rawCall(token *oauth.Credentials, method string, uri string, opt map[string]string, res interface{}) error {
	param := make(url.Values)
	for k, v := range opt {
		param.Set(k, v)
	}
	oauthClient.SignParam(token, method, uri, param)
	var resp *http.Response
	var err error
	if method == "GET" {
		uri = uri + "?" + param.Encode()
		resp, err = http.Get(uri)
	} else {
		resp, err = http.PostForm(uri, url.Values(param))
	}
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if res == nil {
		return nil
	}
	if *debug {
		return json.NewDecoder(io.TeeReader(resp.Body, os.Stdout)).Decode(&res)
	}
	return json.NewDecoder(resp.Body).Decode(&res)
}
Example #15
func (ms *conversionStore) WriteACI(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	cr, err := aci.NewCompressedReader(f)
	if err != nil {
		return "", err
	}
	defer cr.Close()

	h := sha512.New()
	r := io.TeeReader(cr, h)

	// read the file so we can get the hash
	if _, err := io.Copy(ioutil.Discard, r); err != nil {
		return "", fmt.Errorf("error reading ACI: %v", err)
	}

	im, err := aci.ManifestFromImage(f)
	if err != nil {
		return "", err
	}

	key := ms.HashToKey(h)
	ms.acis[key] = &aciInfo{path: path, key: key, ImageManifest: im}
	return key, nil
}
Example #16
// NewFileMeta generates a FileMeta object from the reader, using the
// hash algorithms provided
func NewFileMeta(r io.Reader, hashAlgorithms ...string) (FileMeta, error) {
	if len(hashAlgorithms) == 0 {
		hashAlgorithms = []string{defaultHashAlgorithm}
	}
	hashes := make(map[string]hash.Hash, len(hashAlgorithms))
	for _, hashAlgorithm := range hashAlgorithms {
		var h hash.Hash
		switch hashAlgorithm {
		case "sha256":
			h = sha256.New()
		case "sha512":
			h = sha512.New()
		default:
			return FileMeta{}, fmt.Errorf("Unknown Hash Algorithm: %s", hashAlgorithm)
		}
		hashes[hashAlgorithm] = h
		r = io.TeeReader(r, h)
	}
	n, err := io.Copy(ioutil.Discard, r)
	if err != nil {
		return FileMeta{}, err
	}
	m := FileMeta{Length: n, Hashes: make(Hashes, len(hashes))}
	for hashAlgorithm, h := range hashes {
		m.Hashes[hashAlgorithm] = h.Sum(nil)
	}
	return m, nil
}
Example #17
// Update the object from in with modTime and size
func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) error {
	dir := path.Dir(o.path)
	err := os.MkdirAll(dir, 0777)
	if err != nil {
		return err
	}

	out, err := os.Create(o.path)
	if err != nil {
		return err
	}

	// Calculate the md5sum of the object we are reading as we go along
	hash := md5.New()
	in = io.TeeReader(in, hash)

	_, err = io.Copy(out, in)
	outErr := out.Close()
	if err != nil {
		return err
	}
	if outErr != nil {
		return outErr
	}

	// All successful so update the md5sum
	o.md5sum = hex.EncodeToString(hash.Sum(nil))

	// Set the mtime
	o.SetModTime(modTime)

	// ReRead info now that we have finished
	return o.lstat()
}
Example #18
func (r *roundTripRecorder) RoundTrip(req *http.Request) (*http.Response, error) {
	reqBuf, resBuf := &bytes.Buffer{}, &bytes.Buffer{}
	if req.Body != nil {
		req.Body = readCloser{req.Body, io.TeeReader(req.Body, reqBuf)}
	}
	res, err := r.RoundTripper.RoundTrip(req)
	if err != nil {
		return nil, err
	}
	log := &request{req, res, reqBuf, resBuf}
	res.Body = readCloser{res.Body, io.TeeReader(res.Body, resBuf)}
	r.mtx.Lock()
	r.requests = append(r.requests, log)
	r.mtx.Unlock()
	return res, nil
}
Example #19
func main() {
	if len(os.Args) < 2 {
		println("usage: " + os.Args[0] + " <destination>")
		os.Exit(1)
	}

	destination := os.Args[1]

	err := os.MkdirAll(destination, 0755)
	if err != nil {
		fatal("creating destination", err)
	}

	file, err := os.Create(filepath.Join(destination, "input"))
	if err != nil {
		fatal("creating input file", err)
	}

	defer file.Close()

	var request models.InRequest

	err = json.NewDecoder(io.TeeReader(os.Stdin, file)).Decode(&request)
	if err != nil {
		fatal("reading request", err)
	}

	inVersion := request.Version

	json.NewEncoder(os.Stdout).Encode(models.InResponse{
		Version: inVersion,
	})
}
Example #20
// helper function to log the stream of bytes from a reader while waiting on
// the error channel. It returns on first error received on the channel
func logOutputAndReturnStatus(r io.Reader, errCh chan error, cancelCh CancelChannel,
	cancelFunc context.CancelFunc, jobLogs io.Writer) error {
	// this can happen if an error occurred before the ansible could be run,
	// just return that error
	if r == nil {
		return <-errCh
	}

	// redirect read output to job logs
	t := io.TeeReader(r, jobLogs)
	s := bufio.NewScanner(t)
	ticker := time.Tick(50 * time.Millisecond)
	for {
		var err error
		select {
		case <-cancelCh:
			err = errJobCancelled
			cancelFunc()
			for s.Scan() {
				logrus.Infof("%s", s.Bytes())
			}
			return err
		case err := <-errCh:
			for s.Scan() {
				logrus.Infof("%s", s.Bytes())
			}
			return err
		case <-ticker:
			// scan any available output while waiting
			if s.Scan() {
				logrus.Infof("%s", s.Bytes())
			}
		}
	}
}
Example #21
// NewHostedProgram initializes, but does not start, a hosted docker container.
func (ldcf *LinuxDockerContainerFactory) NewHostedProgram(spec HostedProgramSpec) (child HostedProgram, err error) {

	// The imagename for the child is given by spec.ContainerArgs[0]
	argv0 := "cloudproxy"
	if len(spec.ContainerArgs) >= 1 {
		argv0 = spec.ContainerArgs[0]
	}
	img := argv0 + ":" + getRandomFileName(nameLen)

	inf, err := os.Open(spec.Path)
	if err != nil {
		return
	}
	defer inf.Close()

	// Build the docker image, and hash the image as it is sent.
	hasher := sha256.New()
	err = docker(io.TeeReader(inf, hasher), "build", "-t", img, "-q", "-")
	if err != nil {
		return
	}

	hash := hasher.Sum(nil)

	child = &DockerContainer{
		spec:      spec,
		ImageName: img,
		Hash:      hash,
		Factory:   ldcf,
		Done:      make(chan bool, 1),
	}

	return
}
Example #22
// APIResourceHandler allows you to implement RESTful APIs easier
func APIResourceHandler(APIResource APIResource) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		b := bytes.NewBuffer(make([]byte, 0))
		reader := io.TeeReader(r.Body, b)

		r.Body = ioutil.NopCloser(b)
		defer r.Body.Close()

		r.ParseForm()

		// OAuth
		// logs.Trace.Printf("session: %#v", session)
		session, _, _ := CheckTwitterSession(w, r)

		// Delegate responsibility to the resource
		var status APIStatus
		var data interface{}

		switch r.Method {
		case options:
			status, data = APIResource.Options(&session, r.URL.Path, r.Form, reader)
		case get:
			status, data = APIResource.Get(&session, r.URL.Path, r.Form, reader)
		case post:
			status, data = APIResource.Post(&session, r.URL.Path, r.Form, reader)
		case put:
			status, data = APIResource.Put(&session, r.URL.Path, r.Form, reader)
		case patch:
			status, data = APIResource.Patch(&session, r.URL.Path, r.Form, reader)
		case delete:
			status, data = APIResource.Delete(&session, r.URL.Path, r.Form, reader)
		default:
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}

		// Return API response
		var content []byte
		var e error

		if !status.success {
			content, e = json.Marshal(apienvelope{
				Header: apiheader{Status: "fail", Message: status.message},
			})
		} else {
			content, e = json.Marshal(apienvelope{
				Header:   apiheader{Status: "success"},
				Response: data,
			})
		}
		if e != nil {
			logs.Error.Printf("ERROR: %s %s", "json.Marshal@APIResourceHandler", e.Error())
			http.Error(w, e.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(status.code)
		w.Write(content)
	}
}
Example #23
// pushLayer pushes the layer content returning the url on success.
func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) string {
	digester := digest.Canonical.New()

	resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, digester.Hash()))
	if err != nil {
		t.Fatalf("unexpected error doing push layer request: %v", err)
	}
	defer resp.Body.Close()

	checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated)

	if err != nil {
		t.Fatalf("error generating sha256 digest of body")
	}

	sha256Dgst := digester.Digest()

	expectedLayerURL, err := ub.BuildBlobURL(name, sha256Dgst)
	if err != nil {
		t.Fatalf("error building expected layer url: %v", err)
	}

	checkHeaders(t, resp, http.Header{
		"Location":              []string{expectedLayerURL},
		"Content-Length":        []string{"0"},
		"Docker-Content-Digest": []string{sha256Dgst.String()},
	})

	return resp.Header.Get("Location")
}
Example #24
func openRemote(oid string, l int64, cachePerc int, nl NodeList) (io.ReadCloser, error) {
	for _, sid := range nl {
		resp, err := sid.ClientForTransfer(l).Get(sid.BlobURL(oid))
		if err != nil {
			log.Printf("Error reading %s from node %v: %v",
				oid, sid, err)
			continue
		}

		if resp.StatusCode != 200 {
			log.Printf("Error response %v from node %v getting %v",
				resp.Status, sid, oid)
			resp.Body.Close()
			continue
		}

		shouldCache := cachePerc == 100 || (cachePerc > rand.Intn(100) &&
			availableSpace() > l)

		if !shouldCache {
			return resp.Body, nil
		}

		hw, err := NewHashRecord(*root, oid)
		r := io.TeeReader(resp.Body, hw)
		rv := &hwFinisher{r, hw, oid, l}
		return &readerClosers{rv, []io.Closer{rv, resp.Body}}, nil
	}
	return nil, fmt.Errorf("couldn't get ob from any of %v", nl)
}
Example #25
func smudgeCommand(cmd *cobra.Command, args []string) {
	requireStdin("This command should be run by the Git 'smudge' filter")
	lfs.InstallHooks(false)

	// keeps the initial buffer from lfs.DecodePointer
	b := &bytes.Buffer{}
	r := io.TeeReader(os.Stdin, b)

	ptr, err := lfs.DecodePointer(r)
	if err != nil {
		mr := io.MultiReader(b, os.Stdin)
		_, err := io.Copy(os.Stdout, mr)
		if err != nil {
			Panic(err, "Error writing data to stdout:")
		}
		return
	}

	if smudgeInfo {
		localPath, err := lfs.LocalMediaPath(ptr.Oid)
		if err != nil {
			Exit(err.Error())
		}

		stat, err := os.Stat(localPath)
		if err != nil {
			Print("%d --", ptr.Size)
		} else {
			Print("%d %s", stat.Size(), localPath)
		}
		return
	}

	filename := smudgeFilename(args, err)
	cb, file, err := lfs.CopyCallbackFile("smudge", filename, 1, 1)
	if err != nil {
		Error(err.Error())
	}

	cfg := lfs.Config
	download := lfs.FilenamePassesIncludeExcludeFilter(filename, cfg.FetchIncludePaths(), cfg.FetchExcludePaths())

	if smudgeSkip || lfs.Config.GetenvBool("GIT_LFS_SKIP_SMUDGE", false) {
		download = false
	}

	err = ptr.Smudge(os.Stdout, filename, download, cb)
	if file != nil {
		file.Close()
	}

	if err != nil {
		ptr.Encode(os.Stdout)
		// Download declined error is ok to skip if we weren't requesting download
		if !(lfs.IsDownloadDeclinedError(err) && !download) {
			LoggedError(err, "Error accessing media: %s (%s)", filename, ptr.Oid)
			os.Exit(2)
		}
	}
}
Example #26
func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {
	tmpFile, err := ioutil.TempFile("", "busltee_buffer")
	if err != nil {
		return nil, err
	}
	t.bufferName = tmpFile.Name()
	t.mutex = &sync.Mutex{}

	go func() {
		defer tmpFile.Close()
		defer t.Close()

		tee := io.TeeReader(req.Body, tmpFile)
		_, err := ioutil.ReadAll(tee)
		if err != nil {
			log.Fatal(err)
		}
	}()

	if t.Transport == nil {
		t.Transport = &http.Transport{}
	}
	if t.SleepDuration == 0 {
		t.SleepDuration = time.Second
	}
	return t.tries(req)
}
Example #27
func testPut(t *testing.T, file *fstest.Item) {
	// tries is declared before the label so the retry counter survives the goto.
	tries := 1
	const maxTries = 10
again:
	buf := bytes.NewBufferString(fstest.RandomString(100))
	hash := fs.NewMultiHasher()
	in := io.TeeReader(buf, hash)

	file.Size = int64(buf.Len())
	obji := fs.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
	obj, err := remote.Put(in, obji)
	if err != nil {
		// Retry if err returned a retry error
		if fs.IsRetryError(err) && tries < maxTries {
			t.Logf("Put error: %v - low level retry %d/%d", err, tries, maxTries)
			time.Sleep(2 * time.Second)

			tries++
			goto again
		}
		require.NoError(t, err, "Put error")
	}
	file.Hashes = hash.Sums()
	file.Check(t, obj, remote.Precision())
	// Re-read the object and check again
	obj = findObject(t, file.Path)
	file.Check(t, obj, remote.Precision())
}
Example #28
func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error {
	digester := digest.Canonical.New()
	tr := io.TeeReader(ts, digester.Hash())

	tsw, err := tx.TarSplitWriter(true)
	if err != nil {
		return err
	}
	metaPacker := storage.NewJSONPacker(tsw)
	defer tsw.Close()

	// we're passing nil here for the file putter, because the ApplyDiff will
	// handle the extraction of the archive
	rdr, err := asm.NewInputTarStream(tr, metaPacker, nil)
	if err != nil {
		return err
	}

	applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, archive.Reader(rdr))
	if err != nil {
		return err
	}

	// Discard trailing data but ensure metadata is picked up to reconstruct stream
	io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed

	layer.size = applySize
	layer.diffID = DiffID(digester.Digest())

	logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize)

	return nil
}
Example #29
func getFile(filename string, cp chan *cachePutRequest, cacheCheck chan *cacheCheckRequest, fileSent chan bool) (io.Reader, error) {
	cacheReq := new(cacheCheckRequest)
	cacheReq.name = filename
	cacheReq.isNotCached = make(chan bool)
	cacheReq.isCached = make(chan *cachePutRequest)

	//send cache check request
	cacheCheck <- cacheReq
	//handle cache request
	select {
	case c := <-cacheReq.isCached:
		log.Println("Cache hit sending", c.name, "to the client")
		return c.file, nil
	case <-cacheReq.isNotCached:
		log.Println("Cache miss sending", filename, "to the client")

		fd, err := os.Open(filename)
		if err != nil {
			return fd, err
		}

		//when cacheFileReader.Read is called, responseFileReader will be written to
		responseFileReader := new(bytes.Buffer)
		cacheFileReader := io.TeeReader(fd, responseFileReader)

		cachePut := new(cachePutRequest)
		cachePut.name = filename
		cachePut.file = responseFileReader
		cachePut.fileIsSent = fileSent
		// the cache needs to know when the file has been read so it can write it to the cache

		cp <- cachePut
		return cacheFileReader, nil
	}
}
Example #30
// downloadPiece attempts to retrieve a file piece from a host.
func (d *Download) downloadPiece(piece filePiece) error {
	conn, err := net.DialTimeout("tcp", string(piece.HostIP), 10e9)
	if err != nil {
		return err
	}
	defer conn.Close()
	err = encoding.WriteObject(conn, [8]byte{'R', 'e', 't', 'r', 'i', 'e', 'v', 'e'})
	if err != nil {
		return err
	}

	// Send the ID of the contract for the file piece we're requesting.
	if err := encoding.WriteObject(conn, piece.ContractID); err != nil {
		return err
	}

	// Simultaneously download, decrypt, and calculate the Merkle root of the file.
	tee := io.TeeReader(
		// Use a LimitedReader to ensure we don't read indefinitely.
		io.LimitReader(conn, int64(piece.Contract.FileSize)),
		// Write the decrypted bytes to the file.
		piece.EncryptionKey.NewWriter(d),
	)
	merkleRoot, err := crypto.ReaderMerkleRoot(tee)
	if err != nil {
		return err
	}

	if merkleRoot != piece.Contract.FileMerkleRoot {
		return errors.New("host provided a file that's invalid")
	}

	return nil
}
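Bounding the tee with io.LimitReader, as this snippet does, is how you hash or verify exactly N bytes of a stream without consuming whatever follows. A reduced sketch of that combination, with sha256 in place of the Merkle root and a strings.Reader in place of the connection:

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	conn := strings.NewReader("0123456789abcdefTRAILING DATA")
	const pieceSize = 16

	// Hash exactly pieceSize bytes while a copy of them lands in stored.
	var stored bytes.Buffer
	h := sha256.New()
	tee := io.TeeReader(io.LimitReader(conn, pieceSize), &stored)
	if _, err := io.Copy(h, tee); err != nil {
		panic(err)
	}

	rest, _ := ioutil.ReadAll(conn) // bytes beyond the limit are left untouched
	fmt.Printf("sha256(first %d bytes) = %x\n", pieceSize, h.Sum(nil))
	fmt.Printf("stored %q, remaining %q\n", stored.String(), rest)
}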