Example #1
func newMultipartReader(parts []typeReader) *multipartReader {
	mp := &multipartReader{pipeOpen: true}
	var pw *io.PipeWriter
	mp.pr, pw = io.Pipe()
	mpw := multipart.NewWriter(pw)
	mp.ctype = "multipart/related; boundary=" + mpw.Boundary()
	go func() {
		for _, part := range parts {
			w, err := mpw.CreatePart(typeHeader(part.typ))
			if err != nil {
				mpw.Close()
				pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err))
				return
			}
			_, err = io.Copy(w, part.Reader)
			if err != nil {
				mpw.Close()
				pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err))
				return
			}
		}

		mpw.Close()
		pw.Close()
	}()
	return mp
}
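Example #1 builds a multipart body on the fly: a goroutine writes parts into the write half of an io.Pipe while the read half is handed to the HTTP layer as the request body. Below is a minimal, self-contained sketch of the same pattern; the URL, field name, and payload are placeholders and not part of the original code.

package main

import (
	"io"
	"mime/multipart"
	"net/http"
	"strings"
)

func main() {
	pr, pw := io.Pipe()
	mpw := multipart.NewWriter(pw)

	go func() {
		part, err := mpw.CreateFormField("note")
		if err != nil {
			pw.CloseWithError(err)
			return
		}
		if _, err := io.Copy(part, strings.NewReader("streamed without buffering the whole body")); err != nil {
			pw.CloseWithError(err)
			return
		}
		// Close the multipart writer first so the terminating boundary is
		// written, then close the pipe; CloseWithError(nil) acts like Close.
		pw.CloseWithError(mpw.Close())
	}()

	resp, err := http.Post("http://example.com/upload", mpw.FormDataContentType(), pr)
	if err != nil {
		return
	}
	resp.Body.Close()
}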
Example #2
// startReading starts a goroutine that receives lines from the reader in the
// background and passes them to a newly created string channel, which is then
// used in the assertions.
func startReading(c *gc.C, tailer *tailer.Tailer, reader *io.PipeReader, writer *io.PipeWriter) chan string {
	linec := make(chan string)
	// Start goroutine for reading.
	go func() {
		defer close(linec)
		reader := bufio.NewReader(reader)
		for {
			line, err := reader.ReadString('\n')
			switch err {
			case nil:
				linec <- line
			case io.EOF:
				return
			default:
				c.Fail()
			}
		}
	}()
	// Close writer when tailer is stopped or has an error. Tailer using
	// components can do it the same way.
	go func() {
		tailer.Wait()
		writer.Close()
	}()
	return linec
}
Example #3
func teardownPeer(t *testing.T, c *Client,
	in *io.PipeReader, out *io.PipeWriter) {
	// in.Close()
	out.Close()
	c.shares.halt()
	os.RemoveAll(c.DownloadRoot)
}
Example #4
func sendSnapFile(snapPath string, snapFile *os.File, pw *io.PipeWriter, mw *multipart.Writer, action *actionData) {
	defer snapFile.Close()

	if action.SnapOptions == nil {
		action.SnapOptions = &SnapOptions{}
	}
	errs := []error{
		mw.WriteField("action", action.Action),
		mw.WriteField("name", action.Name),
		mw.WriteField("snap-path", action.SnapPath),
		mw.WriteField("channel", action.Channel),
		mw.WriteField("devmode", strconv.FormatBool(action.DevMode)),
	}
	for _, err := range errs {
		if err != nil {
			pw.CloseWithError(err)
			return
		}
	}

	fw, err := mw.CreateFormFile("snap", filepath.Base(snapPath))
	if err != nil {
		pw.CloseWithError(err)
		return
	}

	_, err = io.Copy(fw, snapFile)
	if err != nil {
		pw.CloseWithError(err)
		return
	}

	mw.Close()
	pw.Close()
}
Example #5
func (donut API) mergeMultipart(parts *CompleteMultipartUpload, uploadID string, fullObjectWriter *io.PipeWriter) {
	for _, part := range parts.Part {
		recvMD5 := part.ETag
		object, ok := donut.multiPartObjects[uploadID].Get(part.PartNumber)
		if !ok {
			fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(InvalidPart{})))
			return
		}
		calcMD5Bytes := md5.Sum(object)
		// complete multi part request header md5sum per part is hex encoded
		recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\""))
		if err != nil {
			fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(InvalidDigest{Md5: recvMD5})))
			return
		}
		if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) {
			fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(BadDigest{})))
			return
		}

		if _, err := io.Copy(fullObjectWriter, bytes.NewReader(object)); err != nil {
			fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(err)))
			return
		}
		object = nil
	}
	fullObjectWriter.Close()
}
Example #6
// Encode the file and request parameters in a multipart body.
// File contents are streamed into the request using an io.Pipe in a separate goroutine.
func streamUploadBody(client *FlickrClient, photo io.Reader, body *io.PipeWriter, fileName string, boundary string) {
	// multipart writer to fill the body
	defer body.Close()
	writer := multipart.NewWriter(body)
	writer.SetBoundary(boundary)

	// create the "photo" field
	part, err := writer.CreateFormFile("photo", filepath.Base(fileName))
	if err != nil {
		log.Fatal(err)
		return
	}

	// fill the photo field
	_, err = io.Copy(part, photo)
	if err != nil {
		log.Fatal(err)
		return
	}

	// dump other params
	for key, val := range client.Args {
		_ = writer.WriteField(key, val[0])
	}

	// close the form writer
	err = writer.Close()
	if err != nil {
		log.Fatal(err)
		return
	}
}
Example #7
func StreamWriteMultipartForm(params map[string]string, fileField, path, boundary string, pw *io.PipeWriter, buf *bytes.Buffer) {
	defer pw.Close()
	mpw := multipart.NewWriter(pw)
	mpw.SetBoundary(boundary)
	if fileField != "" && path != "" {
		fw, err := mpw.CreateFormFile(fileField, filepath.Base(path))
		if err != nil {
			log.Fatal(err)
			return
		}
		if buf != nil {
			_, err = io.Copy(fw, buf)
			if err != nil {
				log.Fatal(err)
				return
			}
		}
	}
	for key, val := range params {
		_ = mpw.WriteField(key, val)
	}
	err := mpw.Close()
	if err != nil {
		log.Fatal(err)
		return
	}
}
Example #8
// Wait overrides the embedded Cmd's Wait method to enable subprocess
// termination if a timeout has been exceeded.
func (mc *ManagedCmd) Wait() (err error) {
	go func() {
		mc.done <- mc.Cmd.Wait()
	}()

	if mc.timeout_duration != 0 {
		select {
		case <-mc.Stopchan:
			err = fmt.Errorf("CommandChain was stopped with error: [%s]", mc.kill())
		case <-time.After(mc.timeout_duration):
			err = fmt.Errorf("CommandChain timedout with error: [%s]", mc.kill())
		case err = <-mc.done:
		}
	} else {
		select {
		case <-mc.Stopchan:
			err = fmt.Errorf("CommandChain was stopped with error: [%s]", mc.kill())
		case err = <-mc.done:
		}
	}

	var writer *io.PipeWriter
	var ok bool

	writer, ok = mc.Stdout.(*io.PipeWriter)
	if ok {
		writer.Close()
	}
	writer, ok = mc.Stderr.(*io.PipeWriter)
	if ok {
		writer.Close()
	}

	return err
}
Example #9
// Streams upload directly from file -> mime/multipart -> pipe -> http-request
func streamingUploadFile(params map[string]string, paramName, path string, w *io.PipeWriter, file *os.File) {
	defer file.Close()
	defer w.Close()
	writer := multipart.NewWriter(w)
	part, err := writer.CreateFormFile(paramName, filepath.Base(path))
	if err != nil {
		log.Fatal(err)
		return
	}
	_, err = io.Copy(part, file)
	if err != nil {
		log.Fatal(err)
		return
	}

	for key, val := range params {
		_ = writer.WriteField(key, val)
	}

	err = writer.Close()
	if err != nil {
		log.Fatal(err)
		return
	}
}
Example #10
func HandleRead(filename string, w *io.PipeWriter) {
	fmt.Printf("Filename : %v \n", []byte(filename))
	d, err := localConf.fs.Get("tftp/" + filename[0:len(filename)-1])
	fmt.Println(d, err)
	if err != nil {
		w.CloseWithError(fmt.Errorf("file does not exist: %s", filename))
		return
	}
	// Close the handle only after Get succeeded; deferring before the error
	// check could close a nil handle.
	defer d.Close()
	// copy all the data into a buffer
	data, err := ioutil.ReadAll(d)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Copy Error : %v\n", err)
	}
	c, e := io.Copy(w, bytes.NewBuffer(data))
	if e != nil {
		fmt.Fprintf(os.Stderr, "Can't send %s: %v\n", filename, e)
	} else {
		fmt.Fprintf(os.Stderr, "Sent %s (%d bytes)\n", filename, c)
	}
	w.Close()
}
Example #11
// readEncodedData streams the object's (possibly erasure-coded) data from disk
// into writer, verifying its md5sum before closing the pipe.
func (b bucket) readEncodedData(objectName string, writer *io.PipeWriter, donutObjectMetadata map[string]string) {
	expectedMd5sum, err := hex.DecodeString(donutObjectMetadata["sys.md5"])
	if err != nil {
		writer.CloseWithError(iodine.New(err, nil))
		return
	}
	readers, err := b.getDiskReaders(objectName, "data")
	if err != nil {
		writer.CloseWithError(iodine.New(err, nil))
		return
	}
	hasher := md5.New()
	mwriter := io.MultiWriter(writer, hasher)
	switch len(readers) == 1 {
	case false:
		totalChunks, totalLeft, blockSize, k, m, err := b.donutMetadata2Values(donutObjectMetadata)
		if err != nil {
			writer.CloseWithError(iodine.New(err, nil))
			return
		}
		technique, ok := donutObjectMetadata["sys.erasureTechnique"]
		if !ok {
			writer.CloseWithError(iodine.New(MissingErasureTechnique{}, nil))
			return
		}
		encoder, err := NewEncoder(uint8(k), uint8(m), technique)
		if err != nil {
			writer.CloseWithError(iodine.New(err, nil))
			return
		}
		for i := 0; i < totalChunks; i++ {
			decodedData, err := b.decodeEncodedData(totalLeft, blockSize, readers, encoder, writer)
			if err != nil {
				writer.CloseWithError(iodine.New(err, nil))
				return
			}
			_, err = io.Copy(mwriter, bytes.NewBuffer(decodedData))
			if err != nil {
				writer.CloseWithError(iodine.New(err, nil))
				return
			}
			totalLeft = totalLeft - int64(blockSize)
		}
	case true:
		_, err := io.Copy(writer, readers[0])
		if err != nil {
			writer.CloseWithError(iodine.New(err, nil))
			return
		}
	}
	// check if decodedData md5sum matches
	if !bytes.Equal(expectedMd5sum, hasher.Sum(nil)) {
		writer.CloseWithError(iodine.New(ChecksumMismatch{}, nil))
		return
	}
	writer.Close()
}
Example #12
func (s *GardenServer) streamProcess(logger lager.Logger, conn net.Conn, process garden.Process, stdinPipe *io.PipeWriter, connCloseCh chan struct{}) {
	statusCh := make(chan int, 1)
	errCh := make(chan error, 1)

	go func() {
		status, err := process.Wait()
		if err != nil {
			logger.Error("wait-failed", err, lager.Data{
				"id": process.ID(),
			})

			errCh <- err
		} else {
			logger.Info("exited", lager.Data{
				"status": status,
				"id":     process.ID(),
			})

			statusCh <- status
		}
	}()

	for {
		select {

		case status := <-statusCh:
			transport.WriteMessage(conn, &transport.ProcessPayload{
				ProcessID:  process.ID(),
				ExitStatus: &status,
			})

			stdinPipe.Close()
			return

		case err := <-errCh:
			e := err.Error()
			transport.WriteMessage(conn, &transport.ProcessPayload{
				ProcessID: process.ID(),
				Error:     &e,
			})

			stdinPipe.Close()
			return

		case <-s.stopping:
			logger.Debug("detaching", lager.Data{
				"id": process.ID(),
			})

			return

		case <-connCloseCh:

			return
		}
	}
}
Example #13
// readEncodedData streams the object's (possibly erasure-coded) data from disk
// into writer, verifying its md5sum against objMetadata before closing the pipe.
func (b bucket) readEncodedData(objectName string, writer *io.PipeWriter, objMetadata ObjectMetadata) {
	readers, err := b.getDiskReaders(objectName, "data")
	if err != nil {
		writer.CloseWithError(iodine.New(err, nil))
		return
	}
	for _, reader := range readers {
		defer reader.Close()
	}
	expectedMd5sum, err := hex.DecodeString(objMetadata.MD5Sum)
	if err != nil {
		writer.CloseWithError(iodine.New(err, nil))
		return
	}
	hasher := md5.New()
	mwriter := io.MultiWriter(writer, hasher)
	switch len(readers) == 1 {
	case false:
		if objMetadata.ErasureTechnique == "" {
			writer.CloseWithError(iodine.New(MissingErasureTechnique{}, nil))
			return
		}
		encoder, err := newEncoder(objMetadata.DataDisks, objMetadata.ParityDisks, objMetadata.ErasureTechnique)
		if err != nil {
			writer.CloseWithError(iodine.New(err, nil))
			return
		}
		totalLeft := objMetadata.Size
		for i := 0; i < objMetadata.ChunkCount; i++ {
			decodedData, err := b.decodeEncodedData(totalLeft, int64(objMetadata.BlockSize), readers, encoder, writer)
			if err != nil {
				writer.CloseWithError(iodine.New(err, nil))
				return
			}
			_, err = io.Copy(mwriter, bytes.NewBuffer(decodedData))
			if err != nil {
				writer.CloseWithError(iodine.New(err, nil))
				return
			}
			totalLeft = totalLeft - int64(objMetadata.BlockSize)
		}
	case true:
		_, err := io.Copy(writer, readers[0])
		if err != nil {
			writer.CloseWithError(iodine.New(err, nil))
			return
		}
	}
	// check if decodedData md5sum matches
	if !bytes.Equal(expectedMd5sum, hasher.Sum(nil)) {
		writer.CloseWithError(iodine.New(ChecksumMismatch{}, nil))
		return
	}
	writer.Close()
}
Example #14
func PipeWriter(data map[string]interface{}) {
	// A zero-value io.PipeWriter{} is not a usable pipe: both halves must come
	// from io.Pipe, and every write blocks until the PipeReader consumes it.
	r, w := io.Pipe()
	go func() {
		defer w.Close()
		w.Write([]byte("pipeWriter"))
		// err := tpl.Execute(w, data)
		// if err != nil {
		// 	fmt.Println(err)
		// }
	}()
	io.Copy(os.Stdout, r) // drain the reader so the writes above can complete
}
Example #15
// manageStream copies src into the pipe writer and closes the writer when the
// copy is done, propagating any copy error to the pipe reader.
func manageStream(dst *io.PipeWriter, src io.Reader) error {
	n, err := io.Copy(dst, src)

	fmt.Println(n)

	// CloseWithError(nil) behaves like Close, so the reader sees either EOF
	// or the error that interrupted the copy.
	dst.CloseWithError(err)
	return err
}
Example #16
func cleanup(p1, p2 *io.PipeWriter, stdinCh, stderrCh chan error) error {
	p1.Close()
	p2.Close()
	if stdinCh != nil {
		if err := <-stdinCh; err != nil {
			return err
		}
	}
	if stderrCh != nil {
		if err := <-stderrCh; err != nil {
			return err
		}
	}
	return nil
}
Example #17
func joinFilesInGoRoutine(fileInfos []os.FileInfo, writer *io.PipeWriter) {
	for _, fileInfo := range fileInfos {
		file, err := os.Open(fileInfo.Name())
		if err != nil {
			writer.CloseWithError(err)
			return
		}
		_, err = io.Copy(writer, file)
		// Close inside the loop rather than deferring, so file handles are not
		// held open until the function returns.
		file.Close()
		if err != nil {
			writer.CloseWithError(err)
			return
		}
	}
	writer.Close()
}
Example #18
func (s *Sequence) done(p1, p2 *io.PipeWriter, stdinCh, stderrCh chan error) error {
	p1.Close()
	p2.Close()
	defer s.reset()
	if stdinCh != nil {
		if err := <-stdinCh; err != nil {
			return err
		}
	}
	if stderrCh != nil {
		if err := <-stderrCh; err != nil {
			return err
		}
	}
	return nil
}
Example #19
func readDat(filename string, c chan io.Reader) {
	f, err := os.Open("testdata/webkit/" + filename)
	if err != nil {
		c <- pipeErr(err)
		return
	}
	defer f.Close()

	// Loop through the lines of the file. Each line beginning with "#" denotes
	// a new section, which is returned as a separate io.Reader.
	r := bufio.NewReader(f)
	var pw *io.PipeWriter
	for {
		line, err := r.ReadSlice('\n')
		if err != nil {
			if pw != nil {
				pw.CloseWithError(err)
				pw = nil
			} else {
				c <- pipeErr(err)
			}
			return
		}
		if len(line) == 0 {
			continue
		}
		if line[0] == '#' {
			if pw != nil {
				pw.Close()
			}
			var pr *io.PipeReader
			pr, pw = io.Pipe()
			c <- pr
			continue
		}
		if line[0] != '|' {
			// Strip the trailing '\n'.
			line = line[:len(line)-1]
		}
		if pw != nil {
			if _, err := pw.Write(line); err != nil {
				pw.CloseWithError(err)
				pw = nil
			}
		}
	}
}
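The sectioning logic in readDat above generalizes to a small, self-contained sketch: hand out a fresh PipeReader for every section and stream that section's lines into the matching PipeWriter, closing each pipe when its section ends. The helper name, the sample input, and the '#' marker below are illustrative, not the html test harness itself.

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// sections sends one io.Reader per "#"-delimited section of input on c.
func sections(input io.Reader, c chan<- io.Reader) {
	defer close(c)
	scanner := bufio.NewScanner(input)
	var pw *io.PipeWriter
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, "#") {
			if pw != nil {
				pw.Close() // finish the previous section
			}
			var pr *io.PipeReader
			pr, pw = io.Pipe()
			c <- pr
			continue
		}
		if pw != nil {
			if _, err := pw.Write([]byte(line + "\n")); err != nil {
				pw.CloseWithError(err)
				pw = nil
			}
		}
	}
	if pw != nil {
		pw.Close()
	}
}

func main() {
	input := "#one\na\nb\n#two\nc\n"
	c := make(chan io.Reader)
	go sections(strings.NewReader(input), c)
	for r := range c {
		b, _ := io.ReadAll(r)
		fmt.Printf("section: %q\n", b)
	}
}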
Example #20
func logHandler(name string, debug bool, actionFunc func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		if debug {
			buf := new(bytes.Buffer)
			io.Copy(buf, r.Body)
			log.Debugf("Dispatching %s with %v", name, strings.TrimSpace(buf.String()))
			var writer *io.PipeWriter
			r.Body, writer = io.Pipe()
			go func() {
				io.Copy(writer, buf)
				writer.Close()
			}()
		}

		actionFunc(w, r)
	}
}
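Example #21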
func importFile(filename string) {
	file, err := os.Open(filename)

	if err != nil {
		log.Printf("Error: %v\n", err)
		return
	}
	defer file.Close()

	stats, _ := file.Stat()
	fileSize := stats.Size()

	log.Printf("Importing %v", filename)

	reader := bufio.NewReaderSize(file, 1024*1024)

	var resps = make(chan Response, 100)
	go handleResponses(filename, fileSize, resps)

	var pReader *io.PipeReader
	var pWriter *io.PipeWriter
	var i int
	for i = 0; err == nil; i++ {
		if i%250 == 0 {
			if pWriter != nil {
				pWriter.Close()
			}
			pReader, pWriter = io.Pipe()
			reqs <- Request{pReader, resps}
		}

		var line []byte
		line, err = reader.ReadBytes('\n')
		pWriter.Write(line)
	}

	if pWriter != nil {
		pWriter.Close()
	}

	if err != nil && err != io.EOF {
		log.Panicf("Scanner error: %v\n", err)
	}

	resps <- Response{nil, nil, true, i - 1}
}
Example #22
// secReadLoop copies data from r into pw, decrypting each message in the
// process with NaCl box open, using shared as the key.
func secReadLoop(r io.Reader, pw *io.PipeWriter, shared *[32]byte) {
	var failed bool
	// check logs a non-nil error, closes the pipe with it to signal the
	// reader that we failed, and marks the loop as finished.
	var check = func(err error) {
		if err != nil {
			log.Println("secReadLoop err:", err)
			if err2 := pw.CloseWithError(err); err2 != nil {
				log.Println("CloseWithError failed", err2)
			}
			failed = true
		}
	}
	for !failed { // until an error occurs
		// read next ciphered message from the passed reader
		msg := make([]byte, 32*1024)
		n, err := io.ReadAtLeast(r, msg, 25)
		// the closed conn check could be nicer but there is no way to access the abstracted TCPConn cleanly with the pipes involved
		if err != nil && (err == io.EOF || strings.Contains(err.Error(), "use of closed network connection")) {
			checkFatal(pw.Close())
			return
		}
		check(err)
		if failed {
			// the read failed; msg may be shorter than a nonce, so stop here
			return
		}

		// drop the unused remainder of the buffer
		msg = msg[:n]

		// copy the nonce from the message
		var nonce [24]byte
		copy(nonce[:], msg[:24])

		// cut off the nonce
		msg = msg[24:]

		// decrypt message
		clearMsg, ok := box.OpenAfterPrecomputation([]byte{}, msg, &nonce, shared)
		if !ok {
			check(errors.New("open failed"))
		}

		// copy the decrypted message to our pipe
		_, err = io.Copy(pw, bytes.NewReader(clearMsg))
		check(err)
	}
}
Example #23
// HandleRequest decodes the Omaha request from body, builds the response
// through the API, and writes it as XML to bodyWriter.
func HandleRequest(api *api.API, body io.Reader, bodyWriter *io.PipeWriter, ip string) error {
	defer func() {
		_ = bodyWriter.Close()
	}()

	omahaReq, err := readOmahaRequest(body)
	if err != nil {
		logger.Warn("HandleRequest problem with readOmahaRequest", "error", err.Error())
		return ErrMalformedRequest
	}

	omahaResp, err := buildOmahaResponse(api, omahaReq, ip)
	if err != nil {
		logger.Warn("HandleRequest problem with buildOmahaResponse", "error", err.Error())
		return ErrMalformedResponse
	}

	return writeXMLResponse(bodyWriter, omahaResp)
}
Example #24
// Streams upload directly from file -> mime/multipart -> pipe -> http-request
func streamingUploadFile(id, field, path, store string, w *io.PipeWriter, file io.Reader) {
	// defer file.Close()
	defer w.Close()
	writer := multipart.NewWriter(w)
	part, err := writer.CreateFormFile("file", filepath.Base(path))
	if err != nil {
		log.Fatal("err", "err", err)
		return
	}
	_, err = io.Copy(part, file)
	if err != nil {
		log.Fatal("err", "err", err)
		return
	}
	err = writer.Close()
	if err != nil {
		log.Fatal("err", "err", err)
		return
	}
}
Example #25
func (c *combinedReader) readTo(r io.Reader, w *io.PipeWriter) {
	p := make([]byte, 1e5)
	for {
		n, err := r.Read(p)
		if n > 0 {
			c.wlk.Lock()
			w.Write(p[:n])
			c.wlk.Unlock()
		}
		if err != nil {
			c.wlk.Lock()
			defer c.wlk.Unlock()
			c.closed++
			if c.closed == 2 {
				w.Close()
			}
			return
		}
	}
}
Example #26
// Wait overrides the embedded Cmd's Wait method to enable subprocess
// termination if a timeout has been exceeded.
func (mc *ManagedCmd) Wait() (err error) {
	go func() {
		mc.done <- mc.Cmd.Wait()
	}()

	done := false
	if mc.timeout_duration != 0 {
		for !done {
			select {
			case <-mc.Stopchan:
				err = fmt.Errorf("ManagedCmd was stopped with error: [%s]", mc.kill())
				done = true
			case <-time.After(mc.timeout_duration):
				mc.Stopchan <- true
				err = fmt.Errorf("ManagedCmd timedout")
			case err = <-mc.done:
				done = true
			}
		}
	} else {
		select {
		case <-mc.Stopchan:
			err = fmt.Errorf("ManagedCmd was stopped with error: [%s]", mc.kill())
		case err = <-mc.done:
		}
	}

	var writer *io.PipeWriter
	var ok bool

	writer, ok = mc.Stdout.(*io.PipeWriter)
	if ok {
		writer.Close()
	}
	writer, ok = mc.Stderr.(*io.PipeWriter)
	if ok {
		writer.Close()
	}

	return err
}
Example #27
func (c *combinedReader) readTo(r io.Reader, w *io.PipeWriter) {
	p := make([]byte, 1e5)
	for {
		// If an incoming line of text is longer than len(p), it may be interleaved with content from the other stream
		n, err := r.Read(p)
		if n > 0 {
			c.wlk.Lock()
			w.Write(p[:n])
			c.wlk.Unlock()
		}
		if err != nil {
			c.wlk.Lock()
			defer c.wlk.Unlock()
			c.closed++
			if c.closed == 2 {
				w.Close()
			}
			return
		}
	}
}
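Examples #25 and #27 merge two streams into a single pipe, guarding the shared PipeWriter with a mutex and closing it only after both sources are exhausted. Below is a standalone sketch of the same idea, here merging a command's stdout and stderr; the shell command and buffer size are illustrative and not taken from the original code.

package main

import (
	"io"
	"log"
	"os"
	"os/exec"
	"sync"
)

func main() {
	cmd := exec.Command("sh", "-c", "echo from-stdout; echo from-stderr 1>&2")
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		log.Fatal(err)
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		log.Fatal(err)
	}

	pr, pw := io.Pipe()
	var (
		mu     sync.Mutex // serializes writes from the two source goroutines
		closed int        // how many sources have finished
	)
	readTo := func(r io.Reader) {
		buf := make([]byte, 32*1024)
		for {
			n, err := r.Read(buf)
			if n > 0 {
				mu.Lock()
				pw.Write(buf[:n])
				mu.Unlock()
			}
			if err != nil {
				mu.Lock()
				closed++
				if closed == 2 {
					pw.Close() // both sources drained: signal EOF to the reader
				}
				mu.Unlock()
				return
			}
		}
	}

	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}
	go readTo(stdout)
	go readTo(stderr)

	// The merged stream can be consumed like any other io.Reader.
	io.Copy(os.Stdout, pr)
	cmd.Wait()
}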
Example #28
// streamingUpload streams a file via a pipe through a multipart.Writer.
// Generally one should use newStreamingUpload instead of calling this directly.
func streamingUpload(file *os.File, fileName string, postBodyWriter *multipart.Writer, w *io.PipeWriter) {
	defer GinkgoRecover()
	defer file.Close()
	defer w.Close()

	// Set up the form file
	fileWriter, err := postBodyWriter.CreateFormFile("file", fileName)
	if err != nil {
		Failf("Unable to to write file at %s to buffer. Error: %s", fileName, err)
	}

	// Copy kubectl binary into the file writer
	if _, err := io.Copy(fileWriter, file); err != nil {
		Failf("Unable to to copy file at %s into the file writer. Error: %s", fileName, err)
	}

	// Nothing more should be written to this instance of the postBodyWriter
	if err := postBodyWriter.Close(); err != nil {
		Failf("Unable to close the writer for file upload. Error: %s", err)
	}
}
Example #29
// Write uploads the sitemap XML for loc to S3, gzip-compressing the data on
// the fly when the filename indicates a gzip file.
func (adp *S3Adapter) Write(loc *Location, data []byte) {
	var reader io.Reader = bytes.NewReader(data)

	if GzipPtn.MatchString(loc.Filename()) {
		var writer *io.PipeWriter

		reader, writer = io.Pipe()
		go func() {
			gz := gzip.NewWriter(writer)
			io.Copy(gz, bytes.NewReader(data))

			gz.Close()
			writer.Close()
		}()
	}

	var creds *credentials.Credentials
	if adp.Credentials == nil {
		creds = credentials.NewEnvCredentials()
	} else {
		creds = adp.Credentials
	}

	creds.Get()

	sess := session.New(&aws.Config{
		Credentials: creds, Region: &adp.Region})

	uploader := s3manager.NewUploader(sess)
	_, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(adp.Bucket),
		Key:    aws.String(loc.PathInPublic()),
		ACL:    aws.String(adp.ACL),
		Body:   reader,
	})

	if err != nil {
		log.Fatal("[F] S3 Upload file Error:", err)
	}
}
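The gzip branch above streams the compressed bytes straight into the uploader without buffering them. Below is a minimal, self-contained sketch of that gzip-through-pipe pattern, writing to a local file instead of S3 and propagating errors to the reader via CloseWithError; the file name and payload are illustrative only.

package main

import (
	"bytes"
	"compress/gzip"
	"io"
	"log"
	"os"
)

func main() {
	data := []byte("sitemap contents")

	pr, pw := io.Pipe()
	go func() {
		gz := gzip.NewWriter(pw)
		if _, err := io.Copy(gz, bytes.NewReader(data)); err != nil {
			pw.CloseWithError(err)
			return
		}
		// Close the gzip writer first so its trailer is flushed, then close
		// the pipe; CloseWithError(nil) behaves like a plain Close.
		pw.CloseWithError(gz.Close())
	}()

	// Any io.Reader consumer works here; the example above hands the same
	// kind of pipe reader to the S3 uploader as the request body.
	out, err := os.Create("sitemap.xml.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, pr); err != nil {
		log.Fatal(err)
	}
}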
Example #30
func (s *GardenServer) streamInput(decoder *json.Decoder, in *io.PipeWriter, process garden.Process, connCloseCh chan struct{}) {
	for {
		var payload transport.ProcessPayload
		err := decoder.Decode(&payload)
		if err != nil {
			close(connCloseCh)
			in.CloseWithError(errors.New("Connection closed"))
			return
		}

		switch {
		case payload.TTY != nil:
			process.SetTTY(*payload.TTY)

		case payload.Source != nil:
			if payload.Data == nil {
				in.Close()
				return
			} else {
				_, err := in.Write([]byte(*payload.Data))
				if err != nil {
					return
				}
			}

		case payload.Signal != nil:
			s.logger.Info("stream-input-process-signal", lager.Data{"payload": payload})

			switch *payload.Signal {
			case garden.SignalKill:
				err = process.Signal(garden.SignalKill)
				if err != nil {
					s.logger.Error("stream-input-process-signal-kill-failed", err, lager.Data{"payload": payload})
				}
			case garden.SignalTerminate:
				err = process.Signal(garden.SignalTerminate)
				if err != nil {
					s.logger.Error("stream-input-process-signal-terminate-failed", err, lager.Data{"payload": payload})
				}
			default:
				s.logger.Error("stream-input-unknown-process-payload-signal", nil, lager.Data{"payload": payload})
				in.Close()
				return
			}

		default:
			s.logger.Error("stream-input-unknown-process-payload", nil, lager.Data{"payload": payload})
			in.Close()
			return
		}
	}
}