Example #1
func Fuzz(data []byte) int {
	cfg, err := gif.DecodeConfig(bytes.NewReader(data))
	if err != nil {
		return 0
	}
	if cfg.Width*cfg.Height > 1e6 {
		return 0
	}
	img, err := gif.Decode(bytes.NewReader(data))
	if err != nil {
		return 0
	}
	for c := 1; c <= 256; c += 21 {
		var w bytes.Buffer
		err = gif.Encode(&w, img, &gif.Options{NumColors: c})
		if err != nil {
			panic(err)
		}
		img1, err := gif.Decode(&w)
		if err != nil {
			panic(err)
		}
		b0 := img.Bounds()
		b1 := img1.Bounds()
		if b0.Max.X-b0.Min.X != b1.Max.X-b1.Min.X || b0.Max.Y-b0.Min.Y != b1.Max.Y-b1.Min.Y {
			fmt.Printf("img0: %#v\n", img.Bounds())
			fmt.Printf("img1: %#v\n", img1.Bounds())
			panic("bounds changed")
		}
	}
	return 1
}
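Note: both gif.DecodeConfig and gif.Decode above are given a fresh bytes.NewReader(data) because a bytes.Reader carries a read position; once the config pass has consumed it, a second decode from the same reader would only see EOF. A small standalone sketch of that behavior (the payload contents are arbitrary):

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	data := []byte("payload")
	r := bytes.NewReader(data)

	// First pass drains the reader.
	first, _ := ioutil.ReadAll(r)
	fmt.Println(len(first)) // 7

	// The read position is now at the end, so a second pass sees nothing.
	second, _ := ioutil.ReadAll(r)
	fmt.Println(len(second)) // 0

	// Either rewind the same reader or wrap the slice in a new one.
	r.Seek(0, io.SeekStart)
	again, _ := ioutil.ReadAll(r)
	fmt.Println(len(again)) // 7
}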
Example #2
// FetchUser will go to Cloud Foundry and access basic information about the user.
func (p *Provider) FetchUser(session goth.Session) (goth.User, error) {
	s := session.(*Session)
	user := goth.User{
		AccessToken:  s.AccessToken,
		Provider:     p.Name(),
		RefreshToken: s.RefreshToken,
		ExpiresAt:    s.ExpiresAt,
	}
	req, err := http.NewRequest("GET", p.UserInfoURL, nil)
	if err != nil {
		return user, err
	}
	req.Header.Set("Authorization", "Bearer "+s.AccessToken)
	resp, err := p.Client.Do(req)
	if err != nil {
		if resp != nil {
			resp.Body.Close()
		}
		return user, err
	}
	defer resp.Body.Close()

	bits, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return user, err
	}

	err = json.NewDecoder(bytes.NewReader(bits)).Decode(&user.RawData)
	if err != nil {
		return user, err
	}

	err = userFromReader(bytes.NewReader(bits), &user)
	return user, err
}
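Note: FetchUser reads the response body into bits once and then decodes it twice, creating a separate bytes.NewReader for each pass, since the first json.Decoder leaves its reader at EOF. A standalone sketch of the same two-pass decode; the JSON fields below are invented for illustration:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	payload := []byte(`{"id":"42","name":"Ada"}`)

	// One reader for the untyped pass...
	var raw map[string]interface{}
	if err := json.NewDecoder(bytes.NewReader(payload)).Decode(&raw); err != nil {
		panic(err)
	}

	// ...and a second, independent reader over the same bytes for the typed pass.
	var u struct {
		ID   string `json:"id"`
		Name string `json:"name"`
	}
	if err := json.NewDecoder(bytes.NewReader(payload)).Decode(&u); err != nil {
		panic(err)
	}

	fmt.Println(raw["name"], u.ID) // Ada 42
}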
Example #3
// PutPage writes a range of pages to a page blob or clears the given range.
// For 'clear' writes, the given chunk is discarded. Ranges must be aligned
// to 512-byte boundaries and the chunk size must be a multiple of 512.
//
// See https://msdn.microsoft.com/en-us/library/ee691975.aspx
func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte, extraHeaders map[string]string) error {
	path := fmt.Sprintf("%s/%s", container, name)
	uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"page"}})
	headers := b.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypePage)
	headers["x-ms-page-write"] = string(writeType)
	headers["x-ms-range"] = fmt.Sprintf("bytes=%v-%v", startByte, endByte)
	for k, v := range extraHeaders {
		headers[k] = v
	}
	var contentLength int64
	var data io.Reader
	if writeType == PageWriteTypeClear {
		contentLength = 0
		data = bytes.NewReader([]byte{})
	} else {
		contentLength = int64(len(chunk))
		data = bytes.NewReader(chunk)
	}
	headers["Content-Length"] = fmt.Sprintf("%v", contentLength)

	resp, err := b.client.exec("PUT", uri, headers, data)
	if err != nil {
		return err
	}
	defer resp.body.Close()

	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
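Note: the alignment rule in the comment means the start offset must fall on a 512-byte boundary and the inclusive range must cover a whole number of 512-byte pages. A hypothetical helper (not part of the SDK) that checks a range before calling PutPage:

package main

import "fmt"

// pageAligned reports whether [startByte, endByte] describes a valid page
// range: the start falls on a 512-byte boundary and the length is a
// positive multiple of 512, as required by the PutPage comment above.
func pageAligned(startByte, endByte int64) bool {
	length := endByte - startByte + 1
	return startByte%512 == 0 && length > 0 && length%512 == 0
}

func main() {
	fmt.Println(pageAligned(0, 511))    // true: exactly one 512-byte page
	fmt.Println(pageAligned(512, 1535)) // true: two pages starting at the second boundary
	fmt.Println(pageAligned(0, 512))    // false: length 513 is not a multiple of 512
}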
Example #4
func (s *TestSuite) TestFuncs(c *C) {
	var err error
	body := []byte("this is only a test file")

	key := "test_file"
	key1 := "test_file_1"
	key2 := "test_file_2"

	err = s.service.PutObject(key1, bytes.NewReader(body))
	c.Assert(err, IsNil)
	err = s.service.PutObject(key2, bytes.NewReader(body))
	c.Assert(err, IsNil)

	objs, err := s.service.ListObjects(key)
	c.Assert(err, IsNil)
	c.Assert(objs, HasLen, 2)

	r, err := s.service.GetObject(key1)
	c.Assert(err, IsNil)

	newBody, err := ioutil.ReadAll(r)
	c.Assert(err, IsNil)
	c.Assert(newBody, DeepEquals, body)

	err = s.service.DeleteObjects([]string{key})
	c.Assert(err, IsNil)

	objs, err = s.service.ListObjects(key)
	c.Assert(err, IsNil)
	c.Assert(objs, HasLen, 0)
}
Example #5
// This may take a minute or more due to the minimum part size S3
// accepts for multipart upload parts.
func (s *ClientTests) TestMultiComplete(c *gocheck.C) {
	b := testBucket(s.s3)
	err := b.PutBucket(s3.Private)
	c.Assert(err, gocheck.IsNil)

	multi, err := b.InitMulti("multi", "text/plain", s3.Private)
	c.Assert(err, gocheck.IsNil)
	c.Assert(multi.UploadId, gocheck.Matches, ".+")
	defer multi.Abort()

	// Minimum size S3 accepts for all but the last part is 5MB.
	data1 := make([]byte, 5*1024*1024)
	data2 := []byte("<part 2>")

	part1, err := multi.PutPart(1, bytes.NewReader(data1))
	c.Assert(err, gocheck.IsNil)
	part2, err := multi.PutPart(2, bytes.NewReader(data2))
	c.Assert(err, gocheck.IsNil)

	// Purposefully reversed. The order requirement must be handled.
	err = multi.Complete([]s3.Part{part2, part1})
	c.Assert(err, gocheck.IsNil)

	data, err := b.Get("multi")
	c.Assert(err, gocheck.IsNil)

	c.Assert(len(data), gocheck.Equals, len(data1)+len(data2))
	for i := range data1 {
		if data[i] != data1[i] {
			c.Fatalf("uploaded object at byte %d: want %d, got %d", data1[i], data[i])
		}
	}
	c.Assert(string(data[len(data1):]), gocheck.Equals, string(data2))
}
Example #6
func TestTip(t *testing.T) {
	expect := "fmt/669"
	err := setup()
	if err != nil {
		t.Error(err)
	}
	buf := bytes.NewReader([]byte{0x00, 0x4d, 0x52, 0x4d, 0x00})
	c, err := s.Identify("test.mrw", buf)
	if err != nil {
		t.Fatal(err)
	}
	for i := range c {
		if i.String() != expect {
			t.Errorf("First buffer: expecting %s, got %s", expect, i)
		}
	}
	buf = bytes.NewReader([]byte{0x00, 0x4d, 0x52, 0x4d, 0x00})
	c, err = s.Identify("test.mrw", buf)
	if err != nil {
		t.Fatal(err)
	}
	for i := range c {
		if i.String() != expect {
			t.Errorf("Second buffer: expecting %s, got %s", expect, i)
		}
	}
	buf = bytes.NewReader([]byte{0x00, 0x4d, 0x52, 0x4d, 0x00})
	c, err = s.Identify("test.mrw", buf)
	if err != nil {
		t.Fatal(err)
	}
	for i := range c {
		if i.String() != expect {
			t.Errorf("Third buffer: expecting %s, got %s", expect, i)
		}
	}
}
Example #7
func TestUnmarshal(t *testing.T) {
	for i, tt := range unmarshalTests {
		var scan Scanner
		in := []byte(tt.in)
		if err := checkValid(in, &scan); err != nil {
			if !reflect.DeepEqual(err, tt.err) {
				t.Errorf("#%d: checkValid: %#v", i, err)
				continue
			}
		}
		if tt.ptr == nil {
			continue
		}

		// v = new(right-type)
		v := reflect.New(reflect.TypeOf(tt.ptr).Elem())
		dec := NewDecoder(bytes.NewReader(in))
		if tt.useNumber {
			dec.UseNumber()
		}
		if err := dec.Decode(v.Interface()); !reflect.DeepEqual(err, tt.err) {
			t.Errorf("#%d: %v, want %v", i, err, tt.err)
			continue
		} else if err != nil {
			continue
		}
		if !reflect.DeepEqual(v.Elem().Interface(), tt.out) {
			t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), tt.out)
			data, _ := Marshal(v.Elem().Interface())
			println(string(data))
			data, _ = Marshal(tt.out)
			println(string(data))
			continue
		}

		// Check round trip.
		if tt.err == nil {
			enc, err := Marshal(v.Interface())
			if err != nil {
				t.Errorf("#%d: error re-marshaling: %v", i, err)
				continue
			}
			vv := reflect.New(reflect.TypeOf(tt.ptr).Elem())
			dec = NewDecoder(bytes.NewReader(enc))
			if tt.useNumber {
				dec.UseNumber()
			}
			if err := dec.Decode(vv.Interface()); err != nil {
				t.Errorf("#%d: error re-unmarshaling %#q: %v", i, enc, err)
				continue
			}
			if !reflect.DeepEqual(v.Elem().Interface(), vv.Elem().Interface()) {
				t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), vv.Elem().Interface())
				t.Errorf("     In: %q", strings.Map(noSpace, string(in)))
				t.Errorf("Marshal: %q", strings.Map(noSpace, string(enc)))
				continue
			}
		}
	}
}
Example #8
func (b *Broadcast) broadcaster(in <-chan msgInfo) {
	mx := mux.StandardMux()
	for {
		mi, ok := <-in
		if !ok {
			return
		}

		buf := bytes.Buffer{}
		mx.Encoder(&buf).Encode(mi.msg)
		bts := buf.Bytes()

		b.neighbsmu.RLock()
		for conn := range b.neighbsPri {
			if mi.sender != conn {
				go io.Copy(conn, bytes.NewReader(bts))
			}
		}
		for conn := range b.neighbsSec {
			if mi.sender != conn {
				go io.Copy(conn, bytes.NewReader(bts))
			}
		}
		b.neighbsmu.RUnlock()
	}
}
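Note: broadcaster encodes the message once and then gives every connection its own bytes.NewReader over the shared bts slice, so the concurrent io.Copy calls each keep an independent read offset and nothing is re-encoded per peer. A standalone sketch of that fan-out pattern, with in-memory buffers standing in for connections:

package main

import (
	"bytes"
	"fmt"
	"io"
	"sync"
)

func main() {
	msg := []byte("broadcast payload")

	// One destination buffer per "connection".
	dests := []*bytes.Buffer{{}, {}, {}}

	var wg sync.WaitGroup
	for _, d := range dests {
		wg.Add(1)
		go func(w io.Writer) {
			defer wg.Done()
			// Each goroutine gets its own reader over the shared slice,
			// so the copies never disturb each other's read position.
			io.Copy(w, bytes.NewReader(msg))
		}(d)
	}
	wg.Wait()

	for i, d := range dests {
		fmt.Println(i, d.String())
	}
}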
Example #9
// TestStorageProof builds a storage proof and checks that it verifies
// correctly.
func TestStorageProof(t *testing.T) {
	// generate proof data
	numSegments := uint64(7)
	data := make([]byte, numSegments*SegmentSize)
	rand.Read(data)
	rootHash, err := ReaderMerkleRoot(bytes.NewReader(data))
	if err != nil {
		t.Fatal(err)
	}

	// create and verify proofs for all indices
	for i := uint64(0); i < numSegments; i++ {
		baseSegment, hashSet, err := BuildReaderProof(bytes.NewReader(data), i)
		if err != nil {
			t.Error(err)
			continue
		}
		if !VerifySegment(baseSegment, hashSet, numSegments, i, rootHash) {
			t.Error("Proof", i, "did not pass verification")
		}
	}

	// Try an incorrect proof.
	baseSegment, hashSet, err := BuildReaderProof(bytes.NewReader(data), 3)
	if err != nil {
		t.Fatal(err)
	}
	if VerifySegment(baseSegment, hashSet, numSegments, 4, rootHash) {
		t.Error("Verified a bad proof")
	}
}
Example #10
func TestSizeSplitterIsDeterministic(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	test := func() {
		bufR := randBuf(t, 10000000) // crank this up to satisfy yourself.
		bufA := copyBuf(bufR)
		bufB := copyBuf(bufR)

		chunksA := DefaultSplitter.Split(bytes.NewReader(bufA))
		chunksB := DefaultSplitter.Split(bytes.NewReader(bufB))

		for n := 0; ; n++ {
			a, moreA := <-chunksA
			b, moreB := <-chunksB

			if !moreA {
				if moreB {
					t.Fatal("A ended, B didnt.")
				}
				return
			}

			if !bytes.Equal(a, b) {
				t.Fatalf("chunk %d not equal", n)
			}
		}
	}

	for run := 0; run < 1; run++ { // crank this up to satisfy yourself.
		test()
	}
}
Example #11
func (log *Log) appendEntries(s *Server, prevIndex int64, logEntries []*LogEntry) {
	log.Lock()
	defer log.Unlock()
	if len(log.entries) == 0 {
		log.entries = logEntries
		return
	}
	log.entries = log.entries[:prevIndex-log.startIndex()+1]
	// log.entries = append(log.entries[:(index-log.startIndex())+1], logEntries...)
	for _, entry := range logEntries {
		if entry.GetCommandName() == cOldNewStr {
			nodes := []Node{}
			if err := gob.NewDecoder(bytes.NewReader(entry.GetCommand())).Decode(&nodes); err != nil {
				logger.Println("decode Cold,new config err: ", err.Error())
			}
			s.config.setState(cOldNew)
			s.config.cNewNode = make(nodeMap)
			for _, node := range nodes {
				s.config.cNewNode[node.id()] = node
			}
		} else if entry.GetCommandName() == cNewStr {
			nodes := []Node{}
			if err := gob.NewDecoder(bytes.NewReader(entry.GetCommand())).Decode(&nodes); err != nil {
				logger.Println("decode Cnew config err: ", err.Error())
			}
			s.config.setState(cOld)
			s.config.cOldNode = makeNodeMap(nodes...)
			s.config.cNewNode = nil
		}
		log.entries = append(log.entries, entry)
	}
}
Example #12
// NextReader returns the next message received from the peer. The returned
// messageType is one of TextMessage, BinaryMessage or PongMessage. The
// connection automatically handles ping messages received from the peer.
// NextReader returns an error upon receiving a close message from the peer.
//
// There can be at most one open reader on a connection. NextReader discards
// the previous message if the application has not already consumed it.
//
// The NextReader method and the readers returned from the method cannot be
// accessed by more than one goroutine at a time.
func (c *Conn) NextReader() (messageType int, r io.Reader, err error) {

	c.readSeq += 1
	c.readLength = 0

	if c.savedPong != nil {
		r := bytes.NewReader(c.savedPong)
		c.savedPong = nil
		return PongMessage, r, nil
	}

	for c.readErr == nil {
		var opCode int
		opCode, c.readErr = c.advanceFrame()
		switch opCode {
		case TextMessage, BinaryMessage:
			return opCode, messageReader{c, c.readSeq}, nil
		case PongMessage:
			r := bytes.NewReader(c.savedPong)
			c.savedPong = nil
			return PongMessage, r, nil
		case continuationFrame:
			// do nothing
		}
	}
	return -1, nil, c.readErr
}
Example #13
func TestNewTemplateFromArchive(t *testing.T) {
	r := bytes.NewReader([]byte{})
	if _, err := NewTemplateFromArchive("", r, nil); err == nil {
		t.Fatalf("expected error did not occur for empty input: %s", err)
	}

	r = bytes.NewReader([]byte("test"))
	if _, err := NewTemplateFromArchive("", r, nil); err == nil {
		t.Fatalf("expected error did not occur for non archive file:%s", err)
	}

	r = generateArchive(t, invalidFiles)
	if _, err := NewTemplateFromArchive(invalidFiles[0].Name, r, nil); err == nil {
		t.Fatalf("expected error did not occur for empty file in archive")
	}

	r = generateArchive(t, validFiles)
	if _, err := NewTemplateFromArchive("", r, nil); err == nil {
		t.Fatalf("expected error did not occur for missing file in archive")
	}

	r = generateArchive(t, validFiles)
	if _, err := NewTemplateFromArchive(validFiles[1].Name, r, nil); err != nil {
		t.Fatalf("cannnot create template from valid archive")
	}
}
Example #14
func FuzzAll(data []byte) int {
	cfg, err := gif.DecodeConfig(bytes.NewReader(data))
	if err != nil {
		return 0
	}
	if cfg.Width*cfg.Height > 1e6 {
		return 0
	}
	img, err := gif.DecodeAll(bytes.NewReader(data))
	if err != nil {
		return 0
	}
	w := new(bytes.Buffer)
	err = gif.EncodeAll(w, img)
	if err != nil {
		panic(err)
	}
	img1, err := gif.DecodeAll(w)
	if err != nil {
		panic(err)
	}
	if img.LoopCount == 0 && img1.LoopCount == -1 {
		// https://github.com/golang/go/issues/11287
		img1.LoopCount = 0
	}
	// https://github.com/golang/go/issues/11288
	img1.Disposal = img.Disposal
	if !fuzz.DeepEqual(img, img1) {
		fmt.Printf("gif0: %#v\n", img)
		fmt.Printf("gif1: %#v\n", img1)
		panic("gif changed")
	}
	return 1
}
Example #15
func (s *signedSuite) SetUpSuite(c *gc.C) {
	var imageData = map[string]string{
		"/unsigned/streams/v1/index.json":          unsignedIndex,
		"/unsigned/streams/v1/image_metadata.json": unsignedProduct,
	}

	// Set up some signed data from the unsigned data.
	// Overwrite the product path to use the sjson suffix.
	rawUnsignedIndex := strings.Replace(
		unsignedIndex, "streams/v1/image_metadata.json", "streams/v1/image_metadata.sjson", -1)
	r := bytes.NewReader([]byte(rawUnsignedIndex))
	signedData, err := simplestreams.Encode(
		r, sstesting.SignedMetadataPrivateKey, sstesting.PrivateKeyPassphrase)
	c.Assert(err, jc.ErrorIsNil)
	imageData["/signed/streams/v1/index.sjson"] = string(signedData)

	// Replace the image id in the unsigned data with a different one so we can test that the right
	// image id is used.
	rawUnsignedProduct := strings.Replace(
		unsignedProduct, "ami-26745463", "ami-123456", -1)
	r = bytes.NewReader([]byte(rawUnsignedProduct))
	signedData, err = simplestreams.Encode(
		r, sstesting.SignedMetadataPrivateKey, sstesting.PrivateKeyPassphrase)
	c.Assert(err, jc.ErrorIsNil)
	imageData["/signed/streams/v1/image_metadata.sjson"] = string(signedData)
	sstesting.SetRoundTripperFiles(imageData, map[string]int{"test://unauth": http.StatusUnauthorized})
	s.origKey = imagemetadata.SetSigningPublicKey(sstesting.SignedMetadataPublicKey)
}
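Note: the suite turns its string fixtures into readers with bytes.NewReader([]byte(...)). For read-only string data, strings.NewReader provides the same io.Reader semantics without the intermediate []byte copy; either form works for any API that only needs an io.Reader. A standalone comparison:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"strings"
)

func main() {
	s := "signed metadata"

	// Both readers yield the same bytes; the strings version skips the copy.
	fromBytes, _ := ioutil.ReadAll(bytes.NewReader([]byte(s)))
	fromString, _ := ioutil.ReadAll(strings.NewReader(s))

	fmt.Println(string(fromBytes) == string(fromString)) // true
}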
Example #16
func Fuzz(data []byte) int {
	cfg, err := png.DecodeConfig(bytes.NewReader(data))
	if err != nil {
		return 0
	}
	if cfg.Width*cfg.Height > 1e6 {
		return 0
	}
	img, err := png.Decode(bytes.NewReader(data))
	if err != nil {
		return 0
	}
	for _, c := range []png.CompressionLevel{png.DefaultCompression, png.NoCompression, png.BestSpeed, png.BestCompression} {
		var w bytes.Buffer
		e := &png.Encoder{c}
		err = e.Encode(&w, img)
		if err != nil {
			panic(err)
		}
		img1, err := png.Decode(&w)
		if err != nil {
			panic(err)
		}
		if !reflect.DeepEqual(img.Bounds(), img1.Bounds()) {
			fmt.Printf("bounds0: %#v\n", img.Bounds())
			fmt.Printf("bounds1: %#v\n", img1.Bounds())
			panic("bounds have changed")
		}
	}
	return 1
}
Example #17
func (ctx *requestContext) process() {
	body, err := ioutil.ReadAll(ctx.req.Body)
	if err != nil {
		ctx.err(400, err)
		return
	}

	ctx.reqBody = &body
	ctx.req.Body = ioutil.NopCloser(bytes.NewReader(*ctx.reqBody))

	req, err := ask.NewRequestFromJSON(bytes.NewReader(*ctx.reqBody))
	if err != nil {
		ctx.err(400, err)
		return
	}

	if !ctx.validateRequestSignature(req) ||
		!ctx.validateTimestamp(req) ||
		!ctx.validateApplicationID(req) ||
		!ctx.validateRequestType(req) {
		return
	}

	ctx.skill.internalHandler(ctx, req)
}
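Note: process drains ctx.req.Body once and then rebuilds it with ioutil.NopCloser(bytes.NewReader(...)) so the raw bytes remain available both for signature validation and for the JSON parse. A standalone sketch of that body-rewind trick; the request URL and payload are placeholders:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	// A stand-in request; the URL and payload are placeholders.
	req, _ := http.NewRequest("POST", "http://example.invalid/echo", strings.NewReader(`{"ok":true}`))

	// Drain the body once for inspection...
	body, _ := ioutil.ReadAll(req.Body)
	req.Body.Close()

	// ...then rebuild it so a later consumer can read the same bytes again.
	// NopCloser supplies the Close method that http.Request.Body requires.
	req.Body = ioutil.NopCloser(bytes.NewReader(body))

	again, _ := ioutil.ReadAll(req.Body)
	fmt.Println(string(again)) // {"ok":true}
}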
Example #18
func TestVerifyChecksumPatchNegative(t *testing.T) {
	t.Parallel()

	fName := "TestVerifyChecksumPatchNegative"
	defer cleanup(fName)
	writeOldFile(fName, t)

	checksum, err := ChecksumForBytes(newFile)
	if err != nil {
		t.Fatalf("Failed to compute checksum: %v", err)
	}

	patch := new(bytes.Buffer)
	anotherFile := []byte{0x77, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66}
	err = binarydist.Diff(bytes.NewReader(oldFile), bytes.NewReader(anotherFile), patch)
	if err != nil {
		t.Fatalf("Failed to create patch: %v", err)
	}

	up := New().Target(fName).ApplyPatch(PATCHTYPE_BSDIFF).VerifyChecksum(checksum)
	err, _ = up.FromStream(bytes.NewReader(patch.Bytes()))
	if err == nil {
		t.Fatalf("Failed to detect patch to wrong file!")
	}
}
Example #19
func decompress() {
	log.Info("Decompressing Riak Explorer")

	var err error
	if err := os.Mkdir("riak_explorer", 0777); err != nil {
		log.Fatal("Unable to make rex directory: ", err)
	}

	asset, err := artifacts.Asset("trusty.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	if err = common.ExtractGZ("riak_explorer", bytes.NewReader(asset)); err != nil {
		log.Fatal("Unable to extract trusty root: ", err)
	}
	asset, err = artifacts.Asset("riak_explorer-bin.tar.gz")

	if err != nil {
		log.Fatal(err)
	}
	if err = common.ExtractGZ("riak_explorer", bytes.NewReader(asset)); err != nil {
		log.Fatal("Unable to extract rex: ", err)
	}

}
Example #20
func (s *signedSuite) SetUpSuite(c *gc.C) {
	s.BaseSuite.SetUpSuite(c)
	var imageData = map[string]string{
		"/unsigned/streams/v1/index.json":          unsignedIndex,
		"/unsigned/streams/v1/tools_metadata.json": unsignedProduct,
	}

	// Set up some signed data from the unsigned data.
	// Overwrite the product path to use the sjson suffix.
	rawUnsignedIndex := strings.Replace(
		unsignedIndex, "streams/v1/tools_metadata.json", "streams/v1/tools_metadata.sjson", -1)
	r := bytes.NewReader([]byte(rawUnsignedIndex))
	signedData, err := simplestreams.Encode(
		r, sstesting.SignedMetadataPrivateKey, sstesting.PrivateKeyPassphrase)
	c.Assert(err, jc.ErrorIsNil)
	imageData["/signed/streams/v1/index.sjson"] = string(signedData)

	// Replace the tools path in the unsigned data with a different one so we can test that the right
	// tools path is used.
	rawUnsignedProduct := strings.Replace(
		unsignedProduct, "juju-1.13.0", "juju-1.13.1", -1)
	r = bytes.NewReader([]byte(rawUnsignedProduct))
	signedData, err = simplestreams.Encode(
		r, sstesting.SignedMetadataPrivateKey, sstesting.PrivateKeyPassphrase)
	c.Assert(err, jc.ErrorIsNil)
	imageData["/signed/streams/v1/tools_metadata.sjson"] = string(signedData)
	sstesting.SetRoundTripperFiles(imageData, map[string]int{"signedtest://unauth": http.StatusUnauthorized})
	s.PatchValue(&juju.JujuPublicKey, sstesting.SignedMetadataPublicKey)
}
Example #21
func TestParsePAXHeader(t *testing.T) {
	paxTests := [][3]string{
		{"a", "a=name", "10 a=name\n"}, // Test case involving multiple acceptable lengths
		{"a", "a=name", "9 a=name\n"},  // Test case involving multiple acceptable length
		{"mtime", "mtime=1350244992.023960108", "30 mtime=1350244992.023960108\n"}}
	for _, test := range paxTests {
		key, expected, raw := test[0], test[1], test[2]
		reader := bytes.NewReader([]byte(raw))
		headers, err := parsePAX(reader)
		if err != nil {
			t.Errorf("Couldn't parse correctly formatted headers: %v", err)
			continue
		}
		if headers[key] != expected {
			t.Errorf("%s header incorrectly parsed: got %s, wanted %s", key, headers[key], expected)
			continue
		}
		trailer := make([]byte, 100)
		n, err := reader.Read(trailer)
		if err != io.EOF || n != 0 {
			t.Error("Buffer wasn't consumed")
		}
	}
	badHeader := bytes.NewReader([]byte("3 somelongkey="))
	if _, err := parsePAX(badHeader); err != ErrHeader {
		t.Fatal("Unexpected success when parsing bad header")
	}
}
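Note: the test verifies that parsePAX consumed the whole record by reading again and expecting io.EOF with n == 0. A bytes.Reader also exposes this directly: Len reports the unread portion and Size the total. A standalone sketch using the same 30-byte PAX record:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	// The same 30-byte PAX record used in the test above.
	r := bytes.NewReader([]byte("30 mtime=1350244992.023960108\n"))

	// Consume the 3-byte length prefix ("30 ").
	head := make([]byte, 3)
	io.ReadFull(r, head)

	// Size is the whole record, Len is what is still unread.
	fmt.Println(r.Size(), r.Len()) // 30 27

	// Drain the rest; a further Read reports io.EOF with n == 0,
	// which is exactly what the trailer check in the test verifies.
	io.Copy(ioutil.Discard, r)
	n, err := r.Read(head)
	fmt.Println(n, err == io.EOF) // 0 true
}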
Example #22
func WriteFile(f []byte, path string) *model.AppError {
	if utils.Cfg.FileSettings.DriverName == model.IMAGE_DRIVER_S3 {
		endpoint := utils.Cfg.FileSettings.AmazonS3Endpoint
		accessKey := utils.Cfg.FileSettings.AmazonS3AccessKeyId
		secretKey := utils.Cfg.FileSettings.AmazonS3SecretAccessKey
		secure := *utils.Cfg.FileSettings.AmazonS3SSL
		s3Clnt, err := s3.New(endpoint, accessKey, secretKey, secure)
		if err != nil {
			return model.NewLocAppError("WriteFile", "api.file.write_file.s3.app_error", nil, err.Error())
		}
		bucket := utils.Cfg.FileSettings.AmazonS3Bucket
		ext := filepath.Ext(path)

		if model.IsFileExtImage(ext) {
			_, err = s3Clnt.PutObject(bucket, path, bytes.NewReader(f), model.GetImageMimeType(ext))
		} else {
			_, err = s3Clnt.PutObject(bucket, path, bytes.NewReader(f), "binary/octet-stream")
		}
		if err != nil {
			return model.NewLocAppError("WriteFile", "api.file.write_file.s3.app_error", nil, err.Error())
		}
	} else if utils.Cfg.FileSettings.DriverName == model.IMAGE_DRIVER_LOCAL {
		if err := writeFileLocally(f, utils.Cfg.FileSettings.Directory+path); err != nil {
			return err
		}
	} else {
		return model.NewLocAppError("WriteFile", "api.file.write_file.configured.app_error", nil, "")
	}

	return nil
}
Example #23
// FetchUser will go to Google+ and access basic information about the user.
func (p *Provider) FetchUser(session goth.Session) (goth.User, error) {
	sess := session.(*Session)
	user := goth.User{
		AccessToken: sess.AccessToken,
		Provider:    p.Name(),
	}

	response, err := http.Get(endpointProfile + "?access_token=" + url.QueryEscape(sess.AccessToken))
	if err != nil {
		return user, err
	}
	defer response.Body.Close()

	bits, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return user, err
	}

	err = json.NewDecoder(bytes.NewReader(bits)).Decode(&user.RawData)
	if err != nil {
		return user, err
	}

	err = userFromReader(bytes.NewReader(bits), &user)
	return user, err
}
Example #24
func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expectedDigest digest.Digest) {
	ctx := context.Background()
	wr, err := bs.Create(ctx)
	if err != nil {
		t.Fatalf("unexpected error starting upload: %v", err)
	}

	nn, err := io.Copy(wr, bytes.NewReader(blob))
	if err != nil {
		t.Fatalf("error copying into blob writer: %v", err)
	}

	if nn != 0 {
		t.Fatalf("unexpected number of bytes copied: %v > 0", nn)
	}

	dgst, err := digest.FromReader(bytes.NewReader(blob))
	if err != nil {
		t.Fatalf("error getting digest: %v", err)
	}

	if dgst != expectedDigest {
		// sanity check on zero digest
		t.Fatalf("digest not as expected: %v != %v", dgst, expectedDigest)
	}

	desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst})
	if err != nil {
		t.Fatalf("unexpected error committing write: %v", err)
	}

	if desc.Digest != dgst {
		t.Fatalf("unexpected digest: %v != %v", desc.Digest, dgst)
	}
}
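Note: simpleUpload recomputes the blob digest by streaming it through digest.FromReader(bytes.NewReader(blob)). The same streaming idea with only the standard library: copy a bytes.Reader into a hash. A standalone sketch (sha256 here is just an example digest algorithm):

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
)

func main() {
	blob := []byte("layer contents")

	h := sha256.New()
	// Stream the blob through the hash the same way an upload would
	// stream it from disk or the network.
	if _, err := io.Copy(h, bytes.NewReader(blob)); err != nil {
		panic(err)
	}
	fmt.Printf("sha256:%x\n", h.Sum(nil))
}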
Example #25
func TestDecompInlen(t *testing.T) {
	data := bytes.Repeat([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 1000)
	cmp := Compress1X(data)

	for i := 1; i < 16; i++ {
		for j := -16; j < 16; j++ {
			_, err := Decompress1X(io.LimitReader(bytes.NewReader(cmp), int64(len(cmp)-i)), len(cmp)+j, 0)
			if err != io.EOF {
				t.Error("EOF expected for truncated input, found:", err)
			}
		}
	}

	for j := -16; j < 16; j++ {
		data2, err := Decompress1X(bytes.NewReader(cmp), len(cmp)+j, 0)
		if j < 0 && err != io.EOF {
			t.Error("EOF expected for truncated input, found:", err)
		}
		if j >= 0 {
			if err != nil {
				t.Error("error for normal decompression:", err, j)
			} else if !reflect.DeepEqual(data, data2) {
				t.Error("data doesn't match after decompression")
			}
		}
	}
}
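Note: the truncation loop wraps the compressed data in io.LimitReader so Decompress1X sees fewer bytes than the slice really holds. A standalone sketch of how LimitReader cuts off a bytes.Reader:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	data := []byte("0123456789")

	// Only the first 4 bytes are visible through the LimitReader,
	// which is how the test above simulates truncated input.
	lr := io.LimitReader(bytes.NewReader(data), 4)
	got, _ := ioutil.ReadAll(lr)
	fmt.Printf("%s\n", got) // 0123

	// Reading past the limit yields io.EOF even though the
	// underlying slice has more bytes.
	n, err := lr.Read(make([]byte, 1))
	fmt.Println(n, err == io.EOF) // 0 true
}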
Example #26
func TestTemplateVarsContainSpaces(t *testing.T) {
	html := `<h1>{{a }}{{ > b }}{{ e}}</h1>`
	data := map[string]interface{}{
		"a": "Hello",
		"d": "World",
		"e": "!",
	}

	var exp = `<h1>Hello World!</h1>`

	tmpl := &Template{
		File: bytes.NewReader([]byte(html)),
		Data: &Data{Value: data},
	}
	tmpl.Partial(func(path string) (io.Reader, error) {
		var p []byte
		switch path {
		case "b":
			p = []byte(` {{> c}}`)
		case "c":
			p = []byte(`{{d}}`)

		default:
			t.Error("invalid partial %s", path)
		}

		return bytes.NewReader(p), nil
	})

	Asser{t}.
		Given(a(tmpl)).
		Then(bodyEquals(exp)).
		And(errorIs(nil))
}
Example #27
func (p *Provider) FetchUser(session goth.Session) (*goth.User, error) {
	sess := session.(*Session)
	user := goth.User{
		AccessToken:  sess.AccessToken,
		RefreshToken: sess.RefreshToken,
	}
	client := &http.Client{}
	req, err := http.NewRequest("GET", endpointProfile, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Authorization", "Bearer "+sess.AccessToken)
	response, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()
	bits, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return nil, err
	}
	err = json.NewDecoder(bytes.NewReader(bits)).Decode(&user.RawData)
	if err != nil {
		return nil, err
	}

	err = userFromReader(bytes.NewReader(bits), &user)
	return &user, err
}
Example #28
func opener(t *testing.T) func(string) (Reader, Reader) {
	var wrdr, wrdr2 Reader
	return func(path string) (Reader, Reader) {
		buf, _ := ioutil.ReadFile(path)
		rdr := bytes.NewReader(buf)
		rdr2 := bytes.NewReader(buf)
		var err error
		if wrdr == nil {
			wrdr, err = NewReader(rdr)
		} else {
			err = wrdr.Reset(rdr)
		}
		if err != nil {
			if strings.Index(path, "invalid") != -1 {
				return nil, nil
			}
			t.Fatalf("test case: %s; error: %v", path, err)
		}
		if wrdr2 == nil {
			wrdr2, err = NewReader(rdr2)
		} else {
			err = wrdr2.Reset(rdr2)
		}
		if err != nil {
			if strings.Index(path, "invalid") != -1 {
				return nil, nil
			}
			t.Fatalf("test case: %s; error: %v", path, err)
		}
		return wrdr, wrdr2
	}
}
Example #29
func TestChunkerWithoutHash(t *testing.T) {
	// setup data source
	buf := getRandom(23, 32*1024*1024)

	ch := New(bytes.NewReader(buf), testPol)
	chunks := testWithData(t, ch, chunks1, false)

	// test reader
	for i, c := range chunks {
		if uint(len(c.Data)) != chunks1[i].Length {
			t.Fatalf("reader returned wrong number of bytes: expected %d, got %d",
				chunks1[i].Length, len(c.Data))
		}

		if !bytes.Equal(buf[c.Start:c.Start+c.Length], c.Data) {
			t.Fatalf("invalid data for chunk returned: expected %02x, got %02x",
				buf[c.Start:c.Start+c.Length], c.Data)
		}
	}

	// setup nullbyte data source
	buf = bytes.Repeat([]byte{0}, len(chunks2)*MinSize)
	ch = New(bytes.NewReader(buf), testPol)

	testWithData(t, ch, chunks2, false)
}
Example #30
func saveToCache(version string, data []byte) bool {
	if s3Connection == nil {
		return false
	}

	buf := bytes.NewReader(data)
	_, err := s3Connection.PutObject(&s3.PutObjectInput{
		Bucket: aws.String(config.Bucket),
		Key:    aws.String(version),
		Body:   buf,
	})

	if err != nil {
		log.Printf("Unable to save to version: %s", err)
		return false
	}

	_, err = s3Connection.PutObject(&s3.PutObjectInput{
		Bucket: aws.String(config.Bucket),
		Key:    aws.String("latest"),
		Body:   bytes.NewReader([]byte(version)),
	})

	if err != nil {
		log.Printf("Unable to put version id: %s", err)
		return false
	}

	return true
}
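Note: saveToCache passes bytes.NewReader values as the Body of s3.PutObjectInput; in aws-sdk-go that field is an io.ReadSeeker, so the SDK can rewind the payload (for example when signing or retrying), which a plain bytes.Buffer could not support. A standalone sketch of *bytes.Reader acting as an io.ReadSeeker:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	// *bytes.Reader implements io.ReadSeeker (Read plus Seek), so it suits
	// APIs that may need to rewind the payload; *bytes.Buffer cannot seek.
	var rs io.ReadSeeker = bytes.NewReader([]byte("v1.2.3"))

	head := make([]byte, 2)
	rs.Read(head)
	fmt.Printf("%s\n", head) // v1

	// Rewind and read the whole payload again from the start.
	rs.Seek(0, io.SeekStart)
	all, _ := ioutil.ReadAll(rs)
	fmt.Printf("%s\n", all) // v1.2.3
}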