func actorWriteFile(d *Directory) error {
	fi, err := randomFile(d)
	if err != nil {
		return err
	}
	if fi == nil {
		return nil
	}

	size := rand.Intn(1024) + 1
	buf := make([]byte, size)
	rand.Read(buf)

	s, err := fi.Size()
	if err != nil {
		return err
	}

	wfd, err := fi.Open(OpenWriteOnly, true)
	if err != nil {
		return err
	}

	offset := rand.Int63n(s)

	n, err := wfd.WriteAt(buf, offset)
	if err != nil {
		return err
	}
	if n != size {
		return fmt.Errorf("didn't write enough")
	}

	return wfd.Close()
}
func mustWriteRandFile(path string, size int, seed int64) *os.File {
	p := make([]byte, size)
	rand.Seed(seed)
	_, err := rand.Read(p)
	if err != nil {
		panic(err)
	}

	f, err := os.Create(path)
	if err != nil {
		panic(err)
	}

	_, err = f.Write(p)
	if err != nil {
		panic(err)
	}

	_, err = f.Seek(0, 0)
	if err != nil {
		panic(err)
	}

	return f
}
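// A minimal usage sketch for mustWriteRandFile (path and sizes are
// illustrative): the returned *os.File is open and seeked back to the start,
// so the caller is responsible for closing and removing it.
func exampleMustWriteRandFile() {
	f := mustWriteRandFile("/tmp/rand.bin", 1024, 42)
	defer os.Remove(f.Name())
	defer f.Close()

	buf := make([]byte, 16)
	if _, err := io.ReadFull(f, buf); err != nil {
		panic(err)
	}
	fmt.Printf("first 16 bytes: %x\n", buf)
}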
// ReadReader reads from reader in chunks of up to nBytes and passes each
// chunk to cb. If a read error occurs, the error is passed to cb as well.
// cb returns false to stop reading early; it is invoked one final time with
// done == true once the reader is exhausted.
func ReadReader(reader io.Reader, nBytes int, cb func(err error, bs []byte, done bool) bool) {
	r := bufio.NewReader(reader)
	buf := make([]byte, 0, nBytes)
	for {
		n, err := r.Read(buf[:cap(buf)])
		buf = buf[:n]
		if n == 0 {
			if err == nil {
				continue
			}
			if err == io.EOF {
				break
			}
			if !cb(err, buf, false) {
				return
			}
			continue
		}

		// Hand the chunk to the callback exactly once per read. io.EOF is
		// not reported here; it surfaces on the next iteration through the
		// final done callback.
		if err == io.EOF {
			err = nil
		}
		if !cb(err, buf, false) {
			return
		}
	}
	cb(nil, buf, true)
}
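// A short usage sketch for ReadReader: stream a string through it in 8-byte
// chunks. strings.NewReader stands in for the pipe; everything here is
// illustrative.
func exampleReadReader() {
	src := strings.NewReader("hello, world: some bytes to chunk")
	ReadReader(src, 8, func(err error, bs []byte, done bool) bool {
		if err != nil {
			fmt.Println("read error:", err)
			return false // stop on error
		}
		if done {
			fmt.Println("done")
			return false
		}
		fmt.Printf("chunk: %q\n", bs)
		return true // keep reading
	})
}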
// allocateUniqueUid returns an integer in the range [minIdx, maxIdx] derived
// from numInstances and instanceIdx which hasn't already been allocated to
// another xid. It does this by fingerprinting fresh random bytes until the
// obtained integer is unique.
func allocateUniqueUid(instanceIdx uint64, numInstances uint64) uint64 {
	mod := math.MaxUint64 / numInstances
	minIdx := instanceIdx * mod

	buf := make([]byte, 128)
	for {
		_, err := rand.Read(buf)
		x.Checkf(err, "rand.Read shouldn't throw an error")

		uidb := farm.Fingerprint64(buf) // Generate from hash.
		uid := (uidb % mod) + minIdx
		if uid == math.MaxUint64 || !lmgr.isNew(uid) {
			continue
		}

		// Check if this uid has already been allocated.
		key := x.DataKey("_uid_", uid)
		pl, decr := posting.GetOrCreate(key)
		length := pl.Length(0)
		// Release the posting list right away; a defer here would pile up
		// across iterations until the function returns.
		decr()
		if length == 0 {
			return uid
		}
	}
}
func newRandomID() *dht.ID {
	id := new(dht.ID)
	n, err := rand.Read(id[:])
	if err != nil || n != dht.IDLen {
		return dht.ZeroID
	}
	return id
}
func Challenge13() {
	fmt.Println("\nSet 2 challenge 13\n==================")
	profile := ProfileFor("*****@*****.**")
	key := make([]byte, 16)
	rand.Read(key)
	EncryptAes128Ecb([]byte(profile), key)
}
func makeTestData(size int) []byte {
	out := make([]byte, size)
	_, err := rand.Read(out)
	if err != nil {
		panic(err)
	}
	return out
}
// SecureRandomBytes returns the requested number of bytes using crypto/rand.
func SecureRandomBytes(length int) []byte {
	randomBytes := make([]byte, length)
	_, err := rand.Read(randomBytes)
	if err != nil {
		log.Fatal("Unable to generate random bytes")
	}
	return randomBytes
}
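// A minimal usage sketch for SecureRandomBytes, e.g. minting a hex-encoded
// session token (name and length are illustrative). Note that on failure the
// function exits the process via log.Fatal rather than returning an error,
// so it only suits callers for whom that behavior is acceptable.
func newSessionToken() string {
	return hex.EncodeToString(SecureRandomBytes(32))
}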
func rnd(t *testing.T, n int) []byte {
	b := make([]byte, n)
	i, err := rand.Read(b)
	if err != nil || i != n {
		t.Fatal("rand failed")
	}
	return b
}
func (p *Encryptor) Sum(plain string, size int) (string, error) {
	salt := make([]byte, size)
	_, err := rand.Read(salt)
	if err != nil {
		return "", err
	}
	return p.sum(plain, salt), nil
}
// generateString uses the "Base 64 Encoding with URL and Filename Safe
// Alphabet" from https://www.ietf.org/rfc/rfc4648.txt, so there is no need
// to worry about '+' and '/' according to the above RFC.
func generateString(size int) string {
	rb := make([]byte, size)
	_, err := rand.Read(rb)
	if err != nil {
		fmt.Println(err)
	}
	return base64.URLEncoding.EncodeToString(rb)
}
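// Usage sketch: note that the output is longer than size, since base64
// expands every 3 input bytes to 4 characters (with '=' padding). For a
// 16-byte input the encoded token is 24 characters.
func exampleGenerateString() {
	token := generateString(16)
	fmt.Println(len(token), token) // 24 <random URL-safe token>
}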
func TestCodec_ReceiveLimited(t *testing.T) {
	const limit = 2048
	var payloads [][]byte
	for _, size := range []int{
		1024,
		2048,
		4096, // receive of this message would be interrupted due to limit
		2048, // this one is to make sure next receive recovers discarding leftovers
	} {
		b := make([]byte, size)
		rand.Read(b)
		payloads = append(payloads, b)
	}

	handlerDone := make(chan struct{})
	limitedHandler := func(ws *Conn) {
		defer close(handlerDone)
		ws.MaxPayloadBytes = limit
		defer ws.Close()
		for i, p := range payloads {
			t.Logf("payload #%d (size %d, exceeds limit: %v)", i, len(p), len(p) > limit)
			var recv []byte
			err := Message.Receive(ws, &recv)
			switch err {
			case nil:
			case ErrFrameTooLarge:
				if len(p) <= limit {
					t.Fatalf("unexpected frame size limit: expected %d bytes of payload having limit at %d", len(p), limit)
				}
				continue
			default:
				t.Fatalf("unexpected error: %v (want either nil or ErrFrameTooLarge)", err)
			}
			if len(recv) > limit {
				t.Fatalf("received %d bytes of payload having limit at %d", len(recv), limit)
			}
			if !bytes.Equal(p, recv) {
				t.Fatalf("received payload differs:\ngot:\t%v\nwant:\t%v", recv, p)
			}
		}
	}

	server := httptest.NewServer(Handler(limitedHandler))
	defer server.CloseClientConnections()
	defer server.Close()

	addr := server.Listener.Addr().String()
	ws, err := Dial("ws://"+addr+"/", "", "http://localhost/")
	if err != nil {
		t.Fatal(err)
	}
	defer ws.Close()

	for i, p := range payloads {
		if err := Message.Send(ws, p); err != nil {
			t.Fatalf("payload #%d (size %d): %v", i, len(p), err)
		}
	}
	<-handlerDone
}
func init() {
	testContent = make([]byte, maxTestContent)
	n, err := rand.Read(testContent)
	if err != nil {
		panic(err)
	}
	if n != len(testContent) {
		panic(errors.New("failed to generate random content"))
	}
}
func TestMoveWithMultipartCopy(t *testing.T) {
	if skipS3() != "" {
		t.Skip(skipS3())
	}

	rootDir, err := ioutil.TempDir("", "driver-")
	if err != nil {
		t.Fatalf("unexpected error creating temporary directory: %v", err)
	}
	defer os.Remove(rootDir)

	d, err := s3DriverConstructor(rootDir, s3.StorageClassStandard)
	if err != nil {
		t.Fatalf("unexpected error creating driver: %v", err)
	}

	ctx := context.Background()
	sourcePath := "/source"
	destPath := "/dest"
	defer d.Delete(ctx, sourcePath)
	defer d.Delete(ctx, destPath)

	// An object larger than d's MultipartCopyThresholdSize will cause d.Move()
	// to perform a multipart copy.
	multipartCopyThresholdSize := d.baseEmbed.Base.StorageDriver.(*driver).MultipartCopyThresholdSize
	contents := make([]byte, 2*multipartCopyThresholdSize)
	rand.Read(contents)

	err = d.PutContent(ctx, sourcePath, contents)
	if err != nil {
		t.Fatalf("unexpected error creating content: %v", err)
	}

	err = d.Move(ctx, sourcePath, destPath)
	if err != nil {
		t.Fatalf("unexpected error moving file: %v", err)
	}

	received, err := d.GetContent(ctx, destPath)
	if err != nil {
		t.Fatalf("unexpected error getting content: %v", err)
	}
	if !bytes.Equal(contents, received) {
		t.Fatal("content differs")
	}

	_, err = d.GetContent(ctx, sourcePath)
	switch err.(type) {
	case storagedriver.PathNotFoundError:
	default:
		t.Fatalf("unexpected error getting content: %v", err)
	}
}
func getRandomUint64Set(n int) []uint64 {
	seed := int64(42)
	p("seed is %v", seed)
	rand.Seed(seed)

	var buf [8]byte
	var o []uint64
	for i := 0; i < n; i++ {
		rand.Read(buf[:])
		o = append(o, binary.LittleEndian.Uint64(buf[:]))
	}
	return o
}
// testPeekLength appends some random garbage to an encoding and verifies
// that PeekLength returns the correct length.
func testPeekLength(t *testing.T, encoded []byte) {
	gLen := rand.Intn(10)
	garbage := make([]byte, gLen)
	_, _ = rand.Read(garbage)

	var buf []byte
	buf = append(buf, encoded...)
	buf = append(buf, garbage...)

	if l, err := PeekLength(buf); err != nil {
		t.Fatal(err)
	} else if l != len(encoded) {
		t.Errorf("PeekLength returned incorrect length: %d, expected %d", l, len(encoded))
	}
}
func TestScript(t *testing.T) {
	testScr := func(n int) error {
		b := make([]byte, n)
		n, err := rand.Read(b)
		if err != nil || n != len(b) {
			t.Fatal("rand failed")
		}
		_, err = NullDataScript(b)
		return err
	}
	if testScr(64) != nil {
		t.Fatal("nulldatascript failed on short")
	}
	if testScr(128) == nil {
		t.Fatal("nulldatascript failed on long")
	}
}
func (ts TestSteward) SealVault(name string, password *string, vault *vaulted.Vault) error {
	if password == nil {
		b := make([]byte, 6)
		_, err := rand.Read(b)
		if err != nil {
			return err
		}
		newPassword := base64.StdEncoding.EncodeToString(b)
		password = &newPassword
	}

	ts.Passwords[name] = *password
	ts.Vaults[name] = cloneVault(vault)

	return nil
}
func TestEcho(t *testing.T) {
	a, b := net.Pipe()
	mpa := NewMultiplex(a, false)
	mpb := NewMultiplex(b, true)

	mes := make([]byte, 40960)
	rand.Read(mes)

	go func() {
		s, err := mpb.Accept()
		if err != nil {
			t.Fatal(err)
		}
		defer s.Close()
		io.Copy(s, s)
	}()

	s, err := mpa.NewStream()
	if err != nil {
		t.Fatal(err)
	}

	_, err = s.Write(mes)
	if err != nil {
		t.Fatal(err)
	}

	buf := make([]byte, len(mes))
	n, err := io.ReadFull(s, buf)
	if err != nil {
		t.Fatal(err)
	}
	if n != len(mes) {
		t.Fatal("read wrong amount")
	}
	if err := arrComp(buf, mes); err != nil {
		t.Fatal(err)
	}

	s.Close()
	mpa.Close()
	mpb.Close()
}
func TestConcurrentReads(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	ds, rt := setupRoot(ctx, t)
	rootdir := rt.GetValue().(*Directory)

	path := "a/b/c"
	d := mkdirP(t, rootdir, path)

	buf := make([]byte, 2048)
	rand.Read(buf)

	fi := fileNodeFromReader(t, ds, bytes.NewReader(buf))
	err := d.AddChild("afile", fi)
	if err != nil {
		t.Fatal(err)
	}

	var wg sync.WaitGroup
	nloops := 100
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(me int) {
			defer wg.Done()
			mybuf := make([]byte, len(buf))
			for j := 0; j < nloops; j++ {
				offset := rand.Intn(len(buf))
				length := rand.Intn(len(buf) - offset)

				err := readFile(rt, "/a/b/c/afile", int64(offset), mybuf[:length])
				if err != nil {
					t.Error("readfile failed: ", err)
					return
				}
				if !bytes.Equal(mybuf[:length], buf[offset:offset+length]) {
					t.Error("incorrect read!")
				}
			}
		}(i)
	}
	wg.Wait()
}
// testCrossCheck generates random buffers of various lengths and verifies that
// the two "update" functions return the same result.
func testCrossCheck(t *testing.T, crcFunc1, crcFunc2 func(crc uint32, b []byte) uint32) {
	// The AMD64 implementation has some cutoffs at lengths 168*3=504 and
	// 1344*3=4032. We should make sure lengths around these values are in the
	// list.
	lengths := []int{0, 1, 2, 3, 4, 5, 10, 16, 50, 100, 128,
		500, 501, 502, 503, 504, 505, 512, 1000, 1024, 2000,
		4030, 4031, 4032, 4033, 4036, 4040, 4048, 4096, 5000, 10000}
	for _, length := range lengths {
		p := make([]byte, length)
		_, _ = rand.Read(p)
		crcInit := uint32(rand.Int63())

		crc1 := crcFunc1(crcInit, p)
		crc2 := crcFunc2(crcInit, p)
		if crc1 != crc2 {
			t.Errorf("mismatch: 0x%x vs 0x%x (buffer length %d)", crc1, crc2, length)
		}
	}
}
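// A usage sketch for testCrossCheck, showing only the calling convention: an
// update function built from the standard hash/crc32 package is passed for
// both sides here, whereas the real test would pass two independent
// implementations (e.g. generic vs. SIMD).
func exampleCrossCheck(t *testing.T) {
	table := crc32.MakeTable(crc32.Castagnoli)
	update := func(crc uint32, b []byte) uint32 {
		return crc32.Update(crc, table, b)
	}
	testCrossCheck(t, update, update)
}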
func postMessage() []*TCPPacket {
	ack := uint32(rand.Int63())
	seq2 := uint32(rand.Int63())
	seq := uint32(rand.Int63())

	c := 10000
	data := make([]byte, c)
	rand.Read(data)

	head := []byte("POST / HTTP/1.1\r\nContent-Length: 9958\r\n\r\n")
	copy(data, head)

	return []*TCPPacket{
		buildPacket(true, ack, seq, data, time.Now()),
		buildPacket(false, seq+uint32(len(data)), seq2, []byte("HTTP/1.1 200 OK\r\n\r\n"), time.Now()),
	}
}
func TestSimpleRead(t *testing.T) {
	ctx := context.Background()
	content := make([]byte, 1<<20)
	n, err := mrand.Read(content)
	if err != nil {
		t.Fatalf("unexpected error building random data: %v", err)
	}
	if n != len(content) {
		t.Fatalf("random read didn't fill buffer")
	}

	dgst, err := digest.FromReader(bytes.NewReader(content))
	if err != nil {
		t.Fatalf("unexpected error digesting random content: %v", err)
	}

	driver := inmemory.New()
	path := "/random"

	if err := driver.PutContent(ctx, path, content); err != nil {
		t.Fatalf("error putting patterned content: %v", err)
	}

	fr, err := newFileReader(ctx, driver, path, int64(len(content)))
	if err != nil {
		t.Fatalf("error allocating file reader: %v", err)
	}

	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		t.Fatalf("error getting digest verifier: %s", err)
	}

	io.Copy(verifier, fr)

	if !verifier.Verified() {
		t.Fatalf("unable to verify read data")
	}
}
func sha256WithSalt(value, saltValue []byte) ([sha256.Size]byte, []byte) {
	var salt []byte
	if saltValue != nil {
		salt = saltValue
	} else {
		salt = make([]byte, shaSettings.SaltLength)
		n, err := rand.Read(salt)
		if err != nil {
			fmt.Println("There was an error generating a salt: ", err)
			return [sha256.Size]byte{}, nil
		}
		if n != shaSettings.SaltLength {
			fmt.Printf("Only %d characters were read.\n", n)
			return [sha256.Size]byte{}, nil
		}
	}

	saltedVal := append(value, salt...)
	hashed := sha256.Sum256(saltedVal)
	return hashed, salt
}
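// Usage sketch for sha256WithSalt: pass nil to have a fresh salt generated,
// or pass the stored salt back in to re-derive and compare the hash.
// shaSettings is assumed to be configured elsewhere; a production verifier
// would also prefer a constant-time comparison (crypto/subtle).
func exampleVerify(password string, storedHash [sha256.Size]byte, storedSalt []byte) bool {
	hashed, _ := sha256WithSalt([]byte(password), storedSalt)
	return hashed == storedHash
}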
// Test erasureReadFile with random offsets and lengths.
// This test is t.Skip()ed as it takes a long time to run, hence should be run
// explicitly after commenting out the t.SkipNow() call.
func TestErasureReadFileRandomOffsetLength(t *testing.T) {
	// Comment the following line to run this test.
	t.SkipNow()

	// Initialize environment needed for the test.
	dataBlocks := 7
	parityBlocks := 7
	blockSize := int64(1 * 1024 * 1024)
	setup, err := newErasureTestSetup(dataBlocks, parityBlocks, blockSize)
	if err != nil {
		t.Error(err)
		return
	}
	defer setup.Remove()
	disks := setup.disks

	// Prepare a slice of 5MB with random data.
	data := make([]byte, 5*1024*1024)
	length := int64(len(data))
	_, err = rand.Read(data)
	if err != nil {
		t.Fatal(err)
	}

	// 10000 iterations with random offsets and lengths.
	iterations := 10000

	// Create a test file to read from.
	size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	if err != nil {
		t.Fatal(err)
	}
	if size != length {
		t.Errorf("erasureCreateFile returned %d, expected %d", size, length)
	}

	// To generate random offset/length.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))

	// Create a pool buffer which will be used by erasureReadFile for
	// reading from disks and erasure decoding.
	chunkSize := getChunkSize(blockSize, dataBlocks)
	pool := bpool.NewBytePool(chunkSize, len(disks))

	buf := &bytes.Buffer{}

	// Verify erasureReadFile() for random offsets and lengths.
	for i := 0; i < iterations; i++ {
		offset := r.Int63n(length)
		readLen := r.Int63n(length - offset)

		expected := data[offset : offset+readLen]

		_, err = erasureReadFile(buf, disks, "testbucket", "testobject", offset, readLen, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
		if err != nil {
			t.Fatal(err, offset, readLen)
		}

		got := buf.Bytes()
		if !bytes.Equal(expected, got) {
			t.Fatalf("read data is different from what was expected, offset=%d length=%d", offset, readLen)
		}
		buf.Reset()
	}
}
func randUUID() string {
	b := make([]byte, 16)
	rand.Read(b)
	return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
}
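// Note that randUUID formats 16 random bytes in UUID shape but does not set
// the RFC 4122 version and variant bits. A sketch of a v4-compliant variant,
// in case strict UUIDs are needed:
func randUUIDv4() string {
	b := make([]byte, 16)
	rand.Read(b)
	b[6] = (b[6] & 0x0f) | 0x40 // version 4
	b[8] = (b[8] & 0x3f) | 0x80 // variant 10xxxxxx
	return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
}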
func main() {
	// Single error return
	_ = a() // BLANK
	a()     // UNCHECKED

	// Return another value and an error
	_, _ = b() // BLANK
	b()        // UNCHECKED

	// Return a custom error type
	_ = customError() // BLANK
	customError()     // UNCHECKED

	// Return a custom concrete error type
	_ = customConcreteError()         // BLANK
	customConcreteError()             // UNCHECKED
	_, _ = customConcreteErrorTuple() // BLANK
	customConcreteErrorTuple()        // UNCHECKED

	// Return a custom pointer error type
	_ = customPointerError()         // BLANK
	customPointerError()             // UNCHECKED
	_, _ = customPointerErrorTuple() // BLANK
	customPointerErrorTuple()        // UNCHECKED

	// Method with a single error return
	x := t{}
	_ = x.a() // BLANK
	x.a()     // UNCHECKED

	// Method call on a struct member
	y := u{x}
	_ = y.t.a() // BLANK
	y.t.a()     // UNCHECKED

	m1 := map[string]func() error{"a": a}
	_ = m1["a"]() // BLANK
	m1["a"]()     // UNCHECKED

	// Additional cases for assigning errors to blank identifier
	z, _ := b()    // BLANK
	_, w := a(), 5 // BLANK

	// Assign non error to blank identifier
	_ = c()

	_ = z + w // Avoid complaints about unused variables

	// Type assertions
	var i interface{}
	s1 := i.(string)    // ASSERT
	s1 = i.(string)     // ASSERT
	s2, _ := i.(string) // ASSERT
	s2, _ = i.(string)  // ASSERT
	s3, ok := i.(string)
	s3, ok = i.(string)
	switch s4 := i.(type) {
	case string:
		_ = s4
	}
	_, _, _, _ = s1, s2, s3, ok

	// Goroutine
	go a()    // UNCHECKED
	defer a() // UNCHECKED

	b1 := bytes.Buffer{}
	b2 := &bytes.Buffer{}
	b1.Write(nil)
	b2.Write(nil)
	rand.Read(nil)
	mrand.Read(nil)
	ioutil.ReadFile("main.go") // UNCHECKED
}
// CreateRandomTarFile creates a random tarfile, returning it as an
// io.ReadSeeker along with its digest. An error is returned if there is a
// problem generating valid content.
func CreateRandomTarFile() (rs io.ReadSeeker, dgst digest.Digest, err error) {
	nFiles := mrand.Intn(10) + 10
	target := &bytes.Buffer{}
	wr := tar.NewWriter(target)

	// Perturb this on each iteration of the loop below.
	header := &tar.Header{
		Mode:       0644,
		ModTime:    time.Now(),
		Typeflag:   tar.TypeReg,
		Uname:      "randocalrissian",
		Gname:      "cloudcity",
		AccessTime: time.Now(),
		ChangeTime: time.Now(),
	}

	for fileNumber := 0; fileNumber < nFiles; fileNumber++ {
		fileSize := mrand.Int63n(1<<20) + 1<<20

		header.Name = fmt.Sprint(fileNumber)
		header.Size = fileSize

		if err := wr.WriteHeader(header); err != nil {
			return nil, "", err
		}

		randomData := make([]byte, fileSize)

		// Fill up the buffer with some random data.
		n, err := mrand.Read(randomData)
		if n != len(randomData) {
			return nil, "", fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData))
		}
		if err != nil {
			return nil, "", err
		}

		nn, err := io.Copy(wr, bytes.NewReader(randomData))
		if nn != fileSize {
			return nil, "", fmt.Errorf("short copy writing random file to tar")
		}
		if err != nil {
			return nil, "", err
		}

		if err := wr.Flush(); err != nil {
			return nil, "", err
		}
	}

	if err := wr.Close(); err != nil {
		return nil, "", err
	}

	dgst = digest.FromBytes(target.Bytes())

	return bytes.NewReader(target.Bytes()), dgst, nil
}
// Tests both object and bucket healing.
func TestHealing(t *testing.T) {
	obj, fsDirs, err := prepareXL()
	if err != nil {
		t.Fatal(err)
	}
	defer removeRoots(fsDirs)
	xl := obj.(*xlObjects)

	// Create "bucket"
	err = obj.MakeBucket("bucket")
	if err != nil {
		t.Fatal(err)
	}

	bucket := "bucket"
	object := "object"

	data := make([]byte, 1*humanize.MiByte)
	length := int64(len(data))
	_, err = rand.Read(data)
	if err != nil {
		t.Fatal(err)
	}

	_, err = obj.PutObject(bucket, object, length, bytes.NewReader(data), nil, "")
	if err != nil {
		t.Fatal(err)
	}

	disk := xl.storageDisks[0]
	xlMetaPreHeal, err := readXLMeta(disk, bucket, object)
	if err != nil {
		t.Fatal(err)
	}

	// Remove the object - to simulate the case where the disk was down when
	// the object was created.
	err = os.RemoveAll(path.Join(fsDirs[0], bucket, object))
	if err != nil {
		t.Fatal(err)
	}

	err = xl.HealObject(bucket, object)
	if err != nil {
		t.Fatal(err)
	}

	xlMetaPostHeal, err := readXLMeta(disk, bucket, object)
	if err != nil {
		t.Fatal(err)
	}

	// After heal the meta file should be as expected.
	if !reflect.DeepEqual(xlMetaPreHeal, xlMetaPostHeal) {
		t.Fatal("HealObject failed")
	}

	// Write xl.json with a different modtime to simulate the case where a disk
	// had gone down when an object was replaced by a new object.
	xlMetaOutDated := xlMetaPreHeal
	xlMetaOutDated.Stat.ModTime = time.Now()
	err = writeXLMetadata(disk, bucket, object, xlMetaOutDated)
	if err != nil {
		t.Fatal(err)
	}

	err = xl.HealObject(bucket, object)
	if err != nil {
		t.Fatal(err)
	}

	xlMetaPostHeal, err = readXLMeta(disk, bucket, object)
	if err != nil {
		t.Fatal(err)
	}

	// After heal the meta file should be as expected.
	if !reflect.DeepEqual(xlMetaPreHeal, xlMetaPostHeal) {
		t.Fatal("HealObject failed")
	}

	// Remove the bucket - to simulate the case where the bucket was created
	// when the disk was down.
	err = os.RemoveAll(path.Join(fsDirs[0], bucket))
	if err != nil {
		t.Fatal(err)
	}

	// This would create the bucket.
	err = xl.HealBucket(bucket)
	if err != nil {
		t.Fatal(err)
	}

	// Stat the bucket to make sure that it was created.
	_, err = xl.storageDisks[0].StatVol(bucket)
	if err != nil {
		t.Fatal(err)
	}
}
func TestErasureReadFileOffsetLength(t *testing.T) {
	// Initialize environment needed for the test.
	dataBlocks := 7
	parityBlocks := 7
	blockSize := int64(1 * 1024 * 1024)
	setup, err := newErasureTestSetup(dataBlocks, parityBlocks, blockSize)
	if err != nil {
		t.Error(err)
		return
	}
	defer setup.Remove()
	disks := setup.disks

	// Prepare a slice of 5MB with random data.
	data := make([]byte, 5*1024*1024)
	length := int64(len(data))
	_, err = rand.Read(data)
	if err != nil {
		t.Fatal(err)
	}

	// Create a test file to read from.
	size, checkSums, err := erasureCreateFile(disks, "testbucket", "testobject", bytes.NewReader(data), blockSize, dataBlocks, parityBlocks, bitRotAlgo, dataBlocks+1)
	if err != nil {
		t.Fatal(err)
	}
	if size != length {
		t.Errorf("erasureCreateFile returned %d, expected %d", size, length)
	}

	testCases := []struct {
		offset, length int64
	}{
		// Full file.
		{0, length},
		// Read nothing.
		{length, 0},
		// 2nd block.
		{blockSize, blockSize},
		// Test cases for random offsets and lengths.
		{blockSize - 1, 2},
		{blockSize - 1, blockSize + 1},
		{blockSize + 1, blockSize - 1},
		{blockSize + 1, blockSize},
		{blockSize + 1, blockSize + 1},
		{blockSize*2 - 1, blockSize + 1},
		{length - 1, 1},
		{length - blockSize, blockSize},
		{length - blockSize - 1, blockSize},
		{length - blockSize - 1, blockSize + 1},
	}

	chunkSize := getChunkSize(blockSize, dataBlocks)
	pool := bpool.NewBytePool(chunkSize, len(disks))

	// Compare the data read from the file with the "data" byte array.
	for i, testCase := range testCases {
		expected := data[testCase.offset:(testCase.offset + testCase.length)]
		buf := &bytes.Buffer{}

		_, err = erasureReadFile(buf, disks, "testbucket", "testobject", testCase.offset, testCase.length, length, blockSize, dataBlocks, parityBlocks, checkSums, bitRotAlgo, pool)
		if err != nil {
			t.Error(err)
			continue
		}
		got := buf.Bytes()
		if !bytes.Equal(expected, got) {
			t.Errorf("Test %d : read data is different from what was expected", i+1)
		}
	}
}