func TestCountDecodeMallocs(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping malloc count in short mode")
	}
	if runtime.GOMAXPROCS(0) > 1 {
		t.Skip("skipping; GOMAXPROCS>1")
	}

	const N = 1000

	var buf bytes.Buffer
	enc := NewEncoder(&buf)
	bench := &Bench{7, 3.2, "now is the time", []byte("for all good men")}

	// Fill the buffer with enough to decode
	testing.AllocsPerRun(N, func() {
		err := enc.Encode(bench)
		if err != nil {
			t.Fatal("encode:", err)
		}
	})

	dec := NewDecoder(&buf)
	allocs := testing.AllocsPerRun(N, func() {
		*bench = Bench{}
		err := dec.Decode(&bench)
		if err != nil {
			t.Fatal("decode:", err)
		}
	})
	if allocs != 3 {
		t.Fatalf("mallocs per decode of type Bench: %v; wanted 3\n", allocs)
	}
}

// TestAlloc tests that some mapping methods should not cause any allocation.
func TestAlloc(t *testing.T) {
	dst := make([]byte, 256) // big enough to hold any result
	src := []byte(txtNonASCII)

	for i, f := range []func() Caser{
		func() Caser { return Upper(language.Und) },
		func() Caser { return Lower(language.Und) },
		func() Caser { return Title(language.Und) },
	} {
		var c Caser
		v := testing.AllocsPerRun(2, func() {
			c = f()
		})
		if v > 1 {
			// TODO: Right now only Upper has 1 allocation. Special-case Lower
			// and Title as well to have fewer allocations for the root locale.
			t.Skipf("%d:init: number of allocs was %f; want 0", i, v)
		}
		v = testing.AllocsPerRun(2, func() {
			c.Transform(dst, src, true)
		})
		if v > 0 {
			t.Errorf("%d:transform: number of allocs was %f; want 0", i, v)
		}
	}
}

func TestCountDecodeMallocs(t *testing.T) {
	const N = 1000

	var buf bytes.Buffer
	enc := NewEncoder(&buf)
	bench := &Bench{7, 3.2, "now is the time", []byte("for all good men")}

	// Fill the buffer with enough to decode
	testing.AllocsPerRun(N, func() {
		err := enc.Encode(bench)
		if err != nil {
			t.Fatal("encode:", err)
		}
	})

	dec := NewDecoder(&buf)
	allocs := testing.AllocsPerRun(N, func() {
		*bench = Bench{}
		err := dec.Decode(&bench)
		if err != nil {
			t.Fatal("decode:", err)
		}
	})
	fmt.Printf("mallocs per decode of type Bench: %v\n", allocs)
}

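// The Bench type used by the two gob decode tests above is defined elsewhere
// and not included in this collection. Judging from the composite literal
// {7, 3.2, "now is the time", []byte("for all good men")}, it is presumably a
// small flat struct along these lines (field names are illustrative, not taken
// from the original source):
type Bench struct {
	A int
	B float64
	C string
	D []byte
}
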
func TestNumericWeighterAlloc(t *testing.T) {
	buf := make([]Elem, 100)
	w := NewNumericWeighter(numWeighter)
	s := "1234567890a"

	nNormal := testing.AllocsPerRun(3, func() { numWeighter.AppendNextString(buf, s) })
	nNumeric := testing.AllocsPerRun(3, func() { w.AppendNextString(buf, s) })
	if n := nNumeric - nNormal; n > 0 {
		t.Errorf("got %f; want 0", n)
	}
}

// Tests that assigning to RawBytes doesn't allocate (and also works).
func TestRawBytesAllocs(t *testing.T) {
	buf := make(RawBytes, 10)
	test := func(name string, in interface{}, want string) {
		if err := convertAssign(&buf, in); err != nil {
			t.Fatalf("%s: convertAssign = %v", name, err)
		}
		match := len(buf) == len(want)
		if match {
			for i, b := range buf {
				if want[i] != b {
					match = false
					break
				}
			}
		}
		if !match {
			t.Fatalf("%s: got %q (len %d); want %q (len %d)", name, buf, len(buf), want, len(want))
		}
	}

	n := testing.AllocsPerRun(100, func() {
		test("uint64", uint64(12345678), "12345678")
		test("uint32", uint32(1234), "1234")
		test("uint16", uint16(12), "12")
		test("uint8", uint8(1), "1")
		test("uint", uint(123), "123")
		test("int", int(123), "123")
		test("int8", int8(1), "1")
		test("int16", int16(12), "12")
		test("int32", int32(1234), "1234")
		test("int64", int64(12345678), "12345678")
		test("float32", float32(1.5), "1.5")
		test("float64", float64(64), "64")
		test("bool", false, "false")
	})

	// The numbers below are only valid for 64-bit interface word sizes,
	// and gc. With 32-bit words there are more convT2E allocs, and
	// with gccgo, only pointers currently go in interface data.
	// So only care on amd64 gc for now.
	measureAllocs := runtime.GOARCH == "amd64" && runtime.Compiler == "gc"

	if n > 0.5 && measureAllocs {
		t.Fatalf("allocs = %v; want 0", n)
	}

	// This one involves a convT2E allocation, string -> interface{}
	n = testing.AllocsPerRun(100, func() {
		test("string", "foo", "foo")
	})
	if n > 1.5 && measureAllocs {
		t.Fatalf("allocs = %v; want max 1", n)
	}
}

func main() {
	nf := testing.AllocsPerRun(100, F)
	ng := testing.AllocsPerRun(100, G)
	if int(nf) != 1 {
		fmt.Printf("AllocsPerRun(100, F) = %v, want 1\n", nf)
		os.Exit(1)
	}
	if int(ng) != 0 {
		fmt.Printf("AllocsPerRun(100, G) = %v, want 0\n", ng)
		os.Exit(1)
	}
}

func main() {
	nf := testing.AllocsPerRun(100, F)
	ng := testing.AllocsPerRun(100, G)
	if int(nf) > 1 {
		fmt.Printf("AllocsPerRun(100, F) = %v, want 1\n", nf)
		os.Exit(1)
	}
	if int(ng) != 0 && (runtime.Compiler != "gccgo" || int(ng) != 1) {
		fmt.Printf("AllocsPerRun(100, G) = %v, want 0\n", ng)
		os.Exit(1)
	}
}

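// F and G are not shown in either of the two main functions above. A minimal,
// hypothetical pair that matches their expectations (exactly one heap
// allocation per call to F, none for G) could look like this; the names sink,
// F, and G here are illustrative only:
var sink *[64]byte // keeps the result reachable so F's allocation is not optimized away

// F performs exactly one heap allocation per call.
func F() { sink = new([64]byte) }

// G does no allocation at all.
func G() { _ = len("no allocation here") }
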
func TestString_ZeroAllocs(t *testing.T) {
	v := "jumped over the lazy dog"
	b := make([]byte, len(v)+1)

	assert.Zero(t, testing.AllocsPerRun(1, func() {
		PutString(b, v)
	}))
	assert.Zero(t, testing.AllocsPerRun(1, func() {
		String(b)
	}))

	// Note that while the String function requires zero allocations,
	// the decoded string can still escape...
	var x string
	assert.Equal(t, 1.0, testing.AllocsPerRun(1, func() {
		s := String(b)
		x = s
	}))
	assert.Equal(t, v, x)
}

func TestChunkReaderAllocs(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	var buf bytes.Buffer
	w := NewChunkedWriter(&buf)
	a, b, c := []byte("aaaaaa"), []byte("bbbbbbbbbbbb"), []byte("cccccccccccccccccccccccc")
	w.Write(a)
	w.Write(b)
	w.Write(c)
	w.Close()

	readBuf := make([]byte, len(a)+len(b)+len(c)+1)
	byter := bytes.NewReader(buf.Bytes())
	bufr := bufio.NewReader(byter)
	mallocs := testing.AllocsPerRun(100, func() {
		byter.Seek(0, io.SeekStart)
		bufr.Reset(byter)
		r := NewChunkedReader(bufr)
		n, err := io.ReadFull(r, readBuf)
		if n != len(readBuf)-1 {
			t.Fatalf("read %d bytes; want %d", n, len(readBuf)-1)
		}
		if err != io.ErrUnexpectedEOF {
			t.Fatalf("read error = %v; want ErrUnexpectedEOF", err)
		}
	})
	if mallocs > 1.5 {
		t.Errorf("mallocs = %v; want 1", mallocs)
	}
}

func TestClean(t *testing.T) {
	for _, test := range cleantests {
		if s := Clean(test.path); s != test.result {
			t.Errorf("Clean(%q) = %q, want %q", test.path, s, test.result)
		}
		if s := Clean(test.result); s != test.result {
			t.Errorf("Clean(%q) = %q, want %q", test.result, s, test.result)
		}
	}

	if runtime.GOMAXPROCS(0) > 1 {
		t.Log("skipping AllocsPerRun checks; GOMAXPROCS>1")
		return
	}

	// The allocation checks below are intentionally unreachable: this variant
	// of the test targets gccgo, which does not guarantee zero allocations here.
	t.Log("Skipping AllocsPerRun for gccgo")
	return

	for _, test := range cleantests {
		allocs := testing.AllocsPerRun(100, func() { Clean(test.result) })
		if allocs > 0 {
			t.Errorf("Clean(%q): %v allocs, want zero", test.result, allocs)
		}
	}
}

func TestClean(t *testing.T) {
	tests := cleantests
	if runtime.GOOS == "windows" {
		for i := range tests {
			tests[i].result = filepath.FromSlash(tests[i].result)
		}
		tests = append(tests, wincleantests...)
	}
	for _, test := range tests {
		if s := filepath.Clean(test.path); s != test.result {
			t.Errorf("Clean(%q) = %q, want %q", test.path, s, test.result)
		}
		if s := filepath.Clean(test.result); s != test.result {
			t.Errorf("Clean(%q) = %q, want %q", test.result, s, test.result)
		}
	}

	if testing.Short() {
		t.Skip("skipping malloc count in short mode")
	}
	if runtime.GOMAXPROCS(0) > 1 {
		t.Log("skipping AllocsPerRun checks; GOMAXPROCS>1")
		return
	}

	for _, test := range tests {
		allocs := testing.AllocsPerRun(100, func() { filepath.Clean(test.result) })
		if allocs > 0 {
			t.Errorf("Clean(%q): %v allocs, want zero", test.result, allocs)
		}
	}
}

func TestAllocsPerRun(t *testing.T) {
	for _, tt := range allocsPerRunTests {
		if allocs := testing.AllocsPerRun(100, tt.fn); allocs != tt.allocs {
			t.Errorf("AllocsPerRun(100, %s) = %v, want %v", tt.name, allocs, tt.allocs)
		}
	}
}

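// The allocsPerRunTests table iterated above is not part of this collection.
// A minimal sketch of a plausible shape, with hypothetical entries; each entry
// pairs a function with the exact allocation count it is expected to report:
var allocSink interface{} // keeps results reachable so allocations are not optimized away

var allocsPerRunTests = []struct {
	name   string
	fn     func()
	allocs float64
}{
	{"alloc *byte", func() { allocSink = new(*byte) }, 1},
	{"alloc complex128", func() { allocSink = new(complex128) }, 1},
	{"no allocation", func() { allocSink = nil }, 0},
}
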
func TestReplaceIllFormedAlloc(t *testing.T) {
	if n := testing.AllocsPerRun(3, func() {
		ReplaceIllFormed().Transform(nil, nil, false)
	}); n > 0 {
		t.Errorf("got %f; want 0", n)
	}
}

func TestGarbageCreation(t *testing.T) {
	// TestMultipleResultsQuery needs to work as expected
	dependOn(t, TestMultipleResultsQuery)

	// Test intervals
	intervals := IntervalSlice{
		{4, 15, "First"},
		{50, 72, "Second"},
		{34, 90, "Third"},
		{34, 45, "Fourth"},
		{34, 40, "Fifth"},
		{34, 34, "Sixth"},
		{34, 45, "Seventh"},
	}
	stab := setup(t, intervals)

	allocs := testing.AllocsPerRun(1000, func() {
		results, err := stab.Intersect(42)
		if err != nil {
			t.Fatal("Error during alloc run: ", err)
		}
		if results == nil {
			t.Fatal("Got 'nil' results during alloc run")
		}
	})
	t.Log("Allocs per run (avg): ", allocs)
	if allocs > 2.1 {
		t.Fatal("Too many allocs, be sure to disable logging for real builds")
	}
}

func TestMapAlloc(t *testing.T) {
	if n := testing.AllocsPerRun(3, func() {
		Map(idem).Transform(nil, nil, false)
	}); n > 0 {
		t.Errorf("got %f; want 0", n)
	}
}

func TestRemoveAlloc(t *testing.T) {
	if n := testing.AllocsPerRun(3, func() {
		Remove(Predicate(rmNop)).Transform(nil, nil, false)
	}); n > 0 {
		t.Errorf("got %f; want 0", n)
	}
}

func TestMakeString(t *testing.T) {
	tests := []struct{ in, out string }{
		{"und", "und"},
		{"und", "und-CW"},
		{"nl", "nl-NL"},
		{"de-1901", "nl-1901"},
		{"de-1901", "de-Arab-1901"},
		{"x-a-b", "de-Arab-x-a-b"},
		{"x-a-b", "x-a-b"},
	}
	for i, tt := range tests {
		id, _ := Parse(tt.in)
		mod, _ := Parse(tt.out)
		id.setTagsFrom(mod)
		for j := 0; j < 2; j++ {
			id.remakeString()
			if str := id.String(); str != tt.out {
				t.Errorf("%d:%d: found %s; want %s", i, j, id.String(), tt.out)
			}
		}
		// The bytes to string conversion as used in remakeString
		// occasionally measures as more than one alloc, breaking this test.
		// To alleviate this we set the number of runs to more than 1.
		if n := testing.AllocsPerRun(8, id.remakeString); n > 1 {
			t.Errorf("%d: # allocs got %.1f; want <= 1", i, n)
		}
	}
}

// TestMemFileWriteAllocs tests that writing N consecutive 1KiB chunks to a
// memFile doesn't allocate a new buffer for each of those N times. Otherwise,
// calling io.Copy(aMemFile, src) is likely to have quadratic complexity.
func TestMemFileWriteAllocs(t *testing.T) {
	if runtime.Compiler == "gccgo" {
		t.Skip("gccgo allocates here")
	}
	fs := NewMemFS()
	f, err := fs.OpenFile("/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		t.Fatalf("OpenFile: %v", err)
	}
	defer f.Close()

	xxx := make([]byte, 1024)
	for i := range xxx {
		xxx[i] = 'x'
	}

	a := testing.AllocsPerRun(100, func() {
		f.Write(xxx)
	})
	// AllocsPerRun returns an integral value, so we compare the rounded-down
	// number to zero.
	if a > 0 {
		t.Fatalf("%v allocs per run, want 0", a)
	}
}

func TestCountMallocs(t *testing.T) {
	for _, mt := range mallocTest {
		allocs := testing.AllocsPerRun(100, mt.fn)
		if max := float64(mt.count); allocs > max {
			t.Errorf("%s: %v allocs, want <=%v", mt.desc, allocs, max)
		}
	}
}

func TestCountMallocs(t *testing.T) {
	for _, mt := range mallocTest {
		mallocs := testing.AllocsPerRun(100, mt.fn)
		if got, max := mallocs, float64(mt.count); got > max {
			t.Errorf("%s: got %v allocs, want <=%v", mt.desc, got, max)
		}
	}
}

func TestCountMallocs(t *testing.T) {
	for _, mt := range mallocTest {
		allocs := int(testing.AllocsPerRun(100, mt.fn))
		if allocs > mt.count {
			t.Errorf("%s: %d allocs, want %d", mt.desc, allocs, mt.count)
		}
	}
}

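// The mallocTest table driven by the three TestCountMallocs variants above is
// defined in their respective packages and not shown here. A hedged sketch of
// its likely shape, with illustrative entries (the operations, counts, and the
// use of fmt.Sprintf are assumptions; assumes `import "fmt"`):
var mallocTest = []struct {
	count int    // maximum allocations allowed per run
	desc  string // label used in failure messages
	fn    func() // operation to measure
}{
	{0, `Sprintf("")`, func() { _ = fmt.Sprintf("") }},
	{1, `Sprintf("xxx")`, func() { _ = fmt.Sprintf("xxx") }},
	{2, `Sprintf("%d %d")`, func() { _ = fmt.Sprintf("%d %d", 7, 8) }},
}
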
func TestKVClaimAllocs(t *testing.T) {
	n := testing.AllocsPerRun(20, func() {
		index.ExpKvClaim("claim|sha1-b380b3080f9c71faa5c1d82bbd4d583a473bc77d|2931A67C26F5ABDA|2011-11-28T01:32:37.000123456Z|sha1-b3d93daee62e40d36237ff444022f42d7d0e43f2",
			"set-attribute|tag|foo1|sha1-ad87ca5c78bd0ce1195c46f7c98e6025abbaf007",
			blob.Parse)
	})
	t.Logf("%v allocations", n)
}

func TestAllocs(t *testing.T) {
	var x []struct{}
	allocs := testing.AllocsPerRun(500, func() {
		x = iter.N(1e9)
	})
	if allocs > 0.1 {
		t.Errorf("allocs = %v", allocs)
	}
}

func TestNonEscapingMap(t *testing.T) {
	n := testing.AllocsPerRun(1000, func() {
		m := make(map[int]int)
		m[0] = 0
	})
	if n != 0 {
		t.Fatalf("want 0 allocs, got %v", n)
	}
}

func TestLookupMallocs(t *testing.T) {
	n := testing.AllocsPerRun(10000, func() {
		TypeByExtension(".html")
		TypeByExtension(".HtML")
	})
	if n > 0 {
		t.Errorf("allocs = %v; want 0", n)
	}
}

func TestSearchWrappersDontAlloc(t *testing.T) {
	if runtime.GOMAXPROCS(0) > 1 {
		t.Skip("skipping; GOMAXPROCS>1")
	}
	allocs := testing.AllocsPerRun(100, runSearchWrappers)
	if allocs != 0 {
		t.Errorf("expected no allocs for runSearchWrappers, got %v", allocs)
	}
}

func TestStructHash(t *testing.T) {
	// See the comment in TestArrayHash.
	f := func() {
		type key struct {
			a, b, c, d, e, f, g, h string
		}
		m := make(map[key]bool, 70)

		// fill m with keys that have 4 "foo"s and 4 ""s.
		for i := 0; i < 256; i++ {
			var k key
			cnt := 0
			if i&1 != 0 {
				k.a = "foo"
				cnt++
			}
			if i&2 != 0 {
				k.b = "foo"
				cnt++
			}
			if i&4 != 0 {
				k.c = "foo"
				cnt++
			}
			if i&8 != 0 {
				k.d = "foo"
				cnt++
			}
			if i&16 != 0 {
				k.e = "foo"
				cnt++
			}
			if i&32 != 0 {
				k.f = "foo"
				cnt++
			}
			if i&64 != 0 {
				k.g = "foo"
				cnt++
			}
			if i&128 != 0 {
				k.h = "foo"
				cnt++
			}
			if cnt == 4 {
				m[k] = true
			}
		}
		if len(m) != 70 {
			t.Errorf("bad test: (8 choose 4) should be 70, not %d", len(m))
		}
	}
	if n := testing.AllocsPerRun(10, f); n > 6 {
		t.Errorf("too many allocs %f - hash not balanced", n)
	}
}

func TestScannerAllocs(t *testing.T) {
	data := []byte(`a=1 b="bar" ƒ=2h3s r="esc\t" d x=sf `)
	h := func(key, val []byte) error { return nil }
	allocs := testing.AllocsPerRun(1000, func() {
		gotoScanner(data, HandlerFunc(h))
	})
	if allocs > 1 {
		t.Errorf("got %f, want <=1", allocs)
	}
}

func TestDNSDoesNotMalloc(t *testing.T) {
	var dns DNS
	if n := testing.AllocsPerRun(1000, func() {
		if err := dns.DecodeFromBytes(testDNSAAAA[ /*eth*/ 14+ /*ipv4*/ 20+ /*udp*/ 8:], gopacket.NilDecodeFeedback); err != nil {
			t.Fatal(err)
		}
	}); n > 0 {
		t.Error(n, "mallocs decoding DNS")
	}
}

func TestVxlanDoesNotMalloc(t *testing.T) {
	var vxlan VXLAN
	if n := testing.AllocsPerRun(1000, func() {
		if err := vxlan.DecodeFromBytes(testUDPPacketVXLAN[ /*eth*/ 14+ /*ipv4*/ 20+ /*udp*/ 8:], gopacket.NilDecodeFeedback); err != nil {
			t.Fatal(err)
		}
	}); n > 0 {
		t.Error(n, "mallocs decoding Vxlan")
	}
}