// create a new zim reader
func NewReader(path string, mmap bool) (*ZimReader, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}

	z := ZimReader{f: f, mainPage: 0xffffffff, layoutPage: 0xffffffff}

	fi, err := f.Stat()
	if err != nil {
		return nil, err
	}
	size := fi.Size()

	if mmap {
		// we need a multiple of page size bigger than the file
		pc := size / int64(os.Getpagesize())
		totalMmap := pc*int64(os.Getpagesize()) + int64(os.Getpagesize())
		if (size % int64(os.Getpagesize())) == 0 {
			totalMmap = size
		}

		mmap, err := syscall.Mmap(int(f.Fd()), 0, int(totalMmap), syscall.PROT_READ, syscall.MAP_PRIVATE)
		if err != nil {
			return nil, err
		}
		z.mmap = mmap
	}

	err = z.readFileHeaders()
	return &z, err
}
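// A minimal, hypothetical sketch of the page-rounding rule used above (not part
// of the reader itself): the mmap length is rounded up to the next multiple of
// the page size unless the file size is already page-aligned. The helper name
// roundUpToPageSize is an illustration only.
package main

import (
	"fmt"
	"os"
)

func roundUpToPageSize(size int64) int64 {
	page := int64(os.Getpagesize())
	if size%page == 0 {
		return size
	}
	return (size/page + 1) * page
}

func main() {
	// With 4096-byte pages: 1 -> 4096, 4096 -> 4096, 4097 -> 8192.
	for _, n := range []int64{1, 4096, 4097} {
		fmt.Println(n, "->", roundUpToPageSize(n))
	}
}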
// EdfAnonMap maps the EdfFile structure into RAM
// IMPORTANT: everything's lost if unmapped
func EdfAnonMap() (*EdfFile, error) {
	var err error

	// Allocate return structure
	ret := new(EdfFile)

	// Create mapping references
	ret.m = make([]mmap.Mmap, 0)

	// Get the page size
	pageSize := int64(os.Getpagesize())

	// Segment size is the size of each mapped region
	ret.pageSize = uint64(pageSize)
	ret.segmentSize = uint64(EDF_LENGTH) * uint64(os.Getpagesize())

	// Set the mode
	ret.mode = edfAnonMode

	// Allocate 4 pages initially
	ret.truncate(4)

	// Generate the header
	ret.createHeader()
	err = ret.writeInitialData()

	// Make sure this gets unmapped on garbage collection
	runtime.SetFinalizer(ret, edfCallFree)

	return ret, err
}
// edfMap takes an os.File and returns an EdfFile structure,
// which represents the mmap'd underlying file.
//
// The `mode` parameter takes the following values:
//	EDF_CREATE: edfMap will truncate the file to the right length and write the correct header information
//	EDF_READ_WRITE: edfMap will verify header information
//	EDF_READ_ONLY: edfMap will verify header information
//
// IMPORTANT: EDF_LENGTH (edf.go) controls the size of the address
// space mapping. This means that the file can be truncated to the
// correct size without remapping. On 32-bit systems, this
// is set to 2GiB.
func edfMap(f *os.File, mode int) (*EdfFile, error) {
	var err error

	// Set up various things
	ret := new(EdfFile)
	ret.f = f
	ret.m = make([]mmap.Mmap, 0)

	// Figure out the flags
	protFlags := mmap.PROT_READ
	if mode == EDF_READ_WRITE || mode == EDF_CREATE {
		protFlags |= mmap.PROT_WRITE
	}
	mapFlags := mmap.MAP_FILE | mmap.MAP_SHARED

	// Get the page size
	pageSize := int64(os.Getpagesize())
	// Segment size is the size of each mapped region
	ret.pageSize = uint64(pageSize)
	ret.segmentSize = uint64(EDF_LENGTH) * uint64(os.Getpagesize())

	// Map the file
	for i := int64(0); i < EDF_SIZE; i += int64(EDF_LENGTH) * pageSize {
		thisMapping, err := mmap.Map(f, i*pageSize, int(int64(EDF_LENGTH)*pageSize), protFlags, mapFlags)
		if err != nil {
			// TODO: cleanup
			return nil, err
		}
		ret.m = append(ret.m, thisMapping)
	}

	// Verify or generate the header
	if mode == EDF_READ_WRITE || mode == EDF_READ_ONLY {
		err = ret.verifyHeader()
		if err != nil {
			return nil, err
		}
	} else if mode == EDF_CREATE {
		err = ret.truncate(4)
		if err != nil {
			return nil, err
		}
		ret.createHeader()
		err = ret.writeInitialData()
	} else {
		err = fmt.Errorf("Unrecognised flags")
	}

	// Make sure this gets unmapped on garbage collection
	runtime.SetFinalizer(ret, edfCallFree)

	// Set the mode
	ret.mode = edfFileMode

	return ret, err
}
func TestFileCreate(t *testing.T) {
	Convey("Creating a non-existent file should succeed", t, func() {
		tempFile, err := ioutil.TempFile(os.TempDir(), "TestFileCreate")
		So(err, ShouldEqual, nil)
		Convey("Mapping the file should succeed", func() {
			mapping, err := edfMap(tempFile, EDF_CREATE)
			So(err, ShouldEqual, nil)
			Convey("Unmapping the file should succeed", func() {
				err = mapping.unmap(EDF_UNMAP_SYNC)
				So(err, ShouldEqual, nil)
			})
			// Read the magic bytes
			magic := make([]byte, 4)
			read, err := tempFile.ReadAt(magic, 0)
			Convey("Magic bytes should be correct", func() {
				So(err, ShouldEqual, nil)
				So(read, ShouldEqual, 4)
				So(magic[0], ShouldEqual, byte('G'))
				So(magic[1], ShouldEqual, byte('O'))
				So(magic[2], ShouldEqual, byte('L'))
				So(magic[3], ShouldEqual, byte('N'))
			})
			// Read the file version
			versionBytes := make([]byte, 4)
			read, err = tempFile.ReadAt(versionBytes, 4)
			Convey("Version should be correct", func() {
				So(err, ShouldEqual, nil)
				So(read, ShouldEqual, 4)
				version := uint32FromBytes(versionBytes)
				So(version, ShouldEqual, EDF_VERSION)
			})
			// Read the block size
			blockBytes := make([]byte, 4)
			read, err = tempFile.ReadAt(blockBytes, 8)
			Convey("Page size should be correct", func() {
				So(err, ShouldEqual, nil)
				So(read, ShouldEqual, 4)
				pageSize := uint32FromBytes(blockBytes)
				So(pageSize, ShouldEqual, os.Getpagesize())
			})
			// Check the file size is at least four * page size
			info, err := tempFile.Stat()
			Convey("File should be the right size", func() {
				So(err, ShouldEqual, nil)
				So(info.Size(), ShouldBeGreaterThanOrEqualTo, 4*os.Getpagesize())
			})
		})
	})
}
// mmap the given file, get the mincore vector, then
// return it as an []bool
func FileMincore(f *os.File, size int64) ([]bool, error) {
	// skip the "could not mmap" error when the file size is 0
	if int(size) == 0 {
		return nil, nil
	}

	// mmap is a []byte
	mmap, err := unix.Mmap(int(f.Fd()), 0, int(size), unix.PROT_NONE, unix.MAP_SHARED)
	if err != nil {
		return nil, fmt.Errorf("could not mmap: %v", err)
	}
	// TODO: check for MAP_FAILED which is ((void *) -1)
	// but maybe unnecessary since it looks like errno is always set when MAP_FAILED

	// unmap when we're done; registered before the syscall so the mapping is
	// also released if mincore fails
	defer unix.Munmap(mmap)

	// one byte per page, only LSB is used, remainder is reserved and clear
	vecsz := (size + int64(os.Getpagesize()) - 1) / int64(os.Getpagesize())
	vec := make([]byte, vecsz)

	// get all of the arguments to the mincore syscall converted to uintptr
	mmap_ptr := uintptr(unsafe.Pointer(&mmap[0]))
	size_ptr := uintptr(size)
	vec_ptr := uintptr(unsafe.Pointer(&vec[0]))

	// use Go's ASM to submit directly to the kernel, no C wrapper needed
	// mincore(2): int mincore(void *addr, size_t length, unsigned char *vec);
	// 0 on success, takes the pointer to the mmap, a size, which is the
	// size that came from f.Stat(), and the vector, which is a pointer
	// to the memory behind an []byte
	// this writes a snapshot of the data into vec, which is a list of 8-bit flags
	// with the LSB set if the page in that position is currently in VFS cache
	ret, _, err := unix.Syscall(unix.SYS_MINCORE, mmap_ptr, size_ptr, vec_ptr)
	if ret != 0 {
		return nil, fmt.Errorf("syscall SYS_MINCORE failed: %v", err)
	}

	mc := make([]bool, vecsz)

	// there is no bitshift, only bool
	for i, b := range vec {
		if b%2 == 1 {
			mc[i] = true
		} else {
			mc[i] = false
		}
	}

	return mc, nil
}
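// A brief usage sketch, assuming the FileMincore function above is available in
// the same package (and the same "fmt"/"os" imports): map a file, ask the kernel
// which pages are resident, and report how many are in the page cache. The
// function name reportCachedPages is hypothetical.
func reportCachedPages(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		return err
	}

	resident, err := FileMincore(f, fi.Size())
	if err != nil {
		return err
	}

	cached := 0
	for _, inCore := range resident {
		if inCore {
			cached++
		}
	}
	fmt.Printf("%s: %d of %d pages in page cache\n", path, cached, len(resident))
	return nil
}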
func TestWalkMemoryDoesntOverlapTheBuffer(t *testing.T) {
	cmd, err := test.LaunchTestCaseAndWaitForInitialization()
	if err != nil {
		t.Fatal(err)
	}
	defer cmd.Process.Kill()

	pid := uint(cmd.Process.Pid)
	proc, err, softerrors := process.OpenFromPid(pid)
	test.PrintSoftErrors(softerrors)
	if err != nil {
		t.Fatal(err)
	}

	pageSize := uint(os.Getpagesize())
	bufferSizes := []uint{1024, pageSize, pageSize + 100, pageSize * 2, pageSize*2 + 123}

	for _, size := range bufferSizes {
		lastRegion := MemoryRegion{}
		err, softerrors = WalkMemory(proc, 0, size, func(address uintptr, buffer []byte) (keepSearching bool) {
			currentRegion := MemoryRegion{Address: address, Size: uint(len(buffer))}

			if memoryRegionsOverlap(lastRegion, currentRegion) {
				t.Errorf("Regions overlap while reading %d at a time: %v %v", size, lastRegion, currentRegion)
				return false
			}

			lastRegion = currentRegion
			return true
		})
		test.PrintSoftErrors(softerrors)
		if err != nil {
			t.Fatal(err)
		}
	}
}
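// The memoryRegionsOverlap helper is not shown above; a hypothetical sketch of
// the interval check the test relies on could look like this. Two half-open
// ranges [Address, Address+Size) overlap when each starts before the other
// ends; zero-sized regions (like the initial lastRegion) never overlap.
func memoryRegionsOverlap(a, b MemoryRegion) bool {
	if a.Size == 0 || b.Size == 0 {
		return false
	}
	aEnd := a.Address + uintptr(a.Size)
	bEnd := b.Address + uintptr(b.Size)
	return a.Address < bEnd && b.Address < aEnd
}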
// Ensure a bucket can calculate stats.
func TestBucket_Stats_Small(t *testing.T) {
	withOpenDB(func(db *DB, path string) {
		db.Update(func(tx *Tx) error {
			// Add a bucket that fits on a single root leaf.
			b, err := tx.CreateBucket([]byte("whozawhats"))
			assert.NoError(t, err)
			b.Put([]byte("foo"), []byte("bar"))
			return nil
		})
		mustCheck(db)
		db.View(func(tx *Tx) error {
			b := tx.Bucket([]byte("whozawhats"))
			stats := b.Stats()
			assert.Equal(t, stats.BranchPageN, 0)
			assert.Equal(t, stats.BranchOverflowN, 0)
			assert.Equal(t, stats.LeafPageN, 1)
			assert.Equal(t, stats.LeafOverflowN, 0)
			assert.Equal(t, stats.KeyN, 1)
			assert.Equal(t, stats.Depth, 1)
			if os.Getpagesize() == 4096 {
				// These values assume the default 4096-byte page size.
				assert.Equal(t, stats.BranchInuse, 0)
				assert.Equal(t, stats.BranchAlloc, 0)
				assert.Equal(t, stats.LeafInuse, 38)
				assert.Equal(t, stats.LeafAlloc, 4096)
			}
			return nil
		})
	})
}
func newInputPreloader(filePath string) (*inputPreloader, error) {
	if filePath == "/dev/null" {
		return nil, nil
	}
	file, err := os.Open(filePath)
	if err != nil {
		return nil, err
	}
	info, err := file.Stat()
	if err != nil {
		return nil, err
	}
	preloader := &inputPreloader{
		file:     file,
		fileSize: info.Size(),
	}
	mapping, err := syscall.Mmap(
		int(preloader.file.Fd()),
		0,
		int(preloader.fileSize),
		syscall.PROT_READ,
		syscall.MAP_SHARED,
	)
	if err == nil {
		pageSize := os.Getpagesize()
		preloader.mapping = mapping
		// Touch one byte per page so the whole file is faulted into the page cache.
		for i := 0; i < int(preloader.fileSize); i += pageSize {
			preloader.checksum += preloader.mapping[i]
		}
	} else {
		// mmap failed, so just read the whole file instead.
		io.Copy(ioutil.Discard, preloader.file)
	}
	return preloader, nil
}
func TestBucket_Stats_EmptyBucket(t *testing.T) {
	withOpenDB(func(db *DB, path string) {
		db.Update(func(tx *Tx) error {
			// Add a bucket that fits on a single root leaf.
			_, err := tx.CreateBucket([]byte("whozawhats"))
			assert.NoError(t, err)
			return nil
		})
		mustCheck(db)
		db.View(func(tx *Tx) error {
			b := tx.Bucket([]byte("whozawhats"))
			stats := b.Stats()
			assert.Equal(t, 0, stats.BranchPageN, "BranchPageN")
			assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN")
			assert.Equal(t, 0, stats.LeafPageN, "LeafPageN")
			assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN")
			assert.Equal(t, 0, stats.KeyN, "KeyN")
			assert.Equal(t, 1, stats.Depth, "Depth")
			assert.Equal(t, 0, stats.BranchInuse, "BranchInuse")
			assert.Equal(t, 0, stats.LeafInuse, "LeafInuse")
			if os.Getpagesize() == 4096 {
				// These values assume the default 4096-byte page size.
				assert.Equal(t, 0, stats.BranchAlloc, "BranchAlloc")
				assert.Equal(t, 0, stats.LeafAlloc, "LeafAlloc")
			}
			assert.Equal(t, 1, stats.BucketN, "BucketN")
			assert.Equal(t, 1, stats.InlineBucketN, "InlineBucketN")
			assert.Equal(t, pageHeaderSize, stats.InlineBucketInuse, "InlineBucketInuse")
			return nil
		})
	})
}
func TestMonitorAndParseRIB(t *testing.T) {
	if testing.Short() || os.Getuid() != 0 {
		t.Skip("must be root")
	}

	// We suppose that using an IPv4 link-local address and the
	// dot1Q ID for Token Ring and FDDI doesn't harm anyone.
	pv := &propVirtual{addr: "169.254.0.1", mask: "255.255.255.0"}
	if err := pv.configure(1002); err != nil {
		t.Skip(err)
	}
	if err := pv.setup(); err != nil {
		t.Skip(err)
	}
	pv.teardown()

	s, err := syscall.Socket(syscall.AF_ROUTE, syscall.SOCK_RAW, syscall.AF_UNSPEC)
	if err != nil {
		t.Fatal(err)
	}
	defer syscall.Close(s)

	go func() {
		b := make([]byte, os.Getpagesize())
		for {
			n, err := syscall.Read(s, b)
			if err != nil {
				return
			}
			ms, err := ParseRIB(0, b[:n])
			if err != nil {
				t.Error(err)
				return
			}
			ss, err := msgs(ms).validate()
			if err != nil {
				t.Error(err)
				return
			}
			for _, s := range ss {
				t.Log(s)
			}
		}
	}()

	for _, vid := range []int{1002, 1003, 1004, 1005} {
		pv := &propVirtual{addr: "169.254.0.1", mask: "255.255.255.0"}
		if err := pv.configure(vid); err != nil {
			t.Fatal(err)
		}
		if err := pv.setup(); err != nil {
			t.Fatal(err)
		}
		time.Sleep(200 * time.Millisecond)
		if err := pv.teardown(); err != nil {
			t.Fatal(err)
		}
		time.Sleep(200 * time.Millisecond)
	}
}
func (s *AfpacketSniffer) Open(config *Config) error {
	// Capture settings
	const (
		// MMap buffer size
		buffer_mb int = 24
		// Max packet length
		snaplen int = 65536
		// Set the interface in promiscuous mode
		promisc bool = true
	)

	frame_size, block_size, num_blocks, err := afpacketComputeSize(
		buffer_mb, snaplen, os.Getpagesize())
	if err != nil {
		return fmt.Errorf("Error calculating afpacket size: %s", err)
	}

	// Configure the afpacket ring and bind it to the interface
	var tPacket *afpacket.TPacket
	tPacket, err = afpacket.NewTPacket(
		afpacket.OptInterface(*iface),
		afpacket.OptFrameSize(frame_size),
		afpacket.OptBlockSize(block_size),
		afpacket.OptNumBlocks(num_blocks))
	if err != nil {
		return fmt.Errorf("Error opening afpacket interface: %s", err)
	}

	s.handle = tPacket
	return nil
}
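// afpacketComputeSize is not shown above. A sketch of one way to derive a
// page-aligned frame size, block size and block count from a target buffer size
// in MiB, modeled on the gopacket afpacket example; treat the exact constants
// (such as 128 frames per block) as assumptions rather than the library's
// required layout. Requires the "fmt" import already used above.
func afpacketComputeSize(targetSizeMb int, snaplen int, pageSize int) (
	frameSize int, blockSize int, numBlocks int, err error) {

	if snaplen < pageSize {
		// Several frames fit in one page: shrink the frame to an even divisor.
		frameSize = pageSize / (pageSize / snaplen)
	} else {
		// Round the frame up to a whole number of pages.
		frameSize = (snaplen/pageSize + 1) * pageSize
	}

	// Assume 128 frames per block.
	blockSize = frameSize * 128
	numBlocks = (targetSizeMb * 1024 * 1024) / blockSize
	if numBlocks == 0 {
		return 0, 0, 0, fmt.Errorf("buffer size too small")
	}
	return frameSize, blockSize, numBlocks, nil
}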
func TestAllocFixed(t *testing.T) {
	Convey("Creating a non-existent file should succeed", t, func() {
		tempFile, err := ioutil.TempFile(os.TempDir(), "TestFileCreate")
		So(err, ShouldEqual, nil)
		Convey("Mapping the file should succeed", func() {
			mapping, err := edfMap(tempFile, EDF_CREATE)
			So(err, ShouldEqual, nil)
			Convey("Allocation should succeed", func() {
				r, err := mapping.AllocPages(1, 2)
				So(err, ShouldEqual, nil)
				So(r.Start.Byte, ShouldEqual, 4*os.Getpagesize())
				So(r.Start.Segment, ShouldEqual, 0)
				Convey("Unmapping the file should succeed", func() {
					err = mapping.unmap(EDF_UNMAP_SYNC)
					So(err, ShouldEqual, nil)
					Convey("Remapping the file should succeed", func() {
						mapping, err = edfMap(tempFile, EDF_READ_ONLY)
						Convey("Should get the same allocations back", func() {
							rr, err := mapping.getThreadBlocks(2)
							So(err, ShouldEqual, nil)
							So(len(rr), ShouldEqual, 1)
							So(rr[0], ShouldResemble, r)
						})
					})
				})
			})
		})
	})
}
// getSize returns a large buffer size: a heuristic multiple of the OS page size.
func getSize() int {
	size := os.Getpagesize()
	if size < 65536 {
		return size * 16
	}
	return size
}
func TestPeek(t *testing.T) {
	pagesize := os.Getpagesize()
	peeks := []int{3, pagesize * 2 / 3, 3, pagesize - 1, 3, pagesize * 13 / 5, 3, 0, 3}
	counts := []int{1, 0, 0, 0, 0, 1, 0, 0, 1}
	sizes := []int{0, 1, 0, pagesize * 2 / 3, 0, pagesize * 11 / 7, 0, pagesize * 13 / 5, 0}
	lastAttempt := 511
	incomplete := 255
	lastCount := 0

	total := incomplete
	for _, size := range sizes {
		total += size
	}

	rndr := newrndreader(23, 0, total, io.EOF, nil)
	r := bufio.NewReader(rndr)
	offset := 0
	for i, size := range sizes {
		verifypeek(t, r, peeks[i], []error{nil}, rndr, 23, offset, peeks[i], counts[i])
		offset = verifyread(t, r, size, []error{nil}, rndr, 23, offset, size)
	}
	verifypeek(t, r, lastAttempt, []error{io.EOF}, rndr, 23, offset, incomplete, lastCount)
}
// OtherSystemInfo retrieves information from the system like hostname, IPv4 address, pagesize,
// target architecture and target operating system.
func OtherSystemInfo() map[string]string {
	otherInfoMap := make(map[string]string)

	// Hostname
	hostname, err := os.Hostname()
	if err != nil {
		otherInfoMap["hostname"] = "Error: The hostname for the current system could not be retrieved."
	} else {
		otherInfoMap["hostname"] = hostname
	}

	// IP address
	addresses, err := net.LookupHost(hostname)
	if err != nil {
		otherInfoMap["ipv4_address"] = "Error: The IPv4 address for the current system could not be retrieved."
	} else {
		for _, address := range addresses {
			ipv4_address := net.ParseIP(address).To4()
			if ipv4_address != nil {
				otherInfoMap["ipv4_address"] = address
			}
		}
	}

	otherInfoMap["os_pagesize"] = strconv.Itoa(os.Getpagesize())
	otherInfoMap["target_architecture"] = runtime.GOARCH
	otherInfoMap["target_os"] = runtime.GOOS

	return otherInfoMap
}
func TestAnonMap(t *testing.T) {
	Convey("Anonymous mapping should succeed", t, func() {
		mapping, err := EdfAnonMap()
		So(err, ShouldEqual, nil)
		bytes := mapping.m[0]
		// Read the magic bytes
		magic := bytes[0:4]
		Convey("Magic bytes should be correct", func() {
			So(magic[0], ShouldEqual, byte('G'))
			So(magic[1], ShouldEqual, byte('O'))
			So(magic[2], ShouldEqual, byte('L'))
			So(magic[3], ShouldEqual, byte('N'))
		})
		// Read the file version
		versionBytes := bytes[4:8]
		Convey("Version should be correct", func() {
			version := uint32FromBytes(versionBytes)
			So(version, ShouldEqual, EDF_VERSION)
		})
		// Read the block size
		blockBytes := bytes[8:12]
		Convey("Page size should be correct", func() {
			pageSize := uint32FromBytes(blockBytes)
			So(pageSize, ShouldEqual, os.Getpagesize())
		})
	})
}
// truncate changes the size of the underlying file
// The size of the address space doesn't change.
func (e *EdfFile) truncate(size int64) error {
	pageSize := int64(os.Getpagesize())
	newSize := pageSize * size

	// Synchronise
	// e.Sync()

	// Double-check that we're not reducing file size
	fileInfo, err := e.f.Stat()
	if err != nil {
		return err
	}
	if fileInfo.Size() > newSize {
		return fmt.Errorf("Can't reduce file size!")
	}

	// Truncate the file
	err = e.f.Truncate(newSize)
	if err != nil {
		return err
	}

	// Verify that the file is larger now than it was
	fileInfo, err = e.f.Stat()
	if err != nil {
		return err
	}
	if fileInfo.Size() != newSize {
		return fmt.Errorf("Truncation failed: %d, %d", fileInfo.Size(), newSize)
	}

	return err
}
// Ensure a bucket can calculate stats.
func TestBucket_Stats(t *testing.T) {
	withOpenDB(func(db *DB, path string) {
		db.Update(func(tx *Tx) error {
			// Add bucket with fewer keys but one big value.
			_, err := tx.CreateBucket([]byte("woojits"))
			assert.NoError(t, err)
			b := tx.Bucket([]byte("woojits"))
			for i := 0; i < 500; i++ {
				b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))
			}
			b.Put([]byte("really-big-value"), []byte(strings.Repeat("*", 10000)))
			return nil
		})
		mustCheck(db)
		db.View(func(tx *Tx) error {
			b := tx.Bucket([]byte("woojits"))
			stats := b.Stats()
			assert.Equal(t, stats.BranchPageN, 1)
			assert.Equal(t, stats.BranchOverflowN, 0)
			assert.Equal(t, stats.LeafPageN, 6)
			assert.Equal(t, stats.LeafOverflowN, 2)
			assert.Equal(t, stats.KeyN, 501)
			assert.Equal(t, stats.Depth, 2)
			if os.Getpagesize() == 4096 {
				// These values assume the default 4096-byte page size.
				assert.Equal(t, stats.BranchInuse, 125)
				assert.Equal(t, stats.BranchAlloc, 4096)
				assert.Equal(t, stats.LeafInuse, 20908)
				assert.Equal(t, stats.LeafAlloc, 32768)
			}
			return nil
		})
	})
}
func TestBucket_Stats_EmptyBucket(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	db.Update(func(tx *bolt.Tx) error {
		// Add a bucket that fits on a single root leaf.
		_, err := tx.CreateBucket([]byte("whozawhats"))
		ok(t, err)
		return nil
	})
	db.MustCheck()
	db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("whozawhats"))
		stats := b.Stats()
		equals(t, 0, stats.BranchPageN)
		equals(t, 0, stats.BranchOverflowN)
		equals(t, 0, stats.LeafPageN)
		equals(t, 0, stats.LeafOverflowN)
		equals(t, 0, stats.KeyN)
		equals(t, 1, stats.Depth)
		equals(t, 0, stats.BranchInuse)
		equals(t, 0, stats.LeafInuse)
		if os.Getpagesize() == 4096 {
			// These values assume the default 4096-byte page size.
			equals(t, 0, stats.BranchAlloc)
			equals(t, 0, stats.LeafAlloc)
		}
		equals(t, 1, stats.BucketN)
		equals(t, 1, stats.InlineBucketN)
		equals(t, 16, stats.InlineBucketInuse)
		return nil
	})
}
func TestKVPutError(t *testing.T) {
	defer testutil.AfterTest(t)

	var (
		maxReqBytes = 1.5 * 1024 * 1024 // hard coded max in v3_server.go
		quota       = int64(int(maxReqBytes) + 8*os.Getpagesize())
	)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, QuotaBackendBytes: quota})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.RandClient())
	ctx := context.TODO()

	_, err := kv.Put(ctx, "", "bar")
	if err != rpctypes.ErrEmptyKey {
		t.Fatalf("expected %v, got %v", rpctypes.ErrEmptyKey, err)
	}

	_, err = kv.Put(ctx, "key", strings.Repeat("a", int(maxReqBytes+100)))
	if err != rpctypes.ErrRequestTooLarge {
		t.Fatalf("expected %v, got %v", rpctypes.ErrRequestTooLarge, err)
	}

	_, err = kv.Put(ctx, "foo1", strings.Repeat("a", int(maxReqBytes-50)))
	if err != nil { // below quota
		t.Fatal(err)
	}

	time.Sleep(1 * time.Second) // give enough time for commit

	_, err = kv.Put(ctx, "foo2", strings.Repeat("a", int(maxReqBytes-50)))
	if err != rpctypes.ErrNoSpace { // over quota
		t.Fatalf("expected %v, got %v", rpctypes.ErrNoSpace, err)
	}
}
func open(name string) (*file, error) {
	fd, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	return &file{fd, make([]byte, 0, os.Getpagesize()), false}, nil
}
func TestSockStats(t *testing.T) {
	file, err := os.Open("fixtures/sockstat")
	if err != nil {
		t.Fatal(err)
	}
	defer file.Close()

	sockStats, err := parseSockStats(file, fileName)
	if err != nil {
		t.Fatal(err)
	}

	if want, got := "229", sockStats["sockets"]["used"]; want != got {
		t.Errorf("want sockstat sockets used %s, got %s", want, got)
	}
	if want, got := "4", sockStats["TCP"]["tw"]; want != got {
		t.Errorf("want sockstat TCP tw %s, got %s", want, got)
	}
	if want, got := "17", sockStats["TCP"]["alloc"]; want != got {
		t.Errorf("want sockstat TCP alloc %s, got %s", want, got)
	}

	// The test file has 1 for TCP mem, which is one page. So we should get the
	// page size in bytes back from sockstat_linux. We get the page size from
	// os here because this value can change from system to system. The value is
	// 4096 by default from linux 2.4 onward.
	if want, got := strconv.Itoa(os.Getpagesize()), sockStats["TCP"]["mem_bytes"]; want != got {
		t.Errorf("want sockstat TCP mem_bytes %s, got %s", want, got)
	}
}
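// A small sketch of the conversion the test relies on: /proc/net/sockstat
// reports TCP "mem" in pages, so the parser multiplies by the page size to get
// bytes. The helper name pagesToBytes is hypothetical and only illustrates the
// arithmetic (with 4096-byte pages, the fixture's "mem 1" becomes 4096 bytes).
func pagesToBytes(pages int) int {
	return pages * os.Getpagesize()
}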
func main() {
	// Get the system hostname
	fmt.Println(os.Hostname())
	// Get the system's memory page size
	fmt.Println(os.Getpagesize())
	// List the system environment variables
	for index, env := range os.Environ() {
		fmt.Println(index, " : ", env)
	}
	// Get the environment variable for the specified key (variable names are case-insensitive on Windows)
	fmt.Println("Current system directory:", os.Getenv("windir"))
	// Set an environment variable
	fmt.Println("Environment variable cody:", os.Getenv("cody"))
	os.Setenv("Cody", "guo")
	fmt.Println("Environment variable cody:", os.Getenv("cody"))
	// Delete all environment variables
	os.Clearenv()
	fmt.Println(os.Environ())
	// If os.Exit() is called, deferred functions will not run
	// defer fmt.Println("Am I exiting?")
	// os.Exit(0)
	fmt.Println("The program has exited, nothing more will be printed...")
	fmt.Println(os.Getuid(), os.Getgid())
	fmt.Println(os.Getgroups())
	fmt.Println(os.Getpid(), os.Getppid())
	fmt.Println(os.TempDir())
}
// Ensure that opening a file with two invalid checksums returns ErrChecksum.
func TestOpen_ErrChecksum(t *testing.T) {
	if pageSize != os.Getpagesize() {
		t.Skip("page size mismatch")
	}

	// Create empty database.
	db := MustOpenDB()
	path := db.Path()
	defer db.MustClose()

	// Close database.
	if err := db.DB.Close(); err != nil {
		t.Fatal(err)
	}

	// Read data file.
	buf, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}

	// Rewrite meta pages.
	meta0 := (*meta)(unsafe.Pointer(&buf[pageHeaderSize]))
	meta0.pgid++
	meta1 := (*meta)(unsafe.Pointer(&buf[pageSize+pageHeaderSize]))
	meta1.pgid++
	if err := ioutil.WriteFile(path, buf, 0666); err != nil {
		t.Fatal(err)
	}

	// Reopen data file.
	if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrChecksum {
		t.Fatalf("unexpected error: %s", err)
	}
}
func (s *S) TestIsResidentTwoPages(c *C) {
	testPath := path.Join(c.MkDir(), "test.txt")
	file, err := os.Create(testPath)
	c.Assert(err, IsNil)
	defer file.Close()

	file.Seek(int64(os.Getpagesize()*2-1), 0)
	file.Write([]byte{'x'})

	mmap, err := gommap.Map(file.Fd(), gommap.PROT_READ|gommap.PROT_WRITE, gommap.MAP_PRIVATE)
	c.Assert(err, IsNil)
	defer mmap.UnsafeUnmap()

	// Not entirely a stable test, but should usually work.
	mmap[len(mmap)-1] = 'x'

	mapped, err := mmap.IsResident()
	c.Assert(err, IsNil)
	c.Assert(mapped, DeepEquals, []bool{false, true})

	mmap[0] = 'x'

	mapped, err = mmap.IsResident()
	c.Assert(err, IsNil)
	c.Assert(mapped, DeepEquals, []bool{true, true})
}
// AddAttribute adds an Attribute to this set of DenseInstances
// Creates a default AttributeGroup for it if a suitable one doesn't exist.
// Returns an AttributeSpec for subsequent Set() calls.
//
// IMPORTANT: will panic if storage has been allocated via Extend.
func (inst *DenseInstances) AddAttribute(a Attribute) AttributeSpec {
	var ok bool
	inst.lock.Lock()
	defer inst.lock.Unlock()

	if inst.fixed {
		panic("Can't add additional Attributes")
	}

	cur := 0
	// Generate a default AttributeGroup name
	ag := "FLOAT"
	generatingBinClass := false
	if ag, ok = inst.tmpAttrAgMap[a]; ok {
		// Retrieved the group id
	} else if _, ok := a.(*CategoricalAttribute); ok {
		inst.catRowSizeBytes += 8
		cur = inst.catRowSizeBytes / os.Getpagesize()
		ag = fmt.Sprintf("CAT%d", cur)
	} else if _, ok := a.(*FloatAttribute); ok {
		inst.floatRowSizeBytes += 8
		cur = inst.floatRowSizeBytes / os.Getpagesize()
		ag = fmt.Sprintf("FLOAT%d", cur)
	} else if _, ok := a.(*BinaryAttribute); ok {
		inst.binRowSizeBits++
		cur = (inst.binRowSizeBits / 8) / os.Getpagesize()
		ag = fmt.Sprintf("BIN%d", cur)
		generatingBinClass = true
	} else {
		panic("Unrecognised Attribute type")
	}

	// Create the ag if it doesn't exist
	if _, ok := inst.agMap[ag]; !ok {
		if !generatingBinClass {
			inst.createAttributeGroup(ag, 8)
		} else {
			inst.createAttributeGroup(ag, 0)
		}
	}
	id := inst.agMap[ag]
	p := inst.ags[id]
	p.AddAttribute(a)
	inst.attributes = append(inst.attributes, a)
	return AttributeSpec{id, len(p.Attributes()) - 1, a}
}
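// A minimal sketch of the grouping rule above (the helper floatGroupName is
// hypothetical, not part of DenseInstances): each 8-byte float attribute bumps
// the row size, and a new "FLOAT<n>" group starts once the row crosses another
// page boundary, so a single row within a group never exceeds one page.
// Requires the "fmt" and "os" imports already used above.
func floatGroupName(attrIndex int) string {
	rowSizeBytes := (attrIndex + 1) * 8 // 8 bytes per float attribute
	group := rowSizeBytes / os.Getpagesize()
	return fmt.Sprintf("FLOAT%d", group)
}

// With 4096-byte pages, attributes 0..510 land in "FLOAT0" and attribute 511
// (row size 4096 bytes) starts "FLOAT1".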
func (this *RK3288) GetMMap(PhysicalAddr int64) ([]uint8, bool) {
	if this.hFile == nil {
		return nil, false
	}

	mem, err := syscall.Mmap(int(this.hFile.Fd()), PhysicalAddr, os.Getpagesize(),
		syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)
	return mem, (err == nil)
}
func TestBucket_Stats_EmptyBucket(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	if err := db.Update(func(tx *bolt.Tx) error {
		// Add a bucket that fits on a single root leaf.
		if _, err := tx.CreateBucket([]byte("whozawhats")); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	db.MustCheck()

	if err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("whozawhats"))
		stats := b.Stats()
		if stats.BranchPageN != 0 {
			t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
		} else if stats.BranchOverflowN != 0 {
			t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
		} else if stats.LeafPageN != 0 {
			t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
		} else if stats.LeafOverflowN != 0 {
			t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
		} else if stats.KeyN != 0 {
			t.Fatalf("unexpected KeyN: %d", stats.KeyN)
		} else if stats.Depth != 1 {
			t.Fatalf("unexpected Depth: %d", stats.Depth)
		} else if stats.BranchInuse != 0 {
			t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
		} else if stats.LeafInuse != 0 {
			t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
		}

		if os.Getpagesize() == 4096 {
			if stats.BranchAlloc != 0 {
				t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
			} else if stats.LeafAlloc != 0 {
				t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
			}
		}

		if stats.BucketN != 1 {
			t.Fatalf("unexpected BucketN: %d", stats.BucketN)
		} else if stats.InlineBucketN != 1 {
			t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
		} else if stats.InlineBucketInuse != 16 {
			t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
// createHeader writes a valid header into the file.
// Unexported since it can cause data loss.
func (e *EdfFile) createHeader() {
	e.m[0][0] = byte('G')
	e.m[0][1] = byte('O')
	e.m[0][2] = byte('L')
	e.m[0][3] = byte('N')
	uint32ToBytes(EDF_VERSION, e.m[0][4:8])
	uint32ToBytes(uint32(os.Getpagesize()), e.m[0][8:12])
	e.Sync()
}
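// uint32ToBytes and uint32FromBytes are not shown in these snippets; a
// hypothetical sketch using encoding/binary, assuming the header fields are
// stored little-endian, could look like this (requires the "encoding/binary"
// import).
func uint32ToBytes(v uint32, out []byte) {
	binary.LittleEndian.PutUint32(out, v)
}

func uint32FromBytes(in []byte) uint32 {
	return binary.LittleEndian.Uint32(in)
}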
// Incore returns a slice of MincoreState values describing the in-core status
// of the memory pages in the mapping. Bitwise-AND each entry with the various
// `MINCORE_*` flags to find out that page's in-core state.
func (m Mmap) Incore() ([]MincoreState, error) {
	pageSize := os.Getpagesize()
	vec := make([]MincoreState, (len(m)+pageSize-1)/pageSize)
	_, _, errno := syscall.Syscall(syscall.SYS_MINCORE,
		uintptr(unsafe.Pointer(&m[0])),
		uintptr(len(m)),
		uintptr(unsafe.Pointer(&vec[0])))
	if errno != 0 {
		return nil, errno
	}
	return vec, nil
}
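// A brief usage sketch for Incore (assuming MincoreState is a byte-sized
// integer type as the mincore vector suggests): count how many pages of a
// mapping are currently resident. The MINCORE_* constants referenced in the
// doc comment are not shown here, so this sketch tests the least-significant
// bit directly, which mincore(2) defines as "page is resident in memory".
// The function name residentPages is hypothetical.
func residentPages(m Mmap) (int, error) {
	states, err := m.Incore()
	if err != nil {
		return 0, err
	}
	n := 0
	for _, s := range states {
		if s&1 != 0 {
			n++
		}
	}
	return n, nil
}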