Example #1
func (self *ProcList) Get() error {

	var enumSize int
	var pids [1024]C.DWORD

	// If the function succeeds, the return value is nonzero.
	ret, _, lastErr := procEnumProcesses.Call(
		uintptr(unsafe.Pointer(&pids[0])),
		uintptr(unsafe.Sizeof(pids)),
		uintptr(unsafe.Pointer(&enumSize)),
	)
	if ret == 0 {
		// Use the error captured by Call; calling GetLastError separately
		// is unreliable once the Go runtime has made intervening syscalls.
		return lastErr
	}

	results := []int{}

	pidsSize := enumSize / int(unsafe.Sizeof(pids[0]))

	for _, pid := range pids[:pidsSize] {
		results = append(results, int(pid))
	}

	self.List = results

	return nil
}
Example #2
// The C side of things will still need to allocate memory, due to the slices.
// Assumes Configuration is valid.
func (config *Configuration) _CGO() *C.CGO_Configuration {
	INFO.Println("Converting Config: ", config)
	size := C.size_t(unsafe.Sizeof(C.CGO_Configuration{}))
	c := (*C.CGO_Configuration)(C.malloc(size))

	// Need to convert each IceServer struct individually.
	total := len(config.IceServers)
	if total > 0 {
		sizeof := unsafe.Sizeof(C.CGO_IceServer{})
		cServers := unsafe.Pointer(C.malloc(C.size_t(sizeof * uintptr(total))))
		ptr := uintptr(cServers)
		for _, server := range config.IceServers {
			*(*C.CGO_IceServer)(unsafe.Pointer(ptr)) = server._CGO()
			ptr += sizeof
		}
		c.iceServers = (*C.CGO_IceServer)(cServers)
	}
	c.numIceServers = C.int(total)

	// c.iceServers = (*C.CGO_IceServer)(unsafe.Pointer(&config.IceServers))
	c.iceTransportPolicy = C.int(config.IceTransportPolicy)
	c.bundlePolicy = C.int(config.BundlePolicy)
	// [ED] c.RtcpMuxPolicy = C.int(config.RtcpMuxPolicy)
	c.peerIdentity = C.CString(config.PeerIdentity)
	// [ED] c.Certificates = config.Certificates
	// [ED] c.IceCandidatePoolSize = C.int(config.IceCandidatePoolSize)
	return c
}
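Example #2's _CGO allocates with C.malloc and C.CString, so the caller eventually has to release that memory. A minimal cleanup sketch under the same field names; FreeCGO is hypothetical and assumes each CGO_IceServer owns no further pointers (if server._CGO copies strings into C memory, those would need freeing first):

// Hypothetical helper, not part of the original example: releases what
// _CGO allocated. peerIdentity came from C.CString and iceServers from
// C.malloc, so both go back through C.free.
func FreeCGO(c *C.CGO_Configuration) {
	C.free(unsafe.Pointer(c.peerIdentity))
	C.free(unsafe.Pointer(c.iceServers))
	C.free(unsafe.Pointer(c))
}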
Example #3
func mach_semcreate() uint32 {
	var m [256]uint8
	tx := (*tmach_semcreatemsg)(unsafe.Pointer(&m))
	rx := (*rmach_semcreatemsg)(unsafe.Pointer(&m))

	tx.h.msgh_bits = 0
	tx.h.msgh_size = uint32(unsafe.Sizeof(*tx))
	tx.h.msgh_remote_port = mach_task_self()
	tx.h.msgh_id = tmach_semcreate
	tx.ndr = zerondr

	tx.policy = 0 // 0 = SYNC_POLICY_FIFO
	tx.value = 0

	for {
		r := machcall(&tx.h, int32(unsafe.Sizeof(m)), int32(unsafe.Sizeof(*rx)))
		if r == 0 {
			break
		}
		if r == _KERN_ABORTED { // interrupted
			continue
		}
		macherror(r, "semaphore_create")
	}
	if rx.body.msgh_descriptor_count != 1 {
		unimplemented("mach_semcreate desc count")
	}
	return rx.semaphore.name
}
Example #4
func main() {
	example := &Example{
		BoolValue: true,
	}

	exampleNext := &Example{
		BoolValue: true,
	}

	alignmentBoundary := unsafe.Alignof(example)

	sizeBool := unsafe.Sizeof(example.BoolValue)
	offsetBool := unsafe.Offsetof(example.BoolValue)

	sizeBoolNext := unsafe.Sizeof(exampleNext.BoolValue)
	offsetBoolNext := unsafe.Offsetof(exampleNext.BoolValue)

	fmt.Printf("Alignment Boundary: %d\n", alignmentBoundary)

	fmt.Printf("BoolValue = Size: %d Offset: %d Addr: %v\n",
		sizeBool, offsetBool, &example.BoolValue)

	fmt.Printf("Next = Size: %d Offset: %d Addr: %v\n",
		sizeBoolNext, offsetBoolNext, &exampleNext.BoolValue)
}
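One subtlety in Example #4: example is a pointer, so unsafe.Alignof(example) reports the alignment of a pointer value (typically 8 on 64-bit), not of the Example struct; Alignof(*example) would measure the struct itself. Padding also only becomes visible once differently sized fields share a struct, as in this illustrative sketch (type padded is not from the example):

package main

import (
	"fmt"
	"unsafe"
)

type padded struct {
	flag bool  // offset 0
	n    int64 // offset 8 on 64-bit: seven padding bytes precede it
}

func main() {
	var p padded
	fmt.Println(unsafe.Alignof(p.n), unsafe.Offsetof(p.n), unsafe.Sizeof(p))
	// Typically prints: 8 8 16
}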
Example #5
func webView_DWebBrowserEvents2_Invoke(
	wbe2 *webViewDWebBrowserEvents2,
	dispIdMember win.DISPID,
	riid win.REFIID,
	lcid uint32, // LCID
	wFlags uint16,
	pDispParams *win.DISPPARAMS,
	pVarResult *win.VARIANT,
	pExcepInfo unsafe.Pointer, // *EXCEPINFO
	puArgErr *uint32) uintptr {

	var wb WidgetBase
	var wvcs webViewIOleClientSite

	wv := (*WebView)(unsafe.Pointer(uintptr(unsafe.Pointer(wbe2)) +
		uintptr(unsafe.Sizeof(*wbe2)) -
		uintptr(unsafe.Sizeof(wvcs)) -
		uintptr(unsafe.Sizeof(wb))))

	switch dispIdMember {
	case win.DISPID_NAVIGATECOMPLETE2:
		wv.urlChangedPublisher.Publish()
	}

	return win.DISP_E_MEMBERNOTFOUND
}
Example #6
// mp returns the memRecord associated with the memProfile bucket b.
func (b *bucket) mp() *memRecord {
	if b.typ != memProfile {
		throw("bad use of bucket.mp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*memRecord)(data)
}
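mp locates the memRecord by skipping the bucket header plus nstk stack slots, all carved from a single allocation. Below is a self-contained sketch of that header-plus-variable-tail layout; the names header and stk are illustrative, not the runtime's:

package main

import (
	"fmt"
	"unsafe"
)

// header stands in for the fixed prefix (the runtime's bucket).
type header struct {
	nstk uintptr
}

func main() {
	const n = 4
	// One allocation for the header plus n trailing stack slots; backing it
	// with []uintptr keeps everything word-aligned.
	words := make([]uintptr, (unsafe.Sizeof(header{})+n*unsafe.Sizeof(uintptr(0)))/unsafe.Sizeof(uintptr(0)))
	h := (*header)(unsafe.Pointer(&words[0]))
	h.nstk = n
	// The tail lives immediately after the header, located via Sizeof,
	// exactly like add(unsafe.Pointer(b), unsafe.Sizeof(*b)+...) above.
	stk := (*[n]uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(h)) + unsafe.Sizeof(*h)))
	for i := range stk {
		stk[i] = uintptr(100 + i)
	}
	fmt.Println(h.nstk, stk[:]) // 4 [100 101 102 103]
}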
Example #7
// BlockProfile returns n, the number of records in the current blocking profile.
// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
// If len(p) < n, BlockProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.blockprofile flag instead
// of calling BlockProfile directly.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
	lock(&proflock)
	for b := bbuckets; b != nil; b = b.allnext {
		n++
	}
	if n <= len(p) {
		ok = true
		for b := bbuckets; b != nil; b = b.allnext {
			bp := b.bp()
			r := &p[0]
			r.Count = bp.count
			r.Cycles = bp.cycles
			if raceenabled {
				racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(unsafe.Pointer(&p)), funcPC(BlockProfile))
			}
			if msanenabled {
				msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
			}
			i := copy(r.Stack0[:], b.stk())
			for ; i < len(r.Stack0); i++ {
				r.Stack0[i] = 0
			}
			p = p[1:]
		}
	}
	unlock(&proflock)
	return
}
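The doc comment above fixes the calling contract: BlockProfile never partially fills p. A hedged sketch of the grow-and-retry loop that contract implies, using the public runtime API:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	runtime.SetBlockProfileRate(1) // enable block profiling
	p := make([]runtime.BlockProfileRecord, 32)
	for {
		n, ok := runtime.BlockProfile(p)
		if ok {
			p = p[:n]
			break
		}
		// Per the contract above, ok == false means len(p) < n: grow and retry.
		p = make([]runtime.BlockProfileRecord, n+8)
	}
	fmt.Println("records:", len(p))
}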
Example #8
func allocHashTableSomeStructFileBacked(initialSize uint64, filepath string) *HashTableSomeStruct {
	metaSize := unsafe.Sizeof(HashTableMetadataSomeStruct{})
	cellSize := unsafe.Sizeof(CellSomeStruct{})
	customSize := unsafe.Sizeof(HashTableCustomMetadataSomeStruct{})
	var toAlloc int64 = -1
	if initialSize > 0 {
		toAlloc = int64(metaSize + customSize + uintptr(initialSize+1)*cellSize)
	}

	mmm := *util.Malloc(toAlloc, filepath)

	baseP := unsafe.Pointer(&mmm.Mem[0])
	base := (uintptr)(baseP)

	zeroCell := base + metaSize + customSize

	h := &HashTableSomeStruct{
		cellSize:     cellSize,
		offheap:      mmm.Mem,
		offheapCells: mmm.Mem[metaSize:],
		mmm:          mmm,
		zeroCell:     zeroCell,
		cells:        zeroCell + cellSize,
	}

	// check metadata
	h.HashTableMetadataSomeStruct = (*HashTableMetadataSomeStruct)(baseP)
	h.HashTableCustomMetadataSomeStruct = (*HashTableCustomMetadataSomeStruct)((unsafe.Pointer)(base + metaSize))
	return h
}
Example #9
//export gostream
func gostream(_, ctx unsafe.Pointer, n C.size_t, paths, flags, ids uintptr) {
	const (
		offchar = unsafe.Sizeof((*C.char)(nil))
		offflag = unsafe.Sizeof(C.FSEventStreamEventFlags(0))
		offid   = unsafe.Sizeof(C.FSEventStreamEventId(0))
	)
	if n == 0 {
		return
	}
	ev := make([]FSEvent, 0, int(n))
	for i := uintptr(0); i < uintptr(n); i++ {
		switch flags := *(*uint32)(unsafe.Pointer((flags + i*offflag))); {
		case flags&uint32(FSEventsEventIdsWrapped) != 0:
			atomic.StoreUint64(&since, uint64(C.FSEventsGetCurrentEventId()))
		default:
			ev = append(ev, FSEvent{
				Path:  C.GoString(*(**C.char)(unsafe.Pointer(paths + i*offchar))),
				Flags: flags,
				ID:    *(*uint64)(unsafe.Pointer(ids + i*offid)),
			})
		}

	}
	(*(*streamFunc)(ctx))(ev)
}
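gostream (like fsevtCallback in Example #15) walks parallel C arrays by adding i times the element size from unsafe.Sizeof to a base address. Here is a self-contained sketch of the same stride walk over a Go array, with the unsafe.Slice view that can replace such loops on Go 1.17 and later:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	arr := [4]uint32{10, 20, 30, 40}
	base := unsafe.Pointer(&arr[0])
	stride := unsafe.Sizeof(arr[0])

	// Manual stride arithmetic, as in the callback above.
	for i := uintptr(0); i < 4; i++ {
		fmt.Println(*(*uint32)(unsafe.Pointer(uintptr(base) + i*stride)))
	}

	// Equivalent slice view (Go 1.17+): bounds checks come back for free.
	fmt.Println(unsafe.Slice((*uint32)(base), len(arr)))
}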
Example #10
func getScrollPos(hwnd _HWND) (xpos int32, ypos int32) {
	var si _SCROLLINFO

	si.cbSize = uint32(unsafe.Sizeof(si))
	si.fMask = _SIF_POS | _SIF_TRACKPOS
	r1, _, err := _getScrollInfo.Call(
		uintptr(hwnd),
		uintptr(_SB_HORZ),
		uintptr(unsafe.Pointer(&si)))
	if r1 == 0 { // failure
		panic(fmt.Errorf("error getting horizontal scroll position for Area: %v", err))
	}
	xpos = si.nPos
	si.cbSize = uint32(unsafe.Sizeof(si)) // MSDN example code reinitializes this each time, so we'll do it too just to be safe
	si.fMask = _SIF_POS | _SIF_TRACKPOS
	r1, _, err = _getScrollInfo.Call(
		uintptr(hwnd),
		uintptr(_SB_VERT),
		uintptr(unsafe.Pointer(&si)))
	if r1 == 0 { // failure
		panic(fmt.Errorf("error getting vertical scroll position for Area: %v", err))
	}
	ypos = si.nPos
	return xpos, ypos
}
Example #11
func adjustAreaScrollbars(s *sysData) {
	var si _SCROLLINFO

	cwid, cht := getAreaControlSize(s.hwnd)

	// the trick is we want a page to be the width/height of the visible area
	// so the scroll range would go from [0..image_dimension - control_dimension]
	// but judging from the sample code on MSDN, we don't need to do this; the scrollbar will do it for us
	// we DO need to handle it when scrolling, though, since the thumb can only go up to this upper limit

	// have to do horizontal and vertical separately
	si.cbSize = uint32(unsafe.Sizeof(si))
	si.fMask = _SIF_RANGE | _SIF_PAGE
	si.nMin = 0
	si.nMax = int32(s.areawidth - 1) // the max point is inclusive, so we have to pass in the last valid value, not the first invalid one (see http://blogs.msdn.com/b/oldnewthing/archive/2003/07/31/54601.aspx); if we don't, we get weird things like the scrollbar sometimes showing one extra scroll position at the end that you can never scroll to
	si.nPage = uint32(cwid)
	_setScrollInfo.Call(
		uintptr(s.hwnd),
		uintptr(_SB_HORZ),
		uintptr(unsafe.Pointer(&si)),
		uintptr(_TRUE)) // redraw the scroll bar

	si.cbSize = uint32(unsafe.Sizeof(si)) // MSDN sample code does this a second time; let's do it too to be safe
	si.fMask = _SIF_RANGE | _SIF_PAGE
	si.nMin = 0
	si.nMax = int32(s.areaheight - 1)
	si.nPage = uint32(cht)
	_setScrollInfo.Call(
		uintptr(s.hwnd),
		uintptr(_SB_VERT),
		uintptr(unsafe.Pointer(&si)),
		uintptr(_TRUE)) // redraw the scroll bar
}
Example #12
func slicerunetostring(buf *tmpBuf, a []rune) string {
	if raceenabled && len(a) > 0 {
		racereadrangepc(unsafe.Pointer(&a[0]),
			uintptr(len(a))*unsafe.Sizeof(a[0]),
			getcallerpc(unsafe.Pointer(&buf)),
			funcPC(slicerunetostring))
	}
	if msanenabled && len(a) > 0 {
		msanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
	}
	var dum [4]byte
	size1 := 0
	for _, r := range a {
		size1 += encoderune(dum[:], r)
	}
	s, b := rawstringtmp(buf, size1+3)
	size2 := 0
	for _, r := range a {
		// check for race
		if size2 >= size1 {
			break
		}
		size2 += encoderune(b[size2:], r)
	}
	return s[:size2]
}
Example #13
// SocketStats saves stats from the socket to the TPacket instance and returns them.
func (h *TPacket) SocketStats() (SocketStats, SocketStatsV3, error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	// We need to save the counters since asking for the stats will clear them
	if h.tpVersion == TPacketVersion3 {
		prevStats := h.socketStatsV3
		socklen := unsafe.Sizeof(h.socketStatsV3)
		var slt C.socklen_t = C.socklen_t(socklen)
		_, err := C.getsockopt(h.fd, C.SOL_PACKET, C.PACKET_STATISTICS, unsafe.Pointer(&h.socketStatsV3), &slt)
		if err != nil {
			return SocketStats{}, SocketStatsV3{}, err
		}

		h.socketStatsV3.tp_packets += prevStats.tp_packets
		h.socketStatsV3.tp_drops += prevStats.tp_drops
		h.socketStatsV3.tp_freeze_q_cnt += prevStats.tp_freeze_q_cnt

		return h.socketStats, h.socketStatsV3, nil
	} else {
		prevStats := h.socketStats
		socklen := unsafe.Sizeof(h.socketStats)
		var slt C.socklen_t = C.socklen_t(socklen)
		_, err := C.getsockopt(h.fd, C.SOL_PACKET, C.PACKET_STATISTICS, unsafe.Pointer(&h.socketStats), &slt)
		if err != nil {
			return SocketStats{}, SocketStatsV3{}, err
		}

		h.socketStats.tp_packets += prevStats.tp_packets
		h.socketStats.tp_drops += prevStats.tp_drops

		return h.socketStats, h.socketStatsV3, nil

	}
}
Example #14
// Note: this assumes a chsc/gogl-style binding that exposes gl.Float,
// gl.Sizeiptr and gl.Pointer; VaoId and VboId are package-level globals.
func CreateVBO() {
	vertices := [...]gl.Float{
		-0.8, -0.8, 0.0, 1.0,
		0.0, 0.8, 0.0, 1.0,
		0.8, -0.8, 0.0, 1.0,
	}

	colours := [...]gl.Float{
		1.0, 0.0, 0.0, 1.0,
		0.0, 1.0, 0.0, 1.0,
		0.0, 0.0, 1.0, 1.0,
	}
	_ = colours // not uploaded yet; kept for a later colour-buffer step

	bufferSize := unsafe.Sizeof(vertices)
	vertexSize := 4 * unsafe.Sizeof(vertices[0]) // four floats per vertex
	_ = vertexSize

	gl.GenVertexArrays(1, &VaoId)
	gl.BindVertexArray(VaoId)

	gl.GenBuffers(1, &VboId)
	gl.BindBuffer(gl.ARRAY_BUFFER, VboId)
	gl.BufferData(gl.ARRAY_BUFFER, gl.Sizeiptr(bufferSize), gl.Pointer(&vertices[0]), gl.STATIC_DRAW)
	gl.VertexAttribPointer(1, 4, gl.FLOAT, gl.FALSE, 0, nil)
	gl.EnableVertexAttribArray(1)

	if err := gl.GetError(); err != gl.NO_ERROR {
		fmt.Println(err, "ERROR: Could not create a VBO")
	}
}
Example #15
//export fsevtCallback
func fsevtCallback(stream C.FSEventStreamRef, info uintptr, numEvents C.size_t, paths **C.char, flags *C.FSEventStreamEventFlags, ids *C.FSEventStreamEventId) {
	events := make([]Event, int(numEvents))

	es := registry.Get(info)
	if es == nil {
		return
	}

	for i := 0; i < int(numEvents); i++ {
		cpaths := uintptr(unsafe.Pointer(paths)) + (uintptr(i) * unsafe.Sizeof(*paths))
		cpath := *(**C.char)(unsafe.Pointer(cpaths))

		cflags := uintptr(unsafe.Pointer(flags)) + (uintptr(i) * unsafe.Sizeof(*flags))
		cflag := *(*C.FSEventStreamEventFlags)(unsafe.Pointer(cflags))

		cids := uintptr(unsafe.Pointer(ids)) + (uintptr(i) * unsafe.Sizeof(*ids))
		cid := *(*C.FSEventStreamEventId)(unsafe.Pointer(cids))

		events[i] = Event{Path: C.GoString(cpath), Flags: EventFlags(cflag), ID: uint64(cid)}
		// Record the latest EventID to support resuming the stream
		es.EventID = uint64(cid)
	}

	es.Events <- events
}
Example #16
func cpuInitSearchKeys(commandQueue cl.CL_command_queue,
	svmSearchBuf unsafe.Pointer) {
	var nextData *searchKey
	var status cl.CL_int

	status = cl.CLEnqueueSVMMap(commandQueue,
		cl.CL_TRUE, //blocking call
		cl.CL_MAP_WRITE_INVALIDATE_REGION,
		svmSearchBuf,
		cl.CL_size_t(NUMBER_OF_SEARCH_KEY*unsafe.Sizeof(sampleKey)),
		0,
		nil,
		nil)
	utils.CHECK_STATUS(status, cl.CL_SUCCESS, "clEnqueueSVMMap(svmSearchBuf)")

	r := rand.New(rand.NewSource(999))

	// initialize nodes
	for i := 0; i < NUMBER_OF_SEARCH_KEY; i++ {
		nextData = (*searchKey)(unsafe.Pointer(uintptr(svmSearchBuf) + uintptr(i)*unsafe.Sizeof(sampleKey)))
		// allocate a random value to node
		nextData.key = cl.CL_int(r.Int())
		// all pointers are null
		nextData.oclNode = nil
		nextData.nativeNode = nil
	}

	status = cl.CLEnqueueSVMUnmap(commandQueue,
		svmSearchBuf,
		0,
		nil,
		nil)
	utils.CHECK_STATUS(status, cl.CL_SUCCESS, "clEnqueueSVMUnmap(svmSearchBuf)")
}
Example #17
func selectsize(size uintptr) uintptr {
	selsize := unsafe.Sizeof(hselect{}) +
		(size-1)*unsafe.Sizeof(hselect{}.scase[0]) +
		size*unsafe.Sizeof(*hselect{}.lockorder) +
		size*unsafe.Sizeof(*hselect{}.pollorder)
	return round(selsize, sys.Int64Align)
}
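selectsize sums the fixed hselect header with the per-case arrays, then rounds the total up to Int64Align. The rounding step is the usual power-of-two round-up; a small standalone version (the runtime's actual round helper is assumed to behave like this):

package main

import "fmt"

// round returns n rounded up to a multiple of a; a must be a power of two.
func round(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

func main() {
	fmt.Println(round(53, 8)) // 56: a 53-byte select block padded to 8-byte alignment
}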
Example #18
func svmCompareResults(commandQueue cl.CL_command_queue,
	svmSearchBuf unsafe.Pointer) bool {
	var compare_status bool
	var status cl.CL_int

	status = cl.CLEnqueueSVMMap(commandQueue,
		cl.CL_TRUE, //blocking call
		cl.CL_MAP_READ, // this routine only reads; WRITE_INVALIDATE_REGION would discard the mapped contents
		svmSearchBuf,
		cl.CL_size_t(NUMBER_OF_SEARCH_KEY*unsafe.Sizeof(sampleKey)),
		0,
		nil,
		nil)
	utils.CHECK_STATUS(status, cl.CL_SUCCESS, "clEnqueueSVMMap(svmSearchBuf)")

	compare_status = true
	for i := 0; i < NUMBER_OF_SEARCH_KEY; i++ {
		currKey := (*searchKey)(unsafe.Pointer(uintptr(svmSearchBuf) + uintptr(i)*unsafe.Sizeof(sampleKey)))

		/* compare OCL and native nodes */
		if currKey.oclNode != currKey.nativeNode {
			compare_status = false
			break
		}
	}

	status = cl.CLEnqueueSVMUnmap(commandQueue,
		svmSearchBuf,
		0,
		nil,
		nil)
	utils.CHECK_STATUS(status, cl.CL_SUCCESS, "clEnqueueSVMUnmap(svmSearchBuf)")

	return compare_status
}
Example #19
// bp returns the blockRecord associated with the blockProfile bucket b.
func (b *bucket) bp() *blockRecord {
	if b.typ != blockProfile {
		throw("bad use of bucket.bp")
	}
	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
	return (*blockRecord)(data)
}
Example #20
func (s *Sprite) GetVAO() (VAO uint32) {

	gl.GenVertexArrays(1, &VAO)
	gl.BindVertexArray(VAO)

	gl.GenBuffers(1, &s.vbo)
	gl.BindBuffer(gl.ARRAY_BUFFER, s.vbo)

	gl.BufferData(gl.ARRAY_BUFFER, len(s.vertexData)*4, gl.Ptr(&s.vertexData[0]), gl.STATIC_DRAW)

	attrib_loc := uint32(gl.GetAttribLocation(s.program, gl.Str("vert\x00")))
	color_loc := uint32(gl.GetAttribLocation(s.program, gl.Str("vertColor\x00")))
	gl.EnableVertexAttribArray(attrib_loc)
	gl.VertexAttribPointer(attrib_loc, 3, gl.FLOAT, false, int32(unsafe.Sizeof(s.vertexData[0]))*7, nil)

	gl.EnableVertexAttribArray(color_loc)
	gl.VertexAttribPointer(color_loc, 4, gl.FLOAT, false, int32(unsafe.Sizeof(s.vertexData[0]))*7, gl.PtrOffset(3*4))

	time_loc := gl.GetUniformLocation(s.program, gl.Str("time\x00"))

	gl.Uniform1f(time_loc, s.runtime)

	gl.BindBuffer(gl.ARRAY_BUFFER, 0)

	gl.BindVertexArray(0)

	//Update
	time := glfw.GetTime()
	elapsed := float32(time) - s.previousTime
	s.previousTime = float32(time)
	s.runtime = s.runtime + elapsed
	return
}
Example #21
func init() {
	var memStats MemStats
	if sizeof_C_MStats != unsafe.Sizeof(memStats) {
		println(sizeof_C_MStats, unsafe.Sizeof(memStats))
		throw("MStats vs MemStatsType size mismatch")
	}
}
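Example #21 verifies at start-up that the C and Go views of the memory stats have the same size. When both sizes are known to the compiler, the same guarantee can be had at compile time, since unsafe.Sizeof of a fixed-size type is a constant; a minimal sketch with illustrative types a and b:

package main

import "unsafe"

type a struct{ x, y uint64 }
type b struct{ p [2]uint64 }

// Each array length would be negative, a compile error, if the sizes
// differed; together the two lines assert Sizeof(a) == Sizeof(b).
var _ [unsafe.Sizeof(a{}) - unsafe.Sizeof(b{})]byte
var _ [unsafe.Sizeof(b{}) - unsafe.Sizeof(a{})]byte

func main() {}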
Example #22
func newBitmapFromHBITMAP(hBmp win.HBITMAP) (bmp *Bitmap, err error) {
	var dib win.DIBSECTION
	if win.GetObject(win.HGDIOBJ(hBmp), unsafe.Sizeof(dib), unsafe.Pointer(&dib)) == 0 {
		return nil, newError("GetObject failed")
	}

	bmih := &dib.DsBmih

	bmihSize := uintptr(unsafe.Sizeof(*bmih))
	pixelsSize := uintptr(int32(bmih.BiBitCount)*bmih.BiWidth*bmih.BiHeight) / 8

	totalSize := uintptr(bmihSize + pixelsSize)

	hPackedDIB := win.GlobalAlloc(win.GHND, totalSize)
	dest := win.GlobalLock(hPackedDIB)
	defer win.GlobalUnlock(hPackedDIB)

	src := unsafe.Pointer(&dib.DsBmih)

	win.MoveMemory(dest, src, bmihSize)

	dest = unsafe.Pointer(uintptr(dest) + bmihSize)
	src = dib.DsBm.BmBits

	win.MoveMemory(dest, src, pixelsSize)

	return &Bitmap{
		hBmp:       hBmp,
		hPackedDIB: hPackedDIB,
		size: Size{
			int(bmih.BiWidth),
			int(bmih.BiHeight),
		},
	}, nil
}
Example #23
func main() {
	i64 := int64(9223372036854775807)
	s := "12345678901234567890123456789012345678901234567890123456789012345678901234567890"
	empty_struct := struct{}{}

	// this "i" is not the same one as in the "for" loop below
	var i int = 9223372036854775807
	// in other words: "the scope of the variable"
	// According to the language specification “Go is lexically scoped using blocks”.
	// Basically this means that the variable exists within the nearest curly braces { } (a block)
	// including any nested curly braces (blocks), but not outside of them.

	for i := 0; i < 3; time.Sleep(3 * time.Second) {
		// see: https://medium.com/@felixge/the-sleepy-for-loop-in-go-4e6fee88c5ad#.x891gbs0r
		// a "sleepy for loop" with 2 retries
		fmt.Printf("trying: %v at: %v\n", i, time.Now())
		i++
		if i == 2 {
			fmt.Printf("done: i=%v at: %v\n", i, time.Now())
			break
		}
	}
	fmt.Printf("i=%v %v\n", i, time.Now())
	fmt.Printf("i=%v i64=%v s=%v\n", unsafe.Sizeof(i), unsafe.Sizeof(i64), unsafe.Sizeof(s))
	fmt.Printf("empty_struct=%T=%v\n", empty_struct, unsafe.Sizeof(empty_struct))
}
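One detail worth noting in Example #23's output: unsafe.Sizeof(s) reports the size of the string header (pointer plus length), never the backing bytes, so the 80-character string above still measures 16 bytes on a 64-bit build. A minimal demonstration:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	short, long := "x", "12345678901234567890"
	// Both print the header size (16 on 64-bit), regardless of length.
	fmt.Println(unsafe.Sizeof(short), unsafe.Sizeof(long))
}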
Example #24
func hPackedDIBFromHBITMAP(hBmp win.HBITMAP) (win.HGLOBAL, error) {
	var dib win.DIBSECTION
	if win.GetObject(win.HGDIOBJ(hBmp), unsafe.Sizeof(dib), unsafe.Pointer(&dib)) == 0 {
		return 0, newError("GetObject failed")
	}

	bmihSize := uintptr(unsafe.Sizeof(dib.DsBmih))
	pixelsSize := uintptr(
		int32(dib.DsBmih.BiBitCount)*dib.DsBmih.BiWidth*dib.DsBmih.BiHeight) / 8 // BiBitCount is in bits per pixel, so divide by 8 for bytes

	totalSize := bmihSize + pixelsSize

	hPackedDIB := win.GlobalAlloc(win.GHND, totalSize)
	dest := win.GlobalLock(hPackedDIB)
	defer win.GlobalUnlock(hPackedDIB)

	src := unsafe.Pointer(&dib.DsBmih)

	win.MoveMemory(dest, src, bmihSize)

	dest = unsafe.Pointer(uintptr(dest) + bmihSize)
	src = dib.DsBm.BmBits // the pixel data lives at BmBits (cf. Example #22), not after the header inside the DIBSECTION

	win.MoveMemory(dest, src, pixelsSize)

	return hPackedDIB, nil
}
Example #25
func (c *OCI8Conn) Prepare(query string) (driver.Stmt, error) {
	pquery := C.CString(query)
	defer C.free(unsafe.Pointer(pquery))
	var s, bp, defp unsafe.Pointer

	if rv := C.WrapOCIHandleAlloc(
		c.env,
		C.OCI_HTYPE_STMT,
		(C.size_t)(unsafe.Sizeof(bp)*2)); rv.rv != C.OCI_SUCCESS {
		return nil, ociGetError(c.err)
	} else {
		s = rv.ptr
		bp = rv.extra
		defp = unsafe.Pointer(uintptr(rv.extra) + unsafe.Sizeof(unsafe.Pointer(nil)))
	}

	if rv := C.OCIStmtPrepare(
		(*C.OCIStmt)(s),
		(*C.OCIError)(c.err),
		(*C.OraText)(unsafe.Pointer(pquery)),
		C.ub4(C.strlen(pquery)),
		C.ub4(C.OCI_NTV_SYNTAX),
		C.ub4(C.OCI_DEFAULT)); rv != C.OCI_SUCCESS {
		return nil, ociGetError(c.err)
	}

	ss := &OCI8Stmt{c: c, s: s, bp: (**C.OCIBind)(bp), defp: (**C.OCIDefine)(defp)}
	runtime.SetFinalizer(ss, (*OCI8Stmt).Close)
	return ss, nil
}
Example #26
func mapdelete(t unsafe.Pointer, m *map_, key unsafe.Pointer) {
	if m == nil {
		return
	}

	maptyp := (*mapType)(t)
	ptrsize := uintptr(unsafe.Sizeof(m.head.next))
	keysize := uintptr(maptyp.key.size)
	keyoffset := align(ptrsize, uintptr(maptyp.key.align))

	// Search for the entry with the specified key.
	keyalgs := unsafe.Pointer(maptyp.key.alg)
	keyeqptr := unsafe.Pointer(uintptr(keyalgs) + unsafe.Sizeof(maptyp.key.alg))
	keyeqfun := *(*unsafe.Pointer)(keyeqptr)
	var last *mapentry
	for ptr := m.head; ptr != nil; ptr = ptr.next {
		keyptr := unsafe.Pointer(uintptr(unsafe.Pointer(ptr)) + keyoffset)
		if eqalg(keyeqfun, keysize, key, keyptr) {
			if last == nil {
				m.head = ptr.next
			} else {
				last.next = ptr.next
			}
			free(unsafe.Pointer(ptr))
			m.length--
			return
		}
		last = ptr
	}
}
Example #27
func msigsave(mp *m) {
	smask := (*uint32)(unsafe.Pointer(&mp.sigmask))
	if unsafe.Sizeof(*smask) > unsafe.Sizeof(mp.sigmask) {
		throw("insufficient storage for signal mask")
	}
	sigprocmask(_SIG_SETMASK, nil, smask)
}
Example #28
func mapdelete(t unsafe.Pointer, m_, key unsafe.Pointer) {
	if m_ == nil {
		return
	}
	m := (*map_)(m_)

	maptyp := (*mapType)(t)
	ptrsize := uintptr(unsafe.Sizeof(m_))
	keysize := uintptr(maptyp.key.size)
	keyoffset := align(ptrsize, uintptr(maptyp.key.align))

	// Search for the entry with the specified key.
	keyalgs := unsafe.Pointer(maptyp.key.alg)
	keyeqptr := unsafe.Pointer(uintptr(keyalgs) + unsafe.Sizeof(maptyp.key.alg))
	keyeqfun := *(*unsafe.Pointer)(keyeqptr)
	for i := 0; i < len(*m); i++ {
		ptr := (*m)[i]
		keyptr := unsafe.Pointer(uintptr(unsafe.Pointer(ptr)) + keyoffset)
		if eqalg(keyeqfun, keysize, key, keyptr) {
			var tail []*mapentry
			if len(*m) > i+1 {
				tail = (*m)[i+1:]
			}
			(*m) = append((*m)[:i], tail...)
			free(unsafe.Pointer(ptr))
			return
		}
	}
}
Example #29
func (self *Job) GetProcesses() ([]uint, error) {
	var info wrappers.JOBOBJECT_BASIC_PROCESS_ID_LIST
	err := wrappers.QueryInformationJobObject(
		self.handle,
		wrappers.JobObjectBasicProcessIdList,
		(*byte)(unsafe.Pointer(&info)),
		uint32(unsafe.Sizeof(info)),
		nil)
	if err != nil && err != wrappers.ERROR_MORE_DATA {
		return nil, NewWindowsError("QueryInformationJobObject", err)
	}
	buf := make([]byte, unsafe.Sizeof(info)+unsafe.Sizeof(info.ProcessIdList[0])*uintptr(info.NumberOfAssignedProcesses-1))
	err = wrappers.QueryInformationJobObject(
		self.handle,
		wrappers.JobObjectBasicProcessIdList,
		&buf[0],
		uint32(len(buf)),
		nil)
	if err != nil {
		return nil, NewWindowsError("QueryInformationJobObject", err)
	}
	bufInfo := (*wrappers.JOBOBJECT_BASIC_PROCESS_ID_LIST)(unsafe.Pointer(&buf[0]))
	rawPids := make([]uintptr, bufInfo.NumberOfProcessIdsInList)
	wrappers.RtlMoveMemory(
		(*byte)(unsafe.Pointer(&rawPids[0])),
		(*byte)(unsafe.Pointer(&bufInfo.ProcessIdList[0])),
		uintptr(bufInfo.NumberOfProcessIdsInList)*unsafe.Sizeof(rawPids[0]))
	pids := make([]uint, bufInfo.NumberOfProcessIdsInList)
	for i, rawPid := range rawPids {
		pids[i] = uint(rawPid)
	}
	return pids, nil
}
Example #30
func Prepare(fun *Function, nargs uint) (*Callable, int) {
	var cif _C_ffi_cif
	var typp *_C_ffi_type
	ffi_type_void := ffi_type_for(TYPE_VOID, nil)
	// recover from panic here
	defer func() {
		if x := recover(); x != nil {
			println("panicking with value", x)
		}
		println("function returns normally") // executes only when hideErrors==true
	}()

	call := &Callable{}
	println("cif size: ", unsafe.Sizeof(cif))
	call.cifmem = Allocate(unsafe.Sizeof(cif) + 100)
	call.argimem = Allocate(unsafe.Sizeof(typp) * 10)
	call.cif = &call.cifs

	// (*C.ffi_cif)(call.cifmem.Ptr())
	// fmt.Println("cif ABI, nargs:", int(call.cif.abi), int(call.cif.nargs))
	call.cif.abi = FFI_DEFAULT_ABI
	call.cif.nargs = _C_uint(nargs)
	call.fun = fun
	call.cif.rtype = &ffi_type_void
	//stat          := 777
	println("cif address:", (unsafe.Pointer)(call.cif))
	println("void type address:", (unsafe.Pointer)(&ffi_type_void))
	println("cgo void type address:", (unsafe.Pointer)(&*_C_ffi_type_void))

	stat := _C_ffi_prep_cif(call.cif, FFI_DEFAULT_ABI, _C_uint(nargs), &ffi_type_void, nil)

	/*&C.ffi_type_void, (**C.ffi_type)(call.argimem.ptr))*/
	return call, int(stat)
}