// ReadKey performs a read operation by key.
func (s *Session) ReadKey(key *Key, offset, size uint64) <-chan ReadResult {
	responseCh := make(chan ReadResult, defaultVOLUME)
	onResultContext := NextContext()
	onFinishContext := NextContext()

	onResult := func(result *readResult) {
		responseCh <- result
	}

	onFinish := func(err error) {
		if err != nil {
			responseCh <- &readResult{err: err}
		}
		close(responseCh)
		Pool.Delete(onResultContext)
		Pool.Delete(onFinishContext)
	}

	Pool.Store(onResultContext, onResult)
	Pool.Store(onFinishContext, onFinish)

	C.session_read_data(s.session,
		C.context_t(onResultContext), C.context_t(onFinishContext),
		key.key, C.uint64_t(offset), C.uint64_t(size))
	return responseCh
}
// Open creates options and opens the database. If the database
// doesn't yet exist at the specified directory, one is initialized
// from scratch. The RocksDB Open and Close methods are reference
// counted such that subsequent Open calls to an already opened
// RocksDB instance only bump the reference count. The RocksDB is only
// closed when a sufficient number of Close calls are performed to
// bring the reference count down to 0.
func (r *RocksDB) Open() error {
	if r.rdb != nil {
		return nil
	}

	if len(r.dir) != 0 {
		log.Infof("opening rocksdb instance at %q", r.dir)
	}
	status := C.DBOpen(&r.rdb, goToCSlice([]byte(r.dir)),
		C.DBOptions{
			cache_size:      C.uint64_t(r.cacheSize),
			memtable_budget: C.uint64_t(r.memtableBudget),
			allow_os_buffer: C.bool(true),
			logging_enabled: C.bool(log.V(3)),
		})
	err := statusToError(status)
	if err != nil {
		return util.Errorf("could not open rocksdb instance: %s", err)
	}

	// Start a goroutine that will finish when the underlying handle
	// is deallocated. This is used to check a leak in tests.
	go func() {
		<-r.deallocated
	}()

	r.stopper.AddCloser(r)
	return nil
}
// Open creates options and opens the database. If the database
// doesn't yet exist at the specified directory, one is initialized
// from scratch. The RocksDB Open and Close methods are reference
// counted such that subsequent Open calls to an already opened
// RocksDB instance only bump the reference count. The RocksDB is only
// closed when a sufficient number of Close calls are performed to
// bring the reference count down to 0.
func (r *RocksDB) Open() error {
	if r.rdb != nil {
		return nil
	}

	if r.memtableBudget < minMemtableBudget {
		return util.Errorf("memtable budget must be at least %s: %s",
			humanize.IBytes(minMemtableBudget), humanizeutil.IBytes(r.memtableBudget))
	}

	var ver storageVersion
	if len(r.dir) != 0 {
		log.Infof("opening rocksdb instance at %q", r.dir)

		// Check the version number.
		var err error
		if ver, err = getVersion(r.dir); err != nil {
			return err
		}
		if ver < versionMinimum || ver > versionCurrent {
			// Instead of an error, we should call a migration if possible when
			// one is needed immediately following the DBOpen call.
			return fmt.Errorf("incompatible rocksdb data version, current:%d, on disk:%d, minimum:%d",
				versionCurrent, ver, versionMinimum)
		}
	} else {
		log.Infof("opening in memory rocksdb instance")

		// In memory dbs are always current.
		ver = versionCurrent
	}

	status := C.DBOpen(&r.rdb, goToCSlice([]byte(r.dir)),
		C.DBOptions{
			cache_size:      C.uint64_t(r.cacheSize),
			memtable_budget: C.uint64_t(r.memtableBudget),
			block_size:      C.uint64_t(envutil.EnvOrDefaultBytes("rocksdb_block_size", defaultBlockSize)),
			wal_ttl_seconds: C.uint64_t(envutil.EnvOrDefaultDuration("rocksdb_wal_ttl", 0).Seconds()),
			allow_os_buffer: C.bool(true),
			logging_enabled: C.bool(log.V(3)),
		})
	if err := statusToError(status); err != nil {
		return util.Errorf("could not open rocksdb instance: %s", err)
	}

	// Update or add the version file if needed.
	if ver < versionCurrent {
		if err := writeVersionFile(r.dir); err != nil {
			return err
		}
	}

	// Start a goroutine that will finish when the underlying handle
	// is deallocated. This is used to check a leak in tests.
	go func() {
		<-r.deallocated
	}()

	r.stopper.AddCloser(r)
	return nil
}
func (cons *TrailDBConstructor) Add(cookie string, timestamp int64, values []string) error {
	if len(cookie) != 32 {
		return errors.New("Cookie in the wrong format, needs to be 32 chars: " + cookie)
	}
	cookiebin, err := rawCookie(cookie)
	if err != nil {
		return err
	}

	var values_p *C.char
	ptrSize := unsafe.Sizeof(values_p)

	// Assign each byte slice to its appropriate offset.
	var currentString string
	passedLength := len(values)
	for i := 0; i < len(cons.ofields); i++ {
		element := (**C.char)(unsafe.Pointer(uintptr(cons.valuePtr) + uintptr(i)*ptrSize))
		if i+1 <= passedLength {
			currentString = values[i]
		} else {
			currentString = ""
		}
		cvalues := C.CString(currentString)
		defer C.free(unsafe.Pointer(cvalues))
		cons.valueLengths[i] = C.uint64_t(len(currentString))
		*element = cvalues
	}
	valueLengthsPtr := (*C.uint64_t)(unsafe.Pointer(&cons.valueLengths[0]))
	err1 := C.tdb_cons_add(cons.cons, cookiebin, C.uint64_t(timestamp), cons.valuePtr, valueLengthsPtr)
	if err1 != 0 {
		return errors.New(errToString(err1))
	}
	return nil
}
func (s *Session) setOrUpdateIndexes(operation int, key string, indexes map[string]string) <-chan Indexer {
	ekey, err := NewKey(key)
	if err != nil {
		panic(err)
	}
	defer ekey.Free()

	responseCh := make(chan Indexer, defaultVOLUME)

	var cindexes []*C.char
	var cdatas []C.struct_go_data_pointer

	for index, data := range indexes {
		cindex := C.CString(index) // free this
		defer C.free(unsafe.Pointer(cindex))
		cindexes = append(cindexes, cindex)

		cdata := C.new_data_pointer(
			C.CString(data), // freed by elliptics::data_pointer in std::vector ???
			C.int(len(data)),
		)
		cdatas = append(cdatas, cdata)
	}

	onResultContext := NextContext()
	onFinishContext := NextContext()

	onResult := func() {
		// It's never called. For the future.
	}

	onFinish := func(err error) {
		if err != nil {
			responseCh <- &indexResult{err: err}
		}
		close(responseCh)
		Pool.Delete(onResultContext)
		Pool.Delete(onFinishContext)
	}

	Pool.Store(onResultContext, onResult)
	Pool.Store(onFinishContext, onFinish)

	// TODO: Reimplement this with pointer on functions
	switch operation {
	case indexesSet:
		C.session_set_indexes(s.session,
			C.context_t(onResultContext), C.context_t(onFinishContext),
			ekey.key,
			(**C.char)(&cindexes[0]),
			(*C.struct_go_data_pointer)(&cdatas[0]),
			C.uint64_t(len(cindexes)))
	case indexesUpdate:
		C.session_update_indexes(s.session,
			C.context_t(onResultContext), C.context_t(onFinishContext),
			ekey.key,
			(**C.char)(&cindexes[0]),
			(*C.struct_go_data_pointer)(&cdatas[0]),
			C.uint64_t(len(cindexes)))
	}
	return responseCh
}
func (s *Session) IteratorStart(id *DnetRawID, ranges []DnetIteratorRange, itype uint64, iflags uint64, timeFrame ...time.Time) <-chan IteratorResult {
	ekey, onResultContext, onFinishContext, responseCh, err := iteratorHelper(id)
	if err != nil {
		return responseCh
	}
	defer ekey.Free()

	var ctime_begin, ctime_end C.struct_dnet_time
	if err := adjustTimeFrame(&ctime_begin, &ctime_end, timeFrame...); err != nil {
		context, pool_err := Pool.Get(onFinishContext)
		if pool_err != nil {
			panic("Unable to find session number")
		}
		context.(func(error))(err)
		return responseCh
	}
	if len(timeFrame) != 0 {
		iflags |= DNET_IFLAGS_TS_RANGE
	}
	iflags |= DNET_IFLAGS_KEY_RANGE

	cranges := convertRanges(ranges)

	C.session_start_iterator(s.session,
		C.context_t(onResultContext), C.context_t(onFinishContext),
		(*C.struct_go_iterator_range)(&cranges[0]),
		C.size_t(len(cranges)),
		ekey.key,
		C.uint64_t(itype),
		C.uint64_t(iflags),
		ctime_begin, ctime_end)
	return responseCh
}
func Write(m message.Message) ([]byte, error) {
	var b C.buf_t
	m2 := C.messageNew()
	switch m1 := m.(type) {
	case *message.Setup:
		m2.mtype = C.Setup
		s := (*C.struct_Setup)(unsafe.Pointer(ptr(m2.u[:])))
		s.ver_min = C.uint32_t(m1.Versions.Min)
		s.ver_max = C.uint32_t(m1.Versions.Max)
		for i, x := range m1.PeerNaClPublicKey {
			s.PeerNaClPublicKey[i] = C.uchar(x)
		}
		s.mtu = C.uint64_t(m1.Mtu)
		s.sharedTokens = C.uint64_t(m1.SharedTokens)
	default:
		panic("not impl yet")
	}
	err := C.messageAppend(m2, &b)
	if err != C.ERR_OK {
		return nil, GoError(err)
	}
	out := C.GoBytes(unsafe.Pointer(b.buf), C.int(b.len))
	C.bufDealloc(&b)
	return out, nil
}
func main() {
	r := C.uint64_t(math.Sqrt(number))
	var s C.size_t
	pp := C.primesieve_generate_primes(0, r, &s, C.UINT64_PRIMES)
	hdr := reflect.SliceHeader{
		Data: uintptr(pp),
		Len:  int(s),
		Cap:  int(s),
	}
	ps := *(*[]C.uint64_t)(unsafe.Pointer(&hdr)) // Forgive this criminal hackery
	n := C.uint64_t(number)
	for _, p := range ps {
		if p > r {
			break
		}
		if n%p == 0 {
			n /= p
			r = C.uint64_t(math.Sqrt(float64(n)))
		}
	}
	C.primesieve_free(unsafe.Pointer(pp))
	fmt.Println(n)
}
func (p *ProcessDesc) SetRlimit(resource int, soft uint64, hard uint64) error {
	ret := C.libct_process_desc_set_rlimit(p.desc, C.int(resource), C.uint64_t(soft), C.uint64_t(hard))
	if ret < 0 {
		return LibctError{int(ret)}
	}

	return nil
}
// SetAllocationHint sets the expected object size and expected write size of
// an object. As per latest doc, this may not actually do anything.
func (o *Object) SetAllocationHint(expectedObjectSize, expectedWriteSize uint64) {
	oid := C.CString(o.name)
	defer freeString(oid)
	es := C.uint64_t(expectedObjectSize)
	ews := C.uint64_t(expectedWriteSize)
	C.rados_set_alloc_hint(o.ioContext, oid, es, ews)
}
// Verify checks whether the block's nonce is valid.
func (l *Light) Verify(block pow.Block) bool {
	// TODO: do ethash_quick_verify before getCache in order
	// to prevent DOS attacks.
	var (
		blockNum   = block.NumberU64()
		difficulty = block.Difficulty()
		cache      = l.getCache(blockNum)
		dagSize    = C.ethash_get_datasize(C.uint64_t(blockNum))
	)
	if l.test {
		dagSize = dagSizeForTesting
	}
	if blockNum >= epochLength*2048 {
		glog.V(logger.Debug).Infof("block number %d too high, limit is %d", blockNum, epochLength*2048)
		return false
	}
	// Recompute the hash using the cache.
	hash := hashToH256(block.HashNoNonce())
	ret := C.ethash_light_compute_internal(cache.ptr, dagSize, hash, C.uint64_t(block.Nonce()))
	if !ret.success {
		return false
	}
	// Make sure cache is live until after the C call.
	// This is important because a GC might happen and execute
	// the finalizer before the call completes.
	_ = cache
	// The actual check.
	target := new(big.Int).Div(minDifficulty, difficulty)
	return h256ToHash(ret.result).Big().Cmp(target) <= 0
}
func Gentoken(key uint64) (token uint64, secret uint64) {
	random := uint64(C.randomint64())
	token = uint64(C.exchange(C.uint64_t(random)))
	secret = uint64(C.secret(C.uint64_t(key), C.uint64_t(random)))
	Debug("random:%x, token:%x, secret:%x\n", random, token, secret)
	return
}
func (s *Session) SetTimestamp(ts time.Time) {
	dtime := C.struct_dnet_time{
		tsec:  C.uint64_t(ts.Unix()),
		tnsec: C.uint64_t(ts.Nanosecond()),
	}
	C.session_set_timestamp(s.session, &dtime)
}
func NewDataLogger(output string, scale_when uint64, scale_data uint64) (*DataLogger, error) {
	o := C.CString(output)
	dl, err := C.ygor_data_logger_create(o, C.uint64_t(scale_when), C.uint64_t(scale_data))
	if dl == nil {
		return nil, err
	}
	return &DataLogger{dl}, nil
}
func (db *DB) SetTile(x uint64, y uint64, data []byte) error {
	rc := C.tc_set_tile(db.db, C.uint64_t(x), C.uint64_t(y),
		unsafe.Pointer(&data[0]), C.size_t(len(data)))
	if 0 != rc {
		errCStr := C.tc_last_error(db.db)
		return errors.New(C.GoString(errCStr))
	}
	return nil
}
func (c *CGoSystem) MapBuffer(handle uint32, offset, numBytes uint64, flags uint32) (result uint32, buf []byte) {
	var bufPtr unsafe.Pointer
	r := C.MojoMapBuffer(C.MojoHandle(handle), C.uint64_t(offset), C.uint64_t(numBytes), &bufPtr, C.MojoMapBufferFlags(flags))
	if r != C.MOJO_RESULT_OK {
		return uint32(r), nil
	}
	return uint32(r), unsafeByteSlice(bufPtr, int(numBytes))
}
func (r *RocksDB) open() error {
	var ver storageVersion
	if len(r.dir) != 0 {
		log.Infof(context.TODO(), "opening rocksdb instance at %q", r.dir)

		// Check the version number.
		var err error
		if ver, err = getVersion(r.dir); err != nil {
			return err
		}
		if ver < versionMinimum || ver > versionCurrent {
			// Instead of an error, we should call a migration if possible when
			// one is needed immediately following the DBOpen call.
			return fmt.Errorf("incompatible rocksdb data version, current:%d, on disk:%d, minimum:%d",
				versionCurrent, ver, versionMinimum)
		}
	} else {
		if log.V(2) {
			log.Infof(context.TODO(), "opening in memory rocksdb instance")
		}

		// In memory dbs are always current.
		ver = versionCurrent
	}

	blockSize := envutil.EnvOrDefaultBytes("COCKROACH_ROCKSDB_BLOCK_SIZE", defaultBlockSize)
	walTTL := envutil.EnvOrDefaultDuration("COCKROACH_ROCKSDB_WAL_TTL", 0).Seconds()
	status := C.DBOpen(&r.rdb, goToCSlice([]byte(r.dir)),
		C.DBOptions{
			cache:           r.cache.cache,
			block_size:      C.uint64_t(blockSize),
			wal_ttl_seconds: C.uint64_t(walTTL),
			allow_os_buffer: C.bool(true),
			logging_enabled: C.bool(log.V(3)),
			num_cpu:         C.int(runtime.NumCPU()),
			max_open_files:  C.int(r.maxOpenFiles),
		})
	if err := statusToError(status); err != nil {
		return errors.Errorf("could not open rocksdb instance: %s", err)
	}

	// Update or add the version file if needed.
	if ver < versionCurrent {
		if err := writeVersionFile(r.dir); err != nil {
			return err
		}
	}

	// Start a goroutine that will finish when the underlying handle
	// is deallocated. This is used to check a leak in tests.
	go func() {
		<-r.deallocated
	}()

	return nil
}
func (d *DataLogger) Record(series uint32, when uint64, data uint64) error {
	var dr C.struct_ygor_data_record
	dr.series = C.uint32_t(series)
	dr.when = C.uint64_t(when)
	dr.data = C.uint64_t(data)
	x, err := C.ygor_data_logger_record(d.dl, &dr)
	if x < 0 {
		return err
	}
	return nil
}
func dmTaskAddTargetFct(task *CDmTask, start, size uint64, ttype, params string) int {
	Cttype := C.CString(ttype)
	defer free(Cttype)

	Cparams := C.CString(params)
	defer free(Cparams)

	return int(C.dm_task_add_target((*C.struct_dm_task)(task),
		C.uint64_t(start), C.uint64_t(size), Cttype, Cparams))
}
func (u *uc) HookAdd(htype int, cb interface{}, extra ...uint64) (Hook, error) {
	var callback unsafe.Pointer
	var iarg1 C.int
	var uarg1, uarg2 C.uint64_t
	rangeMode := false
	switch htype {
	case HOOK_BLOCK, HOOK_CODE:
		rangeMode = true
		callback = C.hookCode_cgo
	case HOOK_MEM_READ, HOOK_MEM_WRITE, HOOK_MEM_READ | HOOK_MEM_WRITE:
		rangeMode = true
		callback = C.hookMemAccess_cgo
	case HOOK_INTR:
		callback = C.hookInterrupt_cgo
	case HOOK_INSN:
		iarg1 = C.int(extra[0])
		switch iarg1 {
		case X86_INS_IN:
			callback = C.hookX86In_cgo
		case X86_INS_OUT:
			callback = C.hookX86Out_cgo
		case X86_INS_SYSCALL, X86_INS_SYSENTER:
			callback = C.hookX86Syscall_cgo
		default:
			return 0, errors.New("Unknown instruction type.")
		}
	default:
		// special case for mask
		if htype&(HOOK_MEM_READ_UNMAPPED|HOOK_MEM_WRITE_UNMAPPED|HOOK_MEM_FETCH_UNMAPPED|
			HOOK_MEM_READ_PROT|HOOK_MEM_WRITE_PROT|HOOK_MEM_FETCH_PROT) != 0 {
			rangeMode = true
			callback = C.hookMemInvalid_cgo
		} else {
			return 0, errors.New("Unknown hook type.")
		}
	}
	var h2 C.uc_hook
	data := &HookData{u, cb}
	uptr := uintptr(unsafe.Pointer(data))
	if rangeMode {
		if len(extra) == 2 {
			uarg1 = C.uint64_t(extra[0])
			uarg2 = C.uint64_t(extra[1])
		} else {
			uarg1, uarg2 = 1, 0
		}
		C.uc_hook_add_u2(u.handle, &h2, C.uc_hook_type(htype), callback, C.uintptr_t(uptr), uarg1, uarg2)
	} else {
		C.uc_hook_add_i1(u.handle, &h2, C.uc_hook_type(htype), callback, C.uintptr_t(uptr), iarg1)
	}
	hookDataMap[uptr] = data
	hookToUintptr[Hook(h2)] = uptr
	return Hook(h2), nil
}
// CreateBasicType creates basic type debug metadata.
func (d *DIBuilder) CreateBasicType(t DIBasicType) Metadata {
	name := C.CString(t.Name)
	defer C.free(unsafe.Pointer(name))
	result := C.LLVMDIBuilderCreateBasicType(
		d.ref,
		name,
		C.uint64_t(t.SizeInBits),
		C.uint64_t(t.AlignInBits),
		C.unsigned(t.Encoding),
	)
	return Metadata{C: result}
}
// CloneRange clones length bytes starting at srcOffset of the object src into
// this write operation's object at dstOffset.
func (o *WriteOperation) CloneRange(dstOffset uint64, src string, srcOffset uint64, length uint64) error {
	if o.op == nil {
		return ErrAlreadyExecuted
	}
	csrc := C.CString(src)
	defer C.free(unsafe.Pointer(csrc))
	C.radosext_write_op_clone_range(o.op, C.uint64_t(dstOffset), csrc, C.uint64_t(srcOffset), C.size_t(length))
	return nil
}
// CreatePointerType creates pointer type debug metadata.
func (d *DIBuilder) CreatePointerType(t DIPointerType) Metadata {
	name := C.CString(t.Name)
	defer C.free(unsafe.Pointer(name))
	result := C.LLVMDIBuilderCreatePointerType(
		d.ref,
		t.Pointee.C,
		C.uint64_t(t.SizeInBits),
		C.uint64_t(t.AlignInBits),
		name,
	)
	return Metadata{C: result}
}
// Clone a range between two objects.
func (c *Context) CloneRange(dst string, dstOffset uint64, src string, srcOffset uint64, length uint64) error {
	cdst := C.CString(dst)
	defer C.free(unsafe.Pointer(cdst))
	csrc := C.CString(src)
	defer C.free(unsafe.Pointer(csrc))
	if cerr := C.rados_clone_range(c.ctx, cdst, C.uint64_t(dstOffset),
		csrc, C.uint64_t(srcOffset), C.size_t(length)); cerr < 0 {
		return radosReturnCodeError(cerr)
	}
	return nil
}
// CreatePointerType creates pointer type debug metadata.
func (d *DIBuilder) CreatePointerType(t DIPointerType) Value {
	name := C.CString(t.Name)
	result := C.DIBuilderCreatePointerType(
		d.ref,
		t.Pointee.C,
		C.uint64_t(t.SizeInBits),
		C.uint64_t(t.AlignInBits),
		name,
	)
	C.free(unsafe.Pointer(name))
	return Value{C: result}
}
// CreateBasicType creates basic type debug metadata.
func (d *DIBuilder) CreateBasicType(t DIBasicType) Value {
	name := C.CString(t.Name)
	result := C.DIBuilderCreateBasicType(
		d.ref,
		name,
		C.uint64_t(t.SizeInBits),
		C.uint64_t(t.AlignInBits),
		C.unsigned(t.Encoding),
	)
	C.free(unsafe.Pointer(name))
	return Value{C: result}
}
// WriteKey writes a blob by Key.
func (s *Session) WriteKey(key *Key, input io.Reader, offset, total_size uint64) <-chan Lookuper {
	responseCh := make(chan Lookuper, defaultVOLUME)
	onWriteContext := NextContext()
	onWriteFinishContext := NextContext()
	chunk_context := NextContext()

	onWriteResult := func(lookup *lookupResult) {
		responseCh <- lookup
	}

	onWriteFinish := func(err error) {
		if err != nil {
			responseCh <- &lookupResult{err: err}
		}
		close(responseCh)
		Pool.Delete(onWriteContext)
		Pool.Delete(onWriteFinishContext)
		Pool.Delete(chunk_context)
	}

	chunk, err := ioutil.ReadAll(input)
	if err != nil {
		responseCh <- &lookupResult{err: err}
		close(responseCh)
		return responseCh
	}
	if len(chunk) == 0 {
		responseCh <- &lookupResult{
			err: &DnetError{
				Code:    -22,
				Flags:   0,
				Message: "Invalid zero-length write request",
			},
		}
		close(responseCh)
		return responseCh
	}

	Pool.Store(onWriteContext, onWriteResult)
	Pool.Store(onWriteFinishContext, onWriteFinish)
	Pool.Store(chunk_context, chunk)

	C.session_write_data(s.session,
		C.context_t(onWriteContext), C.context_t(onWriteFinishContext),
		key.key, C.uint64_t(offset),
		(*C.char)(unsafe.Pointer(&chunk[0])), C.uint64_t(len(chunk)))
	return responseCh
}
// CreateArrayType creates array type debug metadata.
func (d *DIBuilder) CreateArrayType(t DIArrayType) Metadata {
	subscriptsSlice := make([]Metadata, len(t.Subscripts))
	for i, s := range t.Subscripts {
		subscriptsSlice[i] = d.getOrCreateSubrange(s.Lo, s.Count)
	}
	subscripts := d.getOrCreateArray(subscriptsSlice)
	result := C.LLVMDIBuilderCreateArrayType(
		d.ref,
		C.uint64_t(t.SizeInBits),
		C.uint64_t(t.AlignInBits),
		t.ElementType.C,
		subscripts.C,
	)
	return Metadata{C: result}
}
// ReadAt reads len(data) bytes from the given RADOS object at the byte
// offset off. It returns the number of bytes read and the error, if any.
// ReadAt always returns a non-nil error when n < len(data).
// At the end of file, that error is io.EOF.
//
// This function is adapted from the Go os.ReadAt() function.
func (o *Object) ReadAt(data []byte, off int64) (n int, err error) {
	cname := C.CString(o.name)
	defer C.free(unsafe.Pointer(cname))

	for len(data) > 0 {
		cdata, cdatalen := byteSliceToBuffer(data)
		coff := C.uint64_t(off)
		cerr := C.rados_read(o.c.ctx, cname, cdata, cdatalen, coff)
		if cerr == 0 {
			return n, io.EOF
		}
		if cerr < 0 {
			err = fmt.Errorf("RADOS read %s: %s", o.name, strerror(cerr))
			break
		}
		n += int(cerr)
		data = data[cerr:]
		off += int64(cerr)
	}
	return
}
func (soc *Socket) setUInt64(opt C.int, value uint64) error {
	val := C.uint64_t(value)
	if i, err := C.zmq_setsockopt(soc.soc, opt, unsafe.Pointer(&val), C.size_t(unsafe.Sizeof(val))); i != 0 {
		return errget(err)
	}
	return nil
}