// ScanNode reads all records in the specified namespace and set for one node only.
// If the policy is nil, the default relevant policy will be used.
func (clnt *Client) ScanNode(apolicy *ScanPolicy, node *Node, namespace string, setName string, binNames ...string) (*Recordset, error) {
	policy := *clnt.getUsableScanPolicy(apolicy)

	// results channel must be async for performance
	taskId := uint64(xornd.Int64())
	res := newRecordset(policy.RecordQueueSize, 1, taskId)

	go clnt.scanNode(&policy, node, res, namespace, setName, taskId, binNames...)
	return res, nil
}
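
A minimal usage sketch for ScanNode, assuming a client connected through the usual as.NewClient constructor and a node taken from Client.GetNodes(); the host address, namespace ("test") and set ("demo") are placeholders, and the Results() accessor on Recordset is assumed to be available as in recent releases of the Go client.

package main

import (
	"log"

	as "github.com/aerospike/aerospike-client-go"
)

func scanSingleNode() {
	// "127.0.0.1", "test" and "demo" are placeholders for your cluster, namespace and set.
	client, err := as.NewClient("127.0.0.1", 3000)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	nodes := client.GetNodes()
	if len(nodes) == 0 {
		log.Fatal("no nodes available")
	}

	// Scan one node only; a nil policy uses the default ScanPolicy.
	recordset, err := client.ScanNode(nil, nodes[0], "test", "demo")
	if err != nil {
		log.Fatal(err)
	}

	// Drain the result channel; records and errors arrive interleaved.
	for res := range recordset.Results() {
		if res.Err != nil {
			log.Println("scan error:", res.Err)
			continue
		}
		log.Println(res.Record.Bins)
	}
}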

// ScanNodeObjects reads all records in the specified namespace and set for one node only,
// and marshals the results into objects sent on the provided objChan channel.
// If the policy is nil, the default relevant policy will be used.
// objChan will be closed after all the records are read.
func (clnt *Client) ScanNodeObjects(apolicy *ScanPolicy, node *Node, objChan interface{}, namespace string, setName string, binNames ...string) (*Recordset, error) {
	policy := *clnt.getUsableScanPolicy(apolicy)

	// results channel must be async for performance
	taskId := uint64(xornd.Int64())
	os := newObjectset(reflect.ValueOf(objChan), 1, taskId)
	res := &Recordset{
		objectset: *os,
	}

	go clnt.scanNodeObjects(&policy, node, res, namespace, setName, taskId, binNames...)
	return res, nil
}
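
A minimal sketch of ScanNodeObjects with a typed channel, building on the imports from the previous sketch; the Person struct, its bin tags, and the namespace/set names are illustrative assumptions, not part of the library.

// Person is a hypothetical type; its `as` tags map struct fields to bin names.
type Person struct {
	Name string `as:"name"`
	Age  int    `as:"age"`
}

func scanNodeIntoObjects(client *as.Client, node *as.Node) error {
	// The channel element type determines what each record is unmarshalled into.
	objChan := make(chan *Person, 64)

	_, err := client.ScanNodeObjects(nil, node, objChan, "test", "people")
	if err != nil {
		return err
	}

	// objChan is closed by the client once the node has been fully scanned.
	for p := range objChan {
		log.Println(p.Name, p.Age)
	}
	return nil
}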

// ScanAllObjects reads all records in the specified namespace and set from all nodes,
// marshalling the results into objects sent on the provided objChan channel.
// If the policy's ConcurrentNodes is set, each server node will be read in
// parallel; otherwise, server nodes are read sequentially.
// If the policy is nil, the default relevant policy will be used.
func (clnt *Client) ScanAllObjects(apolicy *ScanPolicy, objChan interface{}, namespace string, setName string, binNames ...string) (*Recordset, error) {
	policy := *clnt.getUsableScanPolicy(apolicy)

	nodes := clnt.cluster.GetNodes()
	if len(nodes) == 0 {
		return nil, NewAerospikeError(SERVER_NOT_AVAILABLE, "Scan failed because cluster is empty.")
	}

	if policy.WaitUntilMigrationsAreOver {
		// wait until all migrations are finished
		if err := clnt.cluster.WaitUntillMigrationIsFinished(policy.Timeout); err != nil {
			return nil, err
		}
	}

	// result recordset
	taskId := uint64(xornd.Int64())
	os := newObjectset(reflect.ValueOf(objChan), len(nodes), taskId)
	res := &Recordset{
		objectset: *os,
	}

	// the whole call should be wrapped in a goroutine
	if policy.ConcurrentNodes {
		for _, node := range nodes {
			go func(node *Node) {
				if err := clnt.scanNodeObjects(&policy, node, res, namespace, setName, taskId, binNames...); err != nil {
					res.sendError(err)
				}
			}(node)
		}
	} else {
		// scan nodes one by one
		go func() {
			for _, node := range nodes {
				if err := clnt.scanNodeObjects(&policy, node, res, namespace, setName, taskId, binNames...); err != nil {
					res.sendError(err)
					continue
				}
			}
		}()
	}

	return res, nil
}
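
A minimal sketch for ScanAllObjects reusing the hypothetical Person type from the previous sketch; ConcurrentNodes is the ScanPolicy field used above, while the channel size and the namespace/set names are arbitrary placeholders.

func scanAllIntoObjects(client *as.Client) error {
	// Scan every node in parallel; set ConcurrentNodes to false for a
	// sequential, node-by-node scan.
	policy := as.NewScanPolicy()
	policy.ConcurrentNodes = true

	// Person is the hypothetical type defined in the sketch above.
	objChan := make(chan *Person, 256)
	_, err := client.ScanAllObjects(policy, objChan, "test", "people")
	if err != nil {
		return err
	}

	// The channel is closed once every node has reported completion.
	for p := range objChan {
		log.Println(p.Name, p.Age)
	}
	return nil
}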

// Benchmark_xor_rand_fast_pool exercises xor.Int64 in a tight loop to measure
// the per-call cost of the pooled xor-shift random number generator.
func Benchmark_xor_rand_fast_pool(b *testing.B) {
	for i := 0; i < b.N; i++ {
		xor.Int64()
	}
}
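
For reference, the benchmark above can also be driven programmatically through the standard library's testing.Benchmark helper; the sketch below assumes the same xor package alias as the benchmark file and imports of "fmt" and "testing".

func reportXorRandThroughput() {
	// Runs Benchmark_xor_rand_fast_pool without invoking `go test -bench`.
	result := testing.Benchmark(Benchmark_xor_rand_fast_pool)
	fmt.Printf("%d iterations, %d ns/op\n", result.N, result.NsPerOp())
}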