func (pi *storeIndex) ScanEntries(requestId string, limit int64, cons datastore.ScanConsistency,
	vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())

	entry := datastore.IndexEntry{PrimaryKey: pi.keyspace.namespace.store.actualStore.Id()}
	conn.EntryChannel() <- &entry
}
Example #2
// Scan implements the Index{} interface.
func (si *secondaryIndex) Scan(
	requestId string, span *datastore.Span, distinct bool, limit int64,
	cons datastore.ScanConsistency, vector timestamp.Vector,
	conn *datastore.IndexConnection) {

	entryChannel := conn.EntryChannel()
	defer close(entryChannel)

	client := si.gsi.gsiClient
	if span.Seek != nil {
		seek := values2SKey(span.Seek)
		client.Lookup(
			si.defnID, []c.SecondaryKey{seek}, distinct, limit,
			n1ql2GsiConsistency[cons], vector2ts(vector),
			makeResponsehandler(client, conn))

	} else {
		low, high := values2SKey(span.Range.Low), values2SKey(span.Range.High)
		incl := n1ql2GsiInclusion[span.Range.Inclusion]
		client.Range(
			si.defnID, low, high, incl, distinct, limit,
			n1ql2GsiConsistency[cons], vector2ts(vector),
			makeResponsehandler(client, conn))
	}
}
func (pi *dualIndex) ScanEntries(requestId string, limit int64, cons datastore.ScanConsistency,
	vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())

	entry := datastore.IndexEntry{PrimaryKey: KEYSPACE_NAME_DUAL}
	conn.EntryChannel() <- &entry
}
func (pi *requestLogIndex) ScanEntries(requestId string, limit int64, cons datastore.ScanConsistency,
	vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())
	accounting.RequestsForeach(func(id string, entry *accounting.RequestLogEntry) {
		indexEntry := datastore.IndexEntry{PrimaryKey: id}
		conn.EntryChannel() <- &indexEntry
	})
}
func (pi *preparedsIndex) ScanEntries(requestId string, limit int64, cons datastore.ScanConsistency,
	vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())
	names := plan.NamePrepareds()

	for _, name := range names {
		entry := datastore.IndexEntry{PrimaryKey: name}
		conn.EntryChannel() <- &entry
	}
}
Example #6
// ScanEntries implements the PrimaryIndex{} interface.
func (si *secondaryIndex) ScanEntries(
	requestId string, limit int64, cons datastore.ScanConsistency,
	vector timestamp.Vector, conn *datastore.IndexConnection) {

	entryChannel := conn.EntryChannel()
	defer close(entryChannel)

	client := si.gsi.gsiClient
	client.ScanAll(
		si.defnID, limit,
		n1ql2GsiConsistency[cons], vector2ts(vector),
		makeResponsehandler(client, conn))
}
Example #7
func (pi *primaryIndex) ScanEntries(requestId string, limit int64, cons datastore.ScanConsistency,
	vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())

	if limit == 0 {
		limit = int64(pi.keyspace.nitems)
	}

	for i := 0; i < pi.keyspace.nitems && int64(i) < limit; i++ {
		entry := datastore.IndexEntry{PrimaryKey: strconv.Itoa(i)}
		conn.EntryChannel() <- &entry
	}
}
Example #8
func makeResponsehandler(
	client *qclient.GsiClient, conn *datastore.IndexConnection) qclient.ResponseHandler {

	entryChannel := conn.EntryChannel()
	stopChannel := conn.StopChannel()

	return func(data qclient.ResponseReader) bool {

		if err := data.Error(); err != nil {
			conn.Error(n1qlError(client, err))
			return false
		}
		skeys, pkeys, err := data.GetEntries()
		if err == nil {
			for i, skey := range skeys {
				// Primary-key is mandatory.
				e := &datastore.IndexEntry{
					PrimaryKey: string(pkeys[i]),
				}
				e.EntryKey = skey2Values(skey)

				fmsg := "current enqueued length: %d (max %d)\n"
				l.Tracef(fmsg, len(entryChannel), cap(entryChannel))
				select {
				case entryChannel <- e:
				case <-stopChannel:
					return false
				}
			}
			return true
		}
		conn.Error(errors.NewError(nil, err.Error()))
		return false
	}
}
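The handler above shows the producer side of the IndexConnection protocol: entries are sent on the entry channel, and a receive on the stop channel aborts the scan. The following is a minimal consumer-side sketch (hypothetical code, not part of the source), assuming the same datastore package used by the examples in this listing:

// Hypothetical consumer sketch: drain entries until the producer closes the
// channel, or signal an early stop once enough keys have been collected.
func drainEntries(conn *datastore.IndexConnection, max int) []string {
	keys := make([]string, 0, max)
	for entry := range conn.EntryChannel() { // loop ends when the producer closes the channel
		keys = append(keys, entry.PrimaryKey)
		if len(keys) >= max {
			// Ask the producer to stop early; mirrors notifyConn() later in this listing.
			select {
			case conn.StopChannel() <- false:
			default:
			}
			break
		}
	}
	return keys
}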
func (pi *namespaceIndex) ScanEntries(requestId string, limit int64, cons datastore.ScanConsistency,
	vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())

	namespaceIds, err := pi.keyspace.namespace.store.actualStore.NamespaceIds()
	if err == nil {
		for i, namespaceId := range namespaceIds {
			if limit > 0 && int64(i) > limit {
				break
			}

			entry := datastore.IndexEntry{PrimaryKey: namespaceId}
			conn.EntryChannel() <- &entry
		}
	}
}
Example #10
func (pi *primaryIndex) Scan(requestId string, span *datastore.Span, distinct bool, limit int64,
	cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())

	// For primary indexes, bounds must always be strings, so we
	// can just enforce that directly
	low, high := "", ""

	// Ensure that lower bound is a string, if any
	if len(span.Range.Low) > 0 {
		a := span.Range.Low[0].Actual()
		switch a := a.(type) {
		case string:
			low = a
		default:
			conn.Error(errors.NewOtherDatastoreError(nil, fmt.Sprintf("Invalid lower bound %v of type %T.", a, a)))
			return
		}
	}

	// Ensure that upper bound is a string, if any
	if len(span.Range.High) > 0 {
		a := span.Range.High[0].Actual()
		switch a := a.(type) {
		case string:
			high = a
		default:
			conn.Error(errors.NewOtherDatastoreError(nil, fmt.Sprintf("Invalid upper bound %v of type %T.", a, a)))
			return
		}
	}

	if limit == 0 {
		limit = int64(pi.keyspace.nitems)
	}

	for i := 0; i < pi.keyspace.nitems && int64(i) < limit; i++ {
		id := strconv.Itoa(i)

		if low != "" &&
			(id < low ||
				(id == low && (span.Range.Inclusion&datastore.LOW == 0))) {
			continue
		}

		low = ""

		if high != "" &&
			(id > high ||
				(id == high && (span.Range.Inclusion&datastore.HIGH == 0))) {
			break
		}

		entry := datastore.IndexEntry{PrimaryKey: id}
		conn.EntryChannel() <- &entry
	}
}
Example #11
func (this *spanScan) scan(context *Context, conn *datastore.IndexConnection) {
	defer context.Recover() // Recover from any panic

	dspan, err := evalSpan(this.span, context)
	if err != nil {
		context.Error(errors.NewEvaluationError(err, "span"))
		close(conn.EntryChannel())
		return
	}

	limit := int64(math.MaxInt64)
	if this.plan.Limit() != nil {
		lv, err := this.plan.Limit().Evaluate(nil, context)
		if err == nil && lv.Type() == value.NUMBER {
			limit = int64(lv.Actual().(float64))
		}
	}

	this.plan.Index().Scan(context.RequestId(), dspan, this.plan.Distinct(), limit,
		context.ScanConsistency(), context.ScanVector(), conn)
}
func (pi *keyspaceIndex) Scan(requestId string, span *datastore.Span, distinct bool, limit int64,
	cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())

	val := ""

	a := span.Seek[0].Actual()
	switch a := a.(type) {
	case string:
		val = a
	default:
		conn.Error(errors.NewSystemDatastoreError(nil, fmt.Sprintf("Invalid seek value %v of type %T.", a, a)))
		return
	}

	ids := strings.SplitN(val, "/", 2)
	if len(ids) != 2 {
		return
	}

	namespace, _ := pi.keyspace.namespace.store.actualStore.NamespaceById(ids[0])
	if namespace == nil {
		return
	}

	keyspace, _ := namespace.KeyspaceById(ids[1])
	if keyspace != nil {
		entry := datastore.IndexEntry{PrimaryKey: fmt.Sprintf("%s/%s", namespace.Id(), keyspace.Id())}
		conn.EntryChannel() <- &entry
	}
}
Example #13
func (this *PrimaryScan) newIndexConnection(context *Context) *datastore.IndexConnection {
	var conn *datastore.IndexConnection

	// Use keyspace count to create a sized index connection
	keyspace := this.plan.Keyspace()
	size, err := keyspace.Count()
	if err == nil {
		if size <= 0 {
			size = 1
		}

		conn, err = datastore.NewSizedIndexConnection(size, context)
		conn.SetPrimary()
	}

	// Use non-sized API and log error
	if err != nil {
		conn = datastore.NewIndexConnection(context)
		conn.SetPrimary()
		logging.Errorp("PrimaryScan.newIndexConnection ", logging.Pair{"error", err})
	}

	return conn
}
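A hedged sketch (hypothetical wiring, not from the source) of how a connection created this way is typically paired with a scan: the scan runs in its own goroutine and closes the entry channel when it finishes (via the deferred close seen in the examples above), while the caller drains entries from the connection.

// Hypothetical wiring sketch, assuming the datastore and timestamp packages
// used by the surrounding examples.
func runScanEntries(requestId string, idx datastore.PrimaryIndex, limit int64,
	cons datastore.ScanConsistency, vector timestamp.Vector,
	conn *datastore.IndexConnection) {

	// The scan closes conn.EntryChannel() when it is done.
	go idx.ScanEntries(requestId, limit, cons, vector, conn)

	for entry := range conn.EntryChannel() {
		// Each entry carries at least a primary key.
		fmt.Printf("scanned key: %s\n", entry.PrimaryKey)
	}
}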
func (pi *indexIndex) ScanEntries(requestId string, limit int64, cons datastore.ScanConsistency,
	vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())

	// eliminate duplicate keys
	keys := make(map[string]string, 64)

	actualStore := pi.keyspace.namespace.store.actualStore
	namespaceIds, err := actualStore.NamespaceIds()
	if err == nil {
		for _, namespaceId := range namespaceIds {
			namespace, err := actualStore.NamespaceById(namespaceId)
			if err == nil {
				keyspaceIds, err := namespace.KeyspaceIds()
				if err == nil {
					for _, keyspaceId := range keyspaceIds {
						keyspace, err := namespace.KeyspaceById(keyspaceId)
						if err == nil {
							indexers, err := keyspace.Indexers()
							if err == nil {
								for _, indexer := range indexers {
									err = indexer.Refresh()
									if err != nil {
										logging.Errorf("Refreshing indexes failed %v", err)
										conn.Error(errors.NewSystemDatastoreError(err, ""))
										// don't return here but continue processing, because other keyspaces may still be responsive. MB-15834
										continue
									}

									indexIds, err := indexer.IndexIds()
									if err == nil {
										for _, indexId := range indexIds {
											key := fmt.Sprintf("%s/%s/%s", namespaceId, keyspaceId, indexId)
											keys[key] = key
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}

	for k := range keys {
		entry := datastore.IndexEntry{PrimaryKey: k}
		conn.EntryChannel() <- &entry
	}
}
func (pi *indexIndex) ScanEntries(requestId string, limit int64, cons datastore.ScanConsistency,
	vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())

	// eliminate duplicate keys
	keys := make(map[string]string, 64)

	actualStore := pi.keyspace.namespace.store.actualStore
	namespaceIds, err := actualStore.NamespaceIds()
	if err == nil {
		for _, namespaceId := range namespaceIds {
			namespace, err := actualStore.NamespaceById(namespaceId)
			if err == nil {
				keyspaceIds, err := namespace.KeyspaceIds()
				if err == nil {
					for _, keyspaceId := range keyspaceIds {
						keyspace, err := namespace.KeyspaceById(keyspaceId)
						if err == nil {
							indexers, err := keyspace.Indexers()
							if err == nil {
								for _, indexer := range indexers {
									err = indexer.Refresh()
									if err != nil {
										conn.Error(errors.NewSystemDatastoreError(err, ""))
										return
									}

									indexIds, err := indexer.IndexIds()
									if err == nil {
										for _, indexId := range indexIds {
											key := fmt.Sprintf("%s/%s/%s", namespaceId, keyspaceId, indexId)
											keys[key] = key
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}

	for k := range keys {
		entry := datastore.IndexEntry{PrimaryKey: k}
		conn.EntryChannel() <- &entry
	}
}
func (pi *activeRequestsIndex) ScanEntries(requestId string, limit int64, cons datastore.ScanConsistency,
	vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())
	numRequests, err := server.ActiveRequestsCount()
	if err != nil {
		conn.Error(err)
		return
	}
	requestIds := make([]string, 0, numRequests)
	server.ActiveRequestsForEach(func(id string, request server.Request) {
		requestIds = append(requestIds, id)
	})

	for _, name := range requestIds {
		entry := datastore.IndexEntry{PrimaryKey: name}
		conn.EntryChannel() <- &entry
	}
}
Example #17
func (pi *dualIndex) Scan(requestId string, span *datastore.Span, distinct bool, limit int64,
	cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())

	val := ""

	a := span.Seek[0].Actual()
	switch a := a.(type) {
	case string:
		val = a
	default:
		conn.Error(errors.NewSystemDatastoreError(nil, fmt.Sprintf("Invalid seek value %v of type %T.", a, a)))
		return
	}

	if strings.EqualFold(val, KEYSPACE_NAME_DUAL) {
		entry := datastore.IndexEntry{PrimaryKey: KEYSPACE_NAME_DUAL}
		conn.EntryChannel() <- &entry
	}
}
Example #18
func (pi *primaryIndex) ScanEntries(requestId string, limit int64, cons datastore.ScanConsistency,
	vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())

	dirEntries, er := ioutil.ReadDir(pi.keyspace.path())
	if er != nil {
		conn.Error(errors.NewFileDatastoreError(er, ""))
		return
	}

	for i, dirEntry := range dirEntries {
		if limit > 0 && int64(i) > limit {
			break
		}
		if !dirEntry.IsDir() {
			entry := datastore.IndexEntry{PrimaryKey: documentPathToId(dirEntry.Name())}
			conn.EntryChannel() <- &entry
		}
	}
}
Example #19
func notifyConn(conn *datastore.IndexConnection) {
	select {
	case conn.StopChannel() <- false:
	default:
	}
}
func (pi *activeRequestsIndex) Scan(requestId string, span *datastore.Span, distinct bool, limit int64,
	cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())
	// NOP
}
Example #21
func (vi *viewIndex) Scan(requestId string, span *datastore.Span, distinct bool, limit int64,
	cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())

	// For primary indexes, bounds must always be strings, so we
	// can just enforce that directly

	viewOptions := generateViewOptions(cons, span)
	viewRowChannel := make(chan cb.ViewRow)
	viewErrChannel := make(chan errors.Error)
	doneChannel := make(chan bool)
	defer close(doneChannel)

	go WalkViewInBatches(viewRowChannel, viewErrChannel, doneChannel, vi.keyspace.cbbucket,
		vi.DDocName(), vi.ViewName(), vi.IsPrimary(), viewOptions, _BATCH_SIZE, limit)

	var viewRow cb.ViewRow
	var err errors.Error
	sentRows := false
	ok := true
	numRows := 0
	errs := make([]error, 0, 10)
	for ok {
		select {
		case viewRow, ok = <-viewRowChannel:
			if ok {
				entry := datastore.IndexEntry{PrimaryKey: viewRow.ID}

				// try to add the view row key as the entry key (unless this is _all_docs)
				if vi.DDocName() != "" /* FIXME && vi.IsPrimary() == false */ {
					lookupValue, err := convertCouchbaseViewKeyToLookupValue(viewRow.Key)
					if err == nil {
						entry.EntryKey = lookupValue
					} else {
						errs = append(errs, fmt.Errorf("unable to convert index key to lookup value err:%v key %v", err, entry))
					}
				}
				select {
				case conn.EntryChannel() <- &entry:
					sentRows = true
					numRows++
				case <-conn.StopChannel():
					logging.Debugf(" Asked to stop after sending %v rows", numRows)
					ok = false
				}
			}
		case err, ok = <-viewErrChannel:
			if err != nil {
				logging.Errorf("%v", err)
				// check to possibly detect a bucket that was already deleted
				if !sentRows {
					logging.Debugf("Checking bucket URI: %v", vi.keyspace.cbbucket.URI)
					_, err := http.Get(vi.keyspace.cbbucket.URI)
					if err != nil {
						logging.Errorf("%v", err)

						// remove this specific bucket from the pool cache
						vi.keyspace.namespace.lock.Lock()
						delete(vi.keyspace.namespace.keyspaceCache, vi.keyspace.Name())
						vi.keyspace.namespace.lock.Unlock()
						// close this bucket
						vi.keyspace.Release()
						// ask the pool to refresh
						vi.keyspace.namespace.refresh(true)
						// bucket doesn't exist any more
						conn.Error(errors.NewCbViewsAccessError(nil, "keyspace "+vi.keyspace.Name()+" or view index missing"))
						return
					}

				}

				conn.Error(err)
				return
			}
		}
	}

	if len(errs) > 0 {
		logging.Debugf("Errors with converting lookup value to entry key. num errs %v", len(errs))
	}

	logging.Debugf("Number of entries fetched from the index %d", numRows)

}
Example #22
func (pi *primaryIndex) Scan(requestId string, span *datastore.Span, distinct bool, limit int64,
	cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())

	// For primary indexes, bounds must always be strings, so we
	// can just enforce that directly
	low, high := "", ""

	// Ensure that lower bound is a string, if any
	if len(span.Range.Low) > 0 {
		a := span.Range.Low[0].Actual()
		switch a := a.(type) {
		case string:
			low = a
		default:
			conn.Error(errors.NewFileDatastoreError(nil, fmt.Sprintf("Invalid lower bound %v of type %T.", a, a)))
			return
		}
	}

	// Ensure that upper bound is a string, if any
	if len(span.Range.High) > 0 {
		a := span.Range.High[0].Actual()
		switch a := a.(type) {
		case string:
			high = a
		default:
			conn.Error(errors.NewFileDatastoreError(nil, fmt.Sprintf("Invalid upper bound %v of type %T.", a, a)))
			return
		}
	}

	dirEntries, er := ioutil.ReadDir(pi.keyspace.path())
	if er != nil {
		conn.Error(errors.NewFileDatastoreError(er, ""))
		return
	}

	var n int64 = 0
	for _, dirEntry := range dirEntries {

		fmt.Printf("Dir entry being scanned %v", dirEntry.Name())
		if limit > 0 && n > limit {
			break
		}

		id := documentPathToId(dirEntry.Name())

		if low != "" &&
			(id < low ||
				(id == low && (span.Range.Inclusion&datastore.LOW == 0))) {
			continue
		}

		low = ""

		if high != "" &&
			(id > high ||
				(id == high && (span.Range.Inclusion&datastore.HIGH == 0))) {
			break
		}

		if !dirEntry.IsDir() {
			entry := datastore.IndexEntry{PrimaryKey: id}
			conn.EntryChannel() <- &entry
			n++
		}
	}
}