func (pi *storeIndex) ScanEntries(limit int64, cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) { defer close(conn.EntryChannel()) entry := datastore.IndexEntry{PrimaryKey: pi.keyspace.namespace.store.actualStore.Id()} conn.EntryChannel() <- &entry }
func (pi *dualIndex) ScanEntries(limit int64, cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) { defer close(conn.EntryChannel()) entry := datastore.IndexEntry{PrimaryKey: KEYSPACE_NAME_DUAL} conn.EntryChannel() <- &entry }
func (this *spanScan) scan(context *Context, conn *datastore.IndexConnection) { defer context.Recover() // Recover from any panic dspan, err := evalSpan(this.span, context) if err != nil { context.Error(errors.NewError(err, "Error evaluating span.")) close(conn.EntryChannel()) return } this.plan.Index().Scan(dspan, this.plan.Distinct(), this.plan.Limit(), context.ScanConsistency(), context.ScanVector(), conn) }
func (pi *primaryIndex) ScanEntries(limit int64, cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) { defer close(conn.EntryChannel()) if limit == 0 { limit = int64(pi.keyspace.nitems) } for i := 0; i < pi.keyspace.nitems && int64(i) < limit; i++ { entry := datastore.IndexEntry{PrimaryKey: strconv.Itoa(i)} conn.EntryChannel() <- &entry } }
func (pi *namespaceIndex) ScanEntries(limit int64, cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) { defer close(conn.EntryChannel()) namespaceIds, err := pi.keyspace.namespace.store.actualStore.NamespaceIds() if err == nil { for i, namespaceId := range namespaceIds { if limit > 0 && int64(i) > limit { break } entry := datastore.IndexEntry{PrimaryKey: namespaceId} conn.EntryChannel() <- &entry } } }
// Scan streams mock primary keys that fall within the span's range.
// Bounds are validated to be strings; limit 0 means "all items".
// NOTE(review): keys are strconv.Itoa(i) but compared lexicographically
// (e.g. "10" < "9"), so the range semantics are string order, not numeric
// order — presumably acceptable for this mock datastore; confirm.
func (pi *primaryIndex) Scan(span *datastore.Span, distinct bool, limit int64, cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())

	// For primary indexes, bounds must always be strings, so we
	// can just enforce that directly
	low, high := "", ""

	// Ensure that lower bound is a string, if any
	if len(span.Range.Low) > 0 {
		a := span.Range.Low[0].Actual()
		switch a := a.(type) {
		case string:
			low = a
		default:
			conn.Error(errors.NewOtherDatastoreError(nil, fmt.Sprintf("Invalid lower bound %v of type %T.", a, a)))
			return
		}
	}

	// Ensure that upper bound is a string, if any
	if len(span.Range.High) > 0 {
		a := span.Range.High[0].Actual()
		switch a := a.(type) {
		case string:
			high = a
		default:
			conn.Error(errors.NewOtherDatastoreError(nil, fmt.Sprintf("Invalid upper bound %v of type %T.", a, a)))
			return
		}
	}

	// Zero limit means scan every item.
	if limit == 0 {
		limit = int64(pi.keyspace.nitems)
	}

	for i := 0; i < pi.keyspace.nitems && int64(i) < limit; i++ {
		id := strconv.Itoa(i)

		// Skip ids below the lower bound (or equal to it when the
		// span excludes the low endpoint).
		if low != "" && (id < low || (id == low && (span.Range.Inclusion&datastore.LOW == 0))) {
			continue
		}
		// Once an id has passed the low check, clear the bound so later
		// iterations skip the comparison.
		// NOTE(review): because string order differs from generation
		// order, this also suppresses the low check for subsequent ids
		// that would otherwise have failed it — verify this is intended.
		low = ""
		// Stop at the first id above the upper bound (or equal to it
		// when the span excludes the high endpoint).
		if high != "" && (id > high || (id == high && (span.Range.Inclusion&datastore.HIGH == 0))) {
			break
		}
		entry := datastore.IndexEntry{PrimaryKey: id}
		conn.EntryChannel() <- &entry
	}
}
func (pi *keyspaceIndex) Scan(span *datastore.Span, distinct bool, limit int64, cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) { defer close(conn.EntryChannel()) val := "" a := span.Seek[0].Actual() switch a := a.(type) { case string: val = a default: conn.Error(errors.NewSystemDatastoreError(nil, fmt.Sprintf("Invalid seek value %v of type %T.", a, a))) return } ids := strings.SplitN(val, "/", 2) if len(ids) != 2 { return } namespace, _ := pi.keyspace.namespace.store.actualStore.NamespaceById(ids[0]) if namespace == nil { return } keyspace, _ := namespace.KeyspaceById(ids[1]) if keyspace != nil { entry := datastore.IndexEntry{PrimaryKey: fmt.Sprintf("%s/%s", namespace.Id(), keyspace.Id())} conn.EntryChannel() <- &entry } }
func (pi *indexIndex) ScanEntries(limit int64, cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) { defer close(conn.EntryChannel()) // eliminate duplicate keys keys := make(map[string]string, 64) actualStore := pi.keyspace.namespace.store.actualStore namespaceIds, err := actualStore.NamespaceIds() if err == nil { for _, namespaceId := range namespaceIds { namespace, err := actualStore.NamespaceById(namespaceId) if err == nil { keyspaceIds, err := namespace.KeyspaceIds() if err == nil { for _, keyspaceId := range keyspaceIds { keyspace, err := namespace.KeyspaceById(keyspaceId) if err == nil { indexers, err := keyspace.Indexers() if err == nil { for _, indexer := range indexers { err = indexer.Refresh() if err != nil { conn.Error(errors.NewSystemDatastoreError(err, "")) return } indexIds, err := indexer.IndexIds() if err == nil { for _, indexId := range indexIds { key := fmt.Sprintf("%s/%s/%s", namespaceId, keyspaceId, indexId) keys[key] = key } } } } } } } } } } for k, _ := range keys { entry := datastore.IndexEntry{PrimaryKey: k} conn.EntryChannel() <- &entry } }
func (pi *storeIndex) Scan(span *datastore.Span, distinct bool, limit int64, cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) { defer close(conn.EntryChannel()) val := "" a := span.Seek[0].Actual() switch a := a.(type) { case string: val = a default: conn.Error(errors.NewSystemDatastoreError(nil, fmt.Sprintf("Invalid seek value %v of type %T.", a, a))) return } if strings.EqualFold(val, pi.keyspace.namespace.store.actualStore.Id()) { entry := datastore.IndexEntry{PrimaryKey: pi.keyspace.namespace.store.actualStore.Id()} conn.EntryChannel() <- &entry } }
func (pi *primaryIndex) ScanEntries(limit int64, cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) { defer close(conn.EntryChannel()) dirEntries, er := ioutil.ReadDir(pi.keyspace.path()) if er != nil { conn.Error(errors.NewFileDatastoreError(er, "")) return } for i, dirEntry := range dirEntries { if limit > 0 && int64(i) > limit { break } if !dirEntry.IsDir() { entry := datastore.IndexEntry{PrimaryKey: documentPathToId(dirEntry.Name())} conn.EntryChannel() <- &entry } } }
// Scan streams index entries from a couchbase view. It launches
// WalkViewInBatches in a goroutine and multiplexes its row and error
// channels: rows become index entries (with the view key converted to an
// entry key when possible), and an error before any row was sent triggers
// a probe of the bucket URI to detect a bucket that no longer exists, in
// which case the keyspace is evicted from the namespace cache.
func (vi *viewIndex) Scan(span *datastore.Span, distinct bool, limit int64, cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())

	// For primary indexes, bounds must always be strings, so we
	// can just enforce that directly
	viewOptions := map[string]interface{}{}
	viewOptions = generateViewOptions(cons, span) /*span.Range.Low, span.Range.High, span.Range.Inclusion) */

	// The walker goroutine feeds rows and errors back on these channels;
	// it owns them and closes viewRowChannel when the walk finishes.
	viewRowChannel := make(chan cb.ViewRow)
	viewErrChannel := make(chan errors.Error)
	go WalkViewInBatches(viewRowChannel, viewErrChannel, vi.keyspace.cbbucket, vi.DDocName(), vi.ViewName(), viewOptions, 1000, limit)

	var viewRow cb.ViewRow
	var err errors.Error
	sentRows := false // whether at least one entry reached the consumer
	ok := true        // becomes false when viewRowChannel is closed
	numRows := 0
	for ok {
		select {
		case viewRow, ok = <-viewRowChannel:
			if ok {
				entry := datastore.IndexEntry{PrimaryKey: viewRow.ID}

				// try to add the view row key as the entry key (unless this is _all_docs)
				if vi.DDocName() != "" /* FIXME && vi.IsPrimary() == false */ {
					// NOTE: this err shadows the outer err deliberately —
					// a conversion failure only downgrades the entry, it
					// does not abort the scan.
					lookupValue, err := convertCouchbaseViewKeyToLookupValue(viewRow.Key)
					if err == nil {
						entry.EntryKey = lookupValue
					} else {
						logging.Debugf("unable to convert index key to lookup value err:%v key %v", err, viewRow.Key)
					}
				}
				conn.EntryChannel() <- &entry
				sentRows = true
				numRows++
			}
		case err, ok = <-viewErrChannel:
			if err != nil {
				logging.Errorf("%v", err)
				// check to possibly detect a bucket that was already deleted
				if !sentRows {
					logging.Infof("Checking bucket URI: %v", vi.keyspace.cbbucket.URI)
					// NOTE(review): the response body of this GET is never
					// closed, which leaks the connection — confirm and fix
					// upstream. The shadowed err here is the probe error,
					// not the view error.
					_, err := http.Get(vi.keyspace.cbbucket.URI)
					if err != nil {
						logging.Errorf("%v", err)
						// remove this specific bucket from the pool cache
						vi.keyspace.namespace.lock.Lock()
						delete(vi.keyspace.namespace.keyspaceCache, vi.keyspace.Name())
						vi.keyspace.namespace.lock.Unlock()
						// close this bucket
						vi.keyspace.Release()
						// ask the pool to refresh
						vi.keyspace.namespace.refresh(true)
						// bucket doesnt exist any more
						conn.Error(errors.NewCbViewsAccessError(nil, "keyspace "+vi.keyspace.Name()))
						return
					}
				}
				// Bucket still reachable (or rows already sent): surface
				// the view error to the consumer and stop.
				conn.Error(err)
				return
			}
		}
	}
	logging.Infof("Number of entries fetched from the index %d", numRows)
}
// notifyConn performs a non-blocking send of false on the connection's
// stop channel. If no receiver is ready the notification is dropped
// rather than blocking the caller.
func notifyConn(conn *datastore.IndexConnection) {
	select {
	case conn.StopChannel() <- false:
		// notification delivered
	default:
		// no receiver ready; drop it
	}
}
func (pi *primaryIndex) Scan(span *datastore.Span, distinct bool, limit int64, cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) { defer close(conn.EntryChannel()) // For primary indexes, bounds must always be strings, so we // can just enforce that directly low, high := "", "" // Ensure that lower bound is a string, if any if len(span.Range.Low) > 0 { a := span.Range.Low[0].Actual() switch a := a.(type) { case string: low = a default: conn.Error(errors.NewFileDatastoreError(nil, fmt.Sprintf("Invalid lower bound %v of type %T.", a, a))) return } } // Ensure that upper bound is a string, if any if len(span.Range.High) > 0 { a := span.Range.High[0].Actual() switch a := a.(type) { case string: high = a default: conn.Error(errors.NewFileDatastoreError(nil, fmt.Sprintf("Invalid upper bound %v of type %T.", a, a))) return } } dirEntries, er := ioutil.ReadDir(pi.keyspace.path()) if er != nil { conn.Error(errors.NewFileDatastoreError(er, "")) return } var n int64 = 0 for _, dirEntry := range dirEntries { fmt.Printf("Dir entry being scanned %v", dirEntry.Name()) if limit > 0 && n > limit { break } id := documentPathToId(dirEntry.Name()) if low != "" && (id < low || (id == low && (span.Range.Inclusion&datastore.LOW == 0))) { continue } low = "" if high != "" && (id > high || (id == high && (span.Range.Inclusion&datastore.HIGH == 0))) { break } if !dirEntry.IsDir() { entry := datastore.IndexEntry{PrimaryKey: id} conn.EntryChannel() <- &entry n++ } } }