func Test2iScanRange(t *testing.T) { c.LogIgnore() //c.SetLogLevel(c.LogLevelDebug) low, high := value.NewValue("aaaa"), value.NewValue("zzzz") span := &datastore.Span{ Range: &datastore.Range{ Low: value.Values{low}, High: value.Values{high}, Inclusion: datastore.BOTH, }, } conn := datastore.NewIndexConnection(nil) entrych := conn.EntryChannel() quitch := conn.StopChannel() go index.Scan("", span, false, 10000, conn) count := 0 loop: for { select { case _, ok := <-entrych: if !ok { break loop } count++ case <-quitch: break loop } } if count != 20000 { t.Fatal("failed ScanRange() - ", count) } }
func Test2iScanEntries(t *testing.T) { c.LogIgnore() //c.SetLogLevel(c.LogLevelDebug) conn := datastore.NewIndexConnection(nil) entrych := conn.EntryChannel() quitch := conn.StopChannel() go index.ScanEntries(10000, conn) count := 0 loop: for { select { case _, ok := <-entrych: if !ok { break loop } count++ case <-quitch: break loop } } if count != 20000 { t.Fatal("failed ScanEntries() - ", count) } }
// Helper function to perform a primary index scan on the given keyspace. Returns a map of // all primary key names. func doPrimaryIndexScan(t *testing.T, b datastore.Keyspace) (m map[string]bool, excp errors.Error) { conn := datastore.NewIndexConnection(&testingContext{t}) m = map[string]bool{} nitems, excp := b.Count() if excp != nil { t.Fatalf("failed to get keyspace count") return } indexers, excp := b.Indexers() if excp != nil { t.Fatalf("failed to retrieve indexers") return } pindexes, excp := indexers[0].PrimaryIndexes() if excp != nil || len(pindexes) < 1 { t.Fatalf("failed to retrieve primary indexes") return } idx := pindexes[0] go idx.ScanEntries("", nitems, datastore.UNBOUNDED, nil, conn) for { v, ok := <-conn.EntryChannel() if !ok { // Channel closed => Scan complete return } m[v.PrimaryKey] = true } }
func (this *spanScan) RunOnce(context *Context, parent value.Value) { this.once.Do(func() { defer context.Recover() // Recover from any panic defer close(this.itemChannel) // Broadcast that I have stopped defer this.notify() // Notify that I have stopped conn := datastore.NewIndexConnection(context) defer notifyConn(conn) // Notify index that I have stopped var duration time.Duration timer := time.Now() defer context.AddPhaseTime("scan", time.Since(timer)-duration) go this.scan(context, conn) var entry *datastore.IndexEntry ok := true for ok { select { case <-this.stopChannel: return default: } select { case entry, ok = <-conn.EntryChannel(): t := time.Now() if ok { cv := value.NewScopeValue(make(map[string]interface{}), parent) av := value.NewAnnotatedValue(cv) meta := map[string]interface{}{"id": entry.PrimaryKey} av.SetAttachment("meta", meta) covers := this.plan.Covers() if len(covers) > 0 { for i, c := range covers { if i == 0 { av.SetCover(c.Text(), value.NewValue(entry.PrimaryKey)) } else { av.SetCover(c.Text(), entry.EntryKey[i-1]) } } av.SetField(this.plan.Term().Alias(), av) } ok = this.sendItem(av) } duration += time.Since(t) case <-this.stopChannel: return } } }) }
// processItem performs an index join for one input item: it evaluates the
// join's id expression, scans the index for that id, and joins every
// resulting entry with the item. Returns false to halt the operator (on
// evaluation error or stop signal); otherwise returns whether downstream
// processing should continue, emitting the bare item for outer joins that
// matched nothing.
func (this *IndexJoin) processItem(item value.AnnotatedValue, context *Context) bool {
	idv, e := this.plan.IdExpr().Evaluate(item, context)
	if e != nil {
		context.Error(errors.NewEvaluationError(e, fmt.Sprintf("JOIN FOR %s", this.plan.For())))
		return false
	}
	found, foundOne := false, false
	// Only string ids can be joined; any other type falls through to the
	// outer-join handling below.
	if idv.Type() == value.STRING {
		var wg sync.WaitGroup
		// Wait for the scan goroutine before returning, even on early exit.
		defer wg.Wait()
		id := idv.Actual().(string)
		conn := datastore.NewIndexConnection(context)
		// NOTE(review): sibling operators pass conn itself to notifyConn;
		// here the stop channel is passed — confirm which signature
		// notifyConn has in this package.
		defer notifyConn(conn.StopChannel()) // Notify index that I have stopped
		wg.Add(1)
		go this.scan(id, context, conn, &wg)
		var entry *datastore.IndexEntry
		ok := true
		for ok {
			// Non-blocking stop check so a pending entry cannot delay
			// shutdown.
			select {
			case <-this.stopChannel:
				return false
			default:
			}
			select {
			case entry, ok = <-conn.EntryChannel():
				// Time only the join work; channel-wait time is excluded.
				t := time.Now()
				if ok {
					// joinEntry's second result doubles as the continue flag.
					foundOne, ok = this.joinEntry(item, entry, context)
					found = found || foundOne
				}
				this.duration += time.Since(t)
			case <-this.stopChannel:
				return false
			}
		}
	}
	// Inner join: succeed only if something matched. Outer join: emit the
	// unmatched item as-is.
	return found || !this.plan.Outer() || this.sendItem(item)
}
// processItem performs an index nest for one input item: it evaluates the
// nest's id expression, scans the index for that id, collects all matching
// entries, and nests them into the item. Returns false to halt the operator
// (on evaluation error or stop signal).
func (this *IndexNest) processItem(item value.AnnotatedValue, context *Context) bool {
	idv, e := this.plan.IdExpr().Evaluate(item, context)
	if e != nil {
		context.Error(errors.NewEvaluationError(e, fmt.Sprintf("NEST FOR %s", this.plan.For())))
		return false
	}
	var entry *datastore.IndexEntry
	entries := _INDEX_ENTRY_POOL.Get()
	// NOTE(review): defer arguments are evaluated here, so the pool gets back
	// the original slice header; growth beyond its capacity is not returned.
	// Presumably intentional (the pool keeps slices at a fixed capacity) —
	// confirm against the pool implementation.
	defer _INDEX_ENTRY_POOL.Put(entries)
	// Only string ids can be nested; other types nest an empty entry list.
	if idv.Type() == value.STRING {
		var wg sync.WaitGroup
		// Wait for the scan goroutine before returning, even on early exit.
		defer wg.Wait()
		id := idv.Actual().(string)
		conn := datastore.NewIndexConnection(context)
		defer notifyConn(conn) // Notify index that I have stopped
		wg.Add(1)
		go this.scan(id, context, conn, &wg)
		ok := true
		for ok {
			// Non-blocking stop check so a pending entry cannot delay
			// shutdown.
			select {
			case <-this.stopChannel:
				return false
			default:
			}
			select {
			case entry, ok = <-conn.EntryChannel():
				if ok {
					entries = append(entries, entry)
				}
			case <-this.stopChannel:
				return false
			}
		}
	}
	return this.nestEntries(item, entries, context)
}
func (this *PrimaryScan) newIndexConnection(context *Context) *datastore.IndexConnection { var conn *datastore.IndexConnection // Use keyspace count to create a sized index connection keyspace := this.plan.Keyspace() size, err := keyspace.Count() if err == nil { if size <= 0 { size = 1 } conn, err = datastore.NewSizedIndexConnection(size, context) conn.SetPrimary() } // Use non-sized API and log error if err != nil { conn = datastore.NewIndexConnection(context) conn.SetPrimary() logging.Errorp("PrimaryScan.newIndexConnection ", logging.Pair{"error", err}) } return conn }
// Helper function to scan the primary index of given keyspace with given span func doIndexScan(t *testing.T, b datastore.Keyspace, span *datastore.Span) ( e []*datastore.IndexEntry, excp errors.Error) { conn := datastore.NewIndexConnection(&testingContext{t}) e = []*datastore.IndexEntry{} nitems, excp := b.Count() if excp != nil { t.Fatalf("failed to get keyspace count") return } indexers, excp := b.Indexers() if excp != nil { t.Fatalf("failed to retrieve indexers") return } idx, excp := indexers[0].IndexByName("#primary") if excp != nil { t.Fatalf("failed to retrieve primary index") return } go idx.Scan("", span, false, nitems, datastore.UNBOUNDED, nil, conn) for { entry, ok := <-conn.EntryChannel() if !ok { return } e = append(e, entry) } return }
func TestServer(t *testing.T) { logger, _ := log_resolver.NewLogger("golog") if logger == nil { t.Fatalf("Invalid logger") } logging.SetLogger(logger) site, err := NewDatastore(TEST_URL) if err != nil { t.Skipf("SKIPPING TEST: %v", err) } namespaceNames, err := site.NamespaceNames() if err != nil { t.Fatalf("Failed to get Namespace names . error %v", err) } fmt.Printf("Namespaces in this instance %v", namespaceNames) namespace, err := site.NamespaceByName("default") if err != nil { t.Fatalf("Namespace default not found, error %v", err) } keyspaceNames, err := namespace.KeyspaceNames() if err != nil { t.Fatalf(" Cannot fetch keyspaces names. error %v", err) } fmt.Printf("Keyspaces in this namespace %v", keyspaceNames) //connect to beer-sample ks, err := namespace.KeyspaceByName("beer-sample") if err != nil { t.Fatalf(" Cannot connect to beer-sample. Error %v", err) return } indexer, err := ks.Indexer(datastore.VIEW) if err != nil { fmt.Printf("No indexers found") return } // try create a primary index index, err := indexer.CreatePrimaryIndex("", "#primary", nil) if err != nil { // keep going. maybe index already exists fmt.Printf(" Cannot create a primary index on bucket. 
Error %v", err) } else { fmt.Printf("primary index created %v", index) } pair, errs := ks.Fetch([]string{"357", "aass_brewery"}) if errs != nil { t.Fatalf(" Cannot fetch keys errors %v", errs) } fmt.Printf("Keys fetched %v", pair) insertKey := datastore.Pair{Key: "testBeerKey", Value: value.NewValue(("This is a random test key-value"))} _, err = ks.Insert([]datastore.Pair{insertKey}) if err != nil { t.Fatalf("Cannot insert key %v", insertKey) } deleted, err := ks.Delete([]string{insertKey.Key}) if err != nil || (len(deleted) != 1 && deleted[0] != insertKey.Key) { t.Fatalf("Failed to delete %v", err) } pi, err := indexer.PrimaryIndexes() if err != nil || len(pi) < 1 { fmt.Printf("No primary index found") return } //fmt.Printf(" got primary index %s", pi.name) conn := datastore.NewIndexConnection(nil) go pi[0].ScanEntries("", math.MaxInt64, datastore.UNBOUNDED, nil, conn) var entry *datastore.IndexEntry ok := true for ok { select { case entry, ok = <-conn.EntryChannel(): if ok { fmt.Printf("\n primary key %v", entry.PrimaryKey) } } } }
func TestFile(t *testing.T) { store, err := NewDatastore("../../test/filestore/json") if err != nil { t.Fatalf("failed to create store: %v", err) } namespaceIds, err := store.NamespaceIds() if err != nil { t.Errorf("failed to get namespace ids: %v", err) } if len(namespaceIds) != 1 || namespaceIds[0] != "default" { t.Errorf("expected 1 namespace id'd default") } namespace, err := store.NamespaceById("default") if err != nil { t.Errorf("failed to get namespace: %v", err) } namespaceNames, err := store.NamespaceNames() if err != nil { t.Errorf("failed to get namespace names: %v", err) } if len(namespaceNames) != 1 || namespaceNames[0] != "default" { t.Errorf("expected 1 namespace named json") } fmt.Printf("Found namespaces %v", namespaceNames) namespace, err = store.NamespaceByName("default") if err != nil { t.Fatalf("failed to get namespace: %v", err) } ks, err := namespace.KeyspaceIds() if err != nil { t.Errorf("failed to get keyspace ids: %v", err) } fmt.Printf("Keyspace ids %v", ks) keyspace, err := namespace.KeyspaceById("contacts") if err != nil { t.Errorf("failed to get keyspace by id: contacts") } _, err = namespace.KeyspaceNames() if err != nil { t.Errorf("failed to get keyspace names: %v", err) } keyspace, err = namespace.KeyspaceByName("contacts") if err != nil { t.Fatalf("failed to get keyspace by name: contacts") } indexers, err := keyspace.Indexers() if err != nil { t.Errorf("failed to get indexers") } indexes, err := indexers[0].Indexes() if err != nil { t.Errorf("failed to get indexes") } if len(indexes) < 1 { t.Errorf("Expected at least 1 index for keyspace") } pindexes, err := indexers[0].PrimaryIndexes() if err != nil { t.Errorf("failed to get primary indexes") } if len(pindexes) < 1 { t.Errorf("Expected at least 1 primary index for keyspace") } index := pindexes[0] context := &testingContext{t} conn := datastore.NewIndexConnection(context) go index.ScanEntries("", math.MaxInt64, datastore.UNBOUNDED, nil, conn) ok := true for ok { entry, ok := 
<-conn.EntryChannel() if ok { fmt.Printf("\nScanned %s", entry.PrimaryKey) } else { break } } freds, errs := keyspace.Fetch([]string{"fred"}) if errs != nil || len(freds) == 0 { t.Errorf("failed to fetch fred: %v", errs) } // DML test cases fred := freds[0].Value var dmlKey datastore.Pair dmlKey.Key = "fred2" dmlKey.Value = fred _, err = keyspace.Insert([]datastore.Pair{dmlKey}) if err != nil { t.Errorf("failed to insert fred2: %v", err) } _, err = keyspace.Update([]datastore.Pair{dmlKey}) if err != nil { t.Errorf("failed to insert fred2: %v", err) } _, err = keyspace.Upsert([]datastore.Pair{dmlKey}) if err != nil { t.Errorf("failed to insert fred2: %v", err) } dmlKey.Key = "fred3" _, err = keyspace.Upsert([]datastore.Pair{dmlKey}) if err != nil { t.Errorf("failed to insert fred2: %v", err) } // negative cases _, err = keyspace.Insert([]datastore.Pair{dmlKey}) if err == nil { t.Errorf("Insert should not have succeeded for fred2") } // delete all the freds deleted, err := keyspace.Delete([]string{"fred2", "fred3"}) if err != nil && len(deleted) != 2 { fmt.Printf("Warning: Failed to delete. Error %v", err) } _, err = keyspace.Update([]datastore.Pair{dmlKey}) if err == nil { t.Errorf("Update should have failed. Key fred3 doesn't exist") } // finally upsert the key. this should work _, err = keyspace.Upsert([]datastore.Pair{dmlKey}) if err != nil { t.Errorf("failed to insert fred2: %v", err) } // some deletes should fail deleted, err = keyspace.Delete([]string{"fred2", "fred3"}) if len(deleted) != 1 && deleted[0] != "fred2" { t.Errorf("failed to delete fred2: %v, #deleted=%d", deleted, len(deleted)) } }
func FetchDocs(serverURL string, bucketName string) map[string]interface{} { logger, _ := log_resolver.NewLogger("golog") if logger == nil { log.Fatalf("Invalid logger") } logging.SetLogger(logger) site, err := couchbase.NewDatastore(serverURL) if err != nil { log.Fatalf("Cannot create datastore %v", err) } namespace, err := site.NamespaceByName("default") if err != nil { log.Fatalf("Namespace default not found, error %v", err) } ks, err := namespace.KeyspaceByName(bucketName) if err != nil { log.Fatalf(" Cannot connect to %s. Error %v", bucketName, err) } indexer, err := ks.Indexer(datastore.VIEW) if err != nil { log.Fatalf("No view indexer found %v", err) } // try create a primary index index, err := indexer.CreatePrimaryIndex("", "#primary", nil) if err != nil { // keep going. maybe index already exists log.Printf(" Cannot create a primary index on bucket. Error %v", err) pi, err := indexer.PrimaryIndexes() if err != nil || len(pi) < 1 { log.Fatalf("No primary index found") } index = pi[0] } else { log.Printf("primary index created %v", index) } conn := datastore.NewIndexConnection(nil) go index.ScanEntries("", math.MaxInt64, datastore.UNBOUNDED, nil, conn) var entry *datastore.IndexEntry var fetchKeys = make([]string, 0, 1000) ok := true for ok { select { case entry, ok = <-conn.EntryChannel(): if ok { fetchKeys = append(fetchKeys, entry.PrimaryKey) } } } //fetch all the keys pairs, errs := ks.Fetch(fetchKeys) if errs != nil { log.Fatalf(" Failed to fetch keys %v", errs) } var keyMap = make(map[string]interface{}) for _, value := range pairs { keyMap[value.Key] = value.Value.Actual() } log.Printf("Got %v docs", len(keyMap)) return keyMap }