// CreatePrimaryIndex implements datastore.Indexer{} interface. Create or // return a primary index on this keyspace func (gsi *gsiKeyspace) CreatePrimaryIndex( requestId, name string, with value.Value) (datastore.PrimaryIndex, errors.Error) { var withJSON []byte var err error if with != nil { if withJSON, err = with.MarshalJSON(); err != nil { return nil, errors.NewError(err, "GSI error marshalling WITH clause") } } defnID, err := gsi.gsiClient.CreateIndex( name, gsi.keyspace, /*bucket-name*/ string(c.ForestDB), /*using, by default always forestdb*/ "N1QL", /*exprType*/ "", /*partnStr*/ "", /*whereStr*/ nil, /*secStrs*/ true, /*isPrimary*/ withJSON) if err != nil { return nil, errors.NewError(err, "GSI CreatePrimaryIndex()") } // refresh to get back the newly created index. if err := gsi.Refresh(); err != nil { return nil, err } index, errr := gsi.IndexById(defnID2String(defnID)) if errr != nil { return nil, errr } return index.(datastore.PrimaryIndex), nil }
// BuildIndexes implements datastore.Indexer{} interface. func (gsi *gsiKeyspace) BuildIndexes(requestId string, names ...string) errors.Error { defnIDs := make([]uint64, len(names)) for i, name := range names { index, err := gsi.IndexByName(name) if err != nil { return errors.NewError(err, "BuildIndexes") } defnIDs[i] = string2defnID(index.Id()) } err := gsi.gsiClient.BuildIndexes(defnIDs) if err != nil { return errors.NewError(err, "BuildIndexes") } return nil }
// for metadata-provider. func newSecondaryIndexFromMetaData( gsi *gsiKeyspace, imd *mclient.IndexMetadata) (si *secondaryIndex, err errors.Error) { if len(imd.Instances) < 1 { return nil, errors.NewError(nil, "no instance are created by GSI") } instn, indexDefn := imd.Instances[0], imd.Definition defnID := uint64(indexDefn.DefnId) si = &secondaryIndex{ gsi: gsi, bucketn: indexDefn.Bucket, name: indexDefn.Name, defnID: defnID, isPrimary: indexDefn.IsPrimary, using: indexDefn.Using, partnExpr: indexDefn.PartitionKey, secExprs: indexDefn.SecExprs, whereExpr: indexDefn.WhereExpr, state: gsi2N1QLState[instn.State], err: instn.Error, deferred: indexDefn.Deferred, } return si, nil }
func makeResponsehandler( client *qclient.GsiClient, conn *datastore.IndexConnection) qclient.ResponseHandler { entryChannel := conn.EntryChannel() stopChannel := conn.StopChannel() return func(data qclient.ResponseReader) bool { if err := data.Error(); err != nil { conn.Error(n1qlError(client, err)) return false } skeys, pkeys, err := data.GetEntries() if err == nil { for i, skey := range skeys { // Primary-key is mandatory. e := &datastore.IndexEntry{ PrimaryKey: string(pkeys[i]), } e.EntryKey = skey2Values(skey) fmsg := "current enqueued length: %d (max %d)\n" l.Tracef(fmsg, len(entryChannel), cap(entryChannel)) select { case entryChannel <- e: case <-stopChannel: return false } } return true } conn.Error(errors.NewError(nil, err.Error())) return false } }
// NewGSIIndexer manage new set of indexes under namespace->keyspace, // also called as, pool->bucket. // will return an error when, // - GSI cluster is not available. // - network partitions / errors. func NewGSIIndexer( clusterURL, namespace, keyspace string) (datastore.Indexer, errors.Error) { l.SetLogLevel(l.Info) gsi := &gsiKeyspace{ clusterURL: clusterURL, namespace: namespace, keyspace: keyspace, indexes: make(map[uint64]*secondaryIndex), // defnID -> index primaryIndexes: make(map[uint64]*secondaryIndex), } gsi.logPrefix = fmt.Sprintf("GSIC[%s; %s]", namespace, keyspace) // get the singleton-client client, err := getSingletonClient(clusterURL) if err != nil { l.Errorf("%v GSI instantiation failed: %v", gsi.logPrefix, err) return nil, errors.NewError(err, "GSI client instantiation failed") } gsi.gsiClient = client // refresh indexes for this service->namespace->keyspace if err := gsi.Refresh(); err != nil { l.Errorf("%v Refresh() failed: %v", gsi.logPrefix, err) return nil, err } l.Debugf("%v instantiated ...", gsi.logPrefix) return gsi, nil }
func Run(mockServer *server.Server, q string) ([]interface{}, []errors.Error, errors.Error) { var metrics value.Tristate scanConfiguration := &scanConfigImpl{} base := server.NewBaseRequest(q, nil, nil, nil, "json", 0, value.FALSE, metrics, value.TRUE, scanConfiguration, "", nil) mr := &MockResponse{ results: []interface{}{}, warnings: []errors.Error{}, done: make(chan bool), } query := &MockQuery{ BaseRequest: *base, response: mr, } select { case mockServer.Channel() <- query: // Wait until the request exits. <-query.CloseNotify() default: // Timeout. return nil, nil, errors.NewError(nil, "Query timed out") } // wait till all the results are ready <-mr.done return mr.results, mr.warnings, mr.err }
// Synchronise gsi client with the servers and refresh the indexes list. func (gsi *gsiKeyspace) SyncRefresh() errors.Error { err := gsi.gsiClient.Sync() if err != nil { return errors.NewError(err, "GSI SyncRefresh()") } return gsi.Refresh() }
// get cluster info and refresh ns-server data. func getClusterInfo( cluster string, pooln string) (*c.ClusterInfoCache, errors.Error) { clusterURL, err := c.ClusterAuthUrl(cluster) if err != nil { return nil, errors.NewError(err, fmt.Sprintf("ClusterAuthUrl() failed")) } cinfo, err := c.NewClusterInfoCache(clusterURL, pooln) if err != nil { return nil, errors.NewError(err, fmt.Sprintf("ClusterInfo() failed")) } if err := cinfo.Fetch(); err != nil { msg := fmt.Sprintf("Fetch ClusterInfo() failed") return nil, errors.NewError(err, msg) } return cinfo, nil }
// CreateIndex implements datastore.Indexer{} interface. Create a secondary // index on this keyspace func (gsi *gsiKeyspace) CreateIndex( requestId, name string, seekKey, rangeKey expression.Expressions, where expression.Expression, with value.Value) ( datastore.Index, errors.Error) { var partnStr string if seekKey != nil && len(seekKey) > 0 { partnStr = expression.NewStringer().Visit(seekKey[0]) } var whereStr string if where != nil { whereStr = expression.NewStringer().Visit(where) } secStrs := make([]string, len(rangeKey)) for i, key := range rangeKey { s := expression.NewStringer().Visit(key) secStrs[i] = s } var withJSON []byte var err error if with != nil { if withJSON, err = with.MarshalJSON(); err != nil { return nil, errors.NewError(err, "GSI error marshalling WITH clause") } } defnID, err := gsi.gsiClient.CreateIndex( name, gsi.keyspace, /*bucket-name*/ string(c.ForestDB), /*using, by default always forestdb*/ "N1QL", /*exprType*/ partnStr, whereStr, secStrs, false, /*isPrimary*/ withJSON) if err != nil { return nil, errors.NewError(err, "GSI CreateIndex()") } // refresh to get back the newly created index. if err := gsi.Refresh(); err != nil { return nil, err } return gsi.IndexById(defnID2String(defnID)) }
// Drop implement Index{} interface. func (si *secondaryIndex) Drop(requestId string) errors.Error { if si == nil { return ErrorIndexEmpty } if err := si.gsi.gsiClient.DropIndex(si.defnID); err != nil { return errors.NewError(err, "GSI Drop()") } si.gsi.delIndex(si.Id()) return nil }
func (this *Context) Recover() { err := recover() if err != nil { buf := make([]byte, 1<<16) n := runtime.Stack(buf, false) s := string(buf[0:n]) logging.Severep("", logging.Pair{"panic", err}, logging.Pair{"stack", s}) os.Stderr.WriteString(s) os.Stderr.Sync() switch err := err.(type) { case error: this.Fatal(errors.NewError(err, fmt.Sprintf("Panic: %v", err))) default: this.Fatal(errors.NewError(nil, fmt.Sprintf("Panic: %v", err))) } } }
// helper function to determine the external IP address of a query node - // used to create a name for the query node in NewQueryNode function. func ExternalIP() (string, errors.Error) { ifaces, err := net.Interfaces() if err != nil { return "", errors.NewError(err, "") } result := "" for _, iface := range ifaces { if iface.Flags&net.FlagUp == 0 { continue // interface down } if iface.Flags&net.FlagLoopback != 0 { continue // loopback interface } addrs, err := iface.Addrs() if err != nil { return "", errors.NewError(err, "") } for _, addr := range addrs { var ip net.IP switch v := addr.(type) { case *net.IPNet: ip = v.IP case *net.IPAddr: ip = v.IP } if ip == nil || ip.IsLoopback() { continue } ip = ip.To4() if ip == nil { continue // not an ipv4 address } result = ip.String() if strings.HasPrefix(result, "192") { return result, nil } } } return result, errors.NewError(nil, "Not connected to the network") }
func GetKeyspace(namespace, keyspace string) (Keyspace, errors.Error) { datastore := GetDatastore() if datastore == nil { return nil, errors.NewError(nil, "Datastore not set.") } ns, err := datastore.NamespaceByName(namespace) if err != nil { return nil, err } return ns.KeyspaceByName(keyspace) }
func (this *base) requireKey(item value.AnnotatedValue, context *Context) (string, bool) { mv := item.GetAttachment("meta") if mv == nil { context.Error(errors.NewError(nil, "Unable to find meta.")) return "", false } meta := mv.(map[string]interface{}) key, ok := meta["id"] if !ok { context.Error(errors.NewError(nil, "Unable to find key.")) return "", false } act := value.NewValue(key).Actual() switch act := act.(type) { case string: return act, true default: e := errors.NewError(nil, fmt.Sprintf("Unable to process non-string key %v of type %T.", act, act)) context.Error(e) return "", false } }
// IndexById implements datastore.Indexer{} interface. Find an index on this // keyspace using the index's id. func (gsi *gsiKeyspace) IndexById(id string) (datastore.Index, errors.Error) { gsi.rw.RLock() defer gsi.rw.RUnlock() defnID := string2defnID(id) index, ok := gsi.indexes[defnID] if !ok { index, ok = gsi.primaryIndexes[defnID] if !ok { errmsg := fmt.Sprintf("GSI index id %v not found.", id) err := errors.NewError(nil, errmsg) return nil, err } } l.Debugf("%v IndexById %v = %v", gsi.logPrefix, id, index) return index, nil }
func (this *Explain) RunOnce(context *Context, parent value.Value) { this.once.Do(func() { defer context.Recover() // Recover from any panic defer close(this.itemChannel) // Broadcast that I have stopped defer this.notify() // Notify that I have stopped bytes, err := json.Marshal(this.plan) if err != nil { context.Fatal(errors.NewError(err, "Failed to marshal JSON.")) return } value := value.NewAnnotatedValue(bytes) this.sendItem(value) }) }
// IndexByName implements datastore.Indexer{} interface. Find an index on // this keyspace using the index's name. func (gsi *gsiKeyspace) IndexByName(name string) (datastore.Index, errors.Error) { gsi.rw.RLock() defer gsi.rw.RUnlock() for _, index := range gsi.indexes { if index.Name() == name { return index, nil } } for _, index := range gsi.primaryIndexes { if index.Name() == name { return index, nil } } err := errors.NewError(nil, fmt.Sprintf("GSI index %v not found.", name)) return nil, err }
// RunOnce runs keyspace inference at most once: it obtains the configured
// inferencer, runs InferKeyspace in a goroutine, and forwards each
// inferred value to this operator's item channel until the value channel
// closes or a stop is requested.
func (this *InferKeyspace) RunOnce(context *Context, parent value.Value) {
	this.once.Do(func() {
		defer context.Recover()       // Recover from any panic
		defer close(this.itemChannel) // Broadcast that I have stopped
		defer this.notify()           // Notify that I have stopped
		conn := datastore.NewValueConnection(context)
		defer notifyConn(conn.StopChannel())

		// Phase time excludes the time spent inside sendItem (accumulated
		// in duration and subtracted at defer time).
		var duration time.Duration
		timer := time.Now()
		defer context.AddPhaseTime("InferKeySpace", time.Since(timer)-duration)

		infer, err := context.Datastore().Inferencer(this.plan.Node().Using())
		if err != nil {
			context.Error(errors.NewError(err, "Failed to get Inferencer"))
			return
		}
		go infer.InferKeyspace(this.plan.Keyspace(), this.plan.Node().With(), conn)
		var val value.Value
		ok := true
		for ok {
			// Non-blocking check first so a pending stop wins even when
			// the value channel is also ready.
			select {
			case <-this.stopChannel:
				return
			default:
			}
			select {
			case val, ok = <-conn.ValueChannel():
				if ok {
					t := time.Now()
					ok = this.sendItem(value.NewAnnotatedValue(val))
					duration += time.Since(t)
				}
			case <-this.stopChannel:
				return
			}
		}
	})
}
/* Returns all required privileges. */ func subqueryPrivileges(exprs expression.Expressions) (datastore.Privileges, errors.Error) { subqueries, err := expression.ListSubqueries(exprs, false) if err != nil { return nil, errors.NewError(err, "") } privileges := datastore.NewPrivileges() for _, s := range subqueries { sub := s.(*Subquery) sp, e := sub.Select().Privileges() if e != nil { return nil, e } privileges.Add(sp) } return privileges, nil }
func GetKeyspace(namespace, keyspace string) (Keyspace, errors.Error) { var datastore Datastore if namespace == "#system" { datastore = GetSystemstore() } else { datastore = GetDatastore() } if datastore == nil { return nil, errors.NewError(nil, "Datastore not set.") } ns, err := datastore.NamespaceByName(namespace) if err != nil { return nil, err } return ns.KeyspaceByName(keyspace) }
func (this *SendUpdate) beforeItems(context *Context, parent value.Value) bool { if this.plan.Limit() == nil { return true } limit, err := this.plan.Limit().Evaluate(parent, context) if err != nil { context.Error(errors.NewError(err, "")) return false } switch l := limit.Actual().(type) { case float64: this.limit = int64(l) default: context.Error(errors.NewInvalidValueError(fmt.Sprintf("Invalid LIMIT %v of type %T.", l, l))) return false } return true }
// handleError maps a transport-level error from the query service to a
// specific shell error based on substrings of the (lower-cased) error
// text; unrecognized errors are wrapped generically.
func handleError(err error, tiServer string) errors.Error {
	// Lower-case once instead of recomputing for every comparison.
	msg := strings.ToLower(err.Error())
	switch {
	case strings.Contains(msg, "connection refused"):
		return errors.NewShellErrorCannotConnect("Unable to connect to query service " + tiServer)
	case strings.Contains(msg, "unsupported protocol"):
		return errors.NewShellErrorUnsupportedProtocol("Unsupported Protocol Scheme " + tiServer)
	case strings.Contains(msg, "no such host"):
		return errors.NewShellErrorNoSuchHost("No such Host " + tiServer)
	case strings.Contains(msg, "unknown port tcp"):
		return errors.NewShellErrorUnknownPorttcp("Unknown port " + tiServer)
	case strings.Contains(msg, "no host in request url"):
		return errors.NewShellErrorNoHostInRequestUrl("No Host in request URL " + tiServer)
	case strings.Contains(msg, "no route to host"):
		return errors.NewShellErrorNoRouteToHost("No Route to host " + tiServer)
	case strings.Contains(msg, "operation timed out"):
		return errors.NewShellErrorOperationTimeout("Operation timed out. Check query service url " + tiServer)
	case strings.Contains(msg, "network is unreachable"):
		return errors.NewShellErrorUnreachableNetwork("Network is unreachable " + tiServer)
	default:
		return errors.NewError(err, "")
	}
}
// Refresh list of indexes and scanner clients. func (gsi *gsiKeyspace) Refresh() errors.Error { l.Tracef("%v gsiKeyspace.Refresh()", gsi.logPrefix) indexes, err := gsi.gsiClient.Refresh() if err != nil { return errors.NewError(err, "GSI Refresh()") } si_s := make([]*secondaryIndex, 0, len(indexes)) for _, index := range indexes { if index.Definition.Bucket != gsi.keyspace { continue } si, err := newSecondaryIndexFromMetaData(gsi, index) if err != nil { return err } si_s = append(si_s, si) } if err := gsi.setIndexes(si_s); err != nil { return err } return nil }
func NewDatastore(uri string) (datastore.Datastore, errors.Error) { if strings.HasPrefix(uri, ".") || strings.HasPrefix(uri, "/") { return file.NewDatastore(uri) } if strings.HasPrefix(uri, "http:") { return couchbase.NewDatastore(uri) } if strings.HasPrefix(uri, "dir:") { return file.NewDatastore(uri[4:]) } if strings.HasPrefix(uri, "file:") { return file.NewDatastore(uri[5:]) } if strings.HasPrefix(uri, "mock:") { return mock.NewDatastore(uri) } return nil, errors.NewError(nil, fmt.Sprintf("Invalid datastore uri: %s", uri)) }
func (this *MockQuery) Expire() { defer this.stopAndClose(server.TIMEOUT) this.response.err = errors.NewError(nil, "Query timed out") close(this.response.done) }
// serviceRequest executes one client request end-to-end: prepare, build
// the execution plan, apply the server timeout, and run the operator
// tree. Panics are caught and logged with a stack trace.
//
// NOTE(review): after request.Fail(...) the code does not return
// immediately; it relies on request.State() becoming FATAL to stop before
// prepared/operator are used — confirm Fail always sets FATAL.
func (this *Server) serviceRequest(request Request) {
	defer func() {
		// Last-resort panic handler: log and dump the stack to stderr.
		err := recover()
		if err != nil {
			buf := make([]byte, 1<<16)
			n := runtime.Stack(buf, false)
			s := string(buf[0:n])
			logging.Severep("", logging.Pair{"panic", err},
				logging.Pair{"stack", s})
			os.Stderr.WriteString(s)
			os.Stderr.Sync()
		}
	}()
	request.Servicing()

	// Fall back to the server's default namespace when none was given.
	namespace := request.Namespace()
	if namespace == "" {
		namespace = this.namespace
	}
	prepared, err := this.getPrepared(request, namespace)
	if err != nil {
		request.Fail(err)
	}
	// A read-only server/request cannot run a non-read-only statement.
	if (this.readonly || value.ToBool(request.Readonly())) &&
		(prepared != nil && !prepared.Readonly()) {
		request.Fail(errors.NewServiceErrorReadonly(
			"The server or request is read-only" +
				" and cannot accept this write statement."))
	}
	if request.State() == FATAL {
		request.Failed(this)
		return
	}
	maxParallelism := request.MaxParallelism()
	if maxParallelism <= 0 {
		maxParallelism = this.MaxParallelism()
	}
	context := execution.NewContext(request.Id().String(), this.datastore,
		this.systemstore, namespace, this.readonly, maxParallelism,
		request.NamedArgs(), request.PositionalArgs(),
		request.Credentials(), request.ScanConsistency(),
		request.ScanVectorSource(), request.Output())
	build := time.Now()
	operator, er := execution.Build(prepared, context)
	if er != nil {
		// NOTE(review): "error" here shadows the builtin error type;
		// kept byte-identical, but worth renaming in a code change.
		error, ok := er.(errors.Error)
		if ok {
			request.Fail(error)
		} else {
			request.Fail(errors.NewError(er, ""))
		}
	}
	if logging.LogLevel() >= logging.TRACE {
		request.Output().AddPhaseTime("instantiate", time.Since(build))
	}
	if request.State() == FATAL {
		request.Failed(this)
		return
	}
	// Apply server execution timeout
	if this.Timeout() > 0 {
		timer := time.AfterFunc(this.Timeout(), func() { request.Expire() })
		defer timer.Stop()
	}
	// Execute asynchronously; RunOnce drives the operator tree.
	go request.Execute(this, prepared.Signature(), operator.StopChannel())
	run := time.Now()
	operator.RunOnce(context, nil)
	if logging.LogLevel() >= logging.TRACE {
		request.Output().AddPhaseTime("run", time.Since(run))
		logPhases(request)
	}
}
// performOp applies a DML operation (INSERT, UPDATE or UPSERT) to each
// key/value pair against the couchbase bucket, returning the pairs that
// succeeded. It returns an error only when no pair succeeded; partial
// failures are logged but otherwise reported through the returned slice.
func (b *keyspace) performOp(op int, inserts []datastore.Pair) ([]datastore.Pair, errors.Error) {
	if len(inserts) == 0 {
		return nil, nil
	}

	insertedKeys := make([]datastore.Pair, 0, len(inserts))
	var err error

	for _, kv := range inserts {
		key := kv.Key
		val := kv.Value.Actual()

		//mv := kv.Value.GetAttachment("meta")

		// TODO Need to also set meta
		switch op {

		case INSERT:
			var added bool
			// add the key to the backend
			added, err = b.cbbucket.Add(key, 0, val)
			if added == false {
				// false & err == nil => given key aready exists in the bucket
				if err != nil {
					err = errors.NewError(err, "Key "+key)
				} else {
					err = errors.NewError(nil, "Duplicate Key "+key)
				}
			}
		case UPDATE:
			// check if the key exists and if so then use the cas value
			// to update the key
			var meta map[string]interface{}
			var cas uint64
			var flags uint32

			an := kv.Value.(value.AnnotatedValue)
			meta = an.GetAttachment("meta").(map[string]interface{})

			cas, flags, err = getMeta(key, meta)
			if err != nil {
				// Don't perform the update if the meta values are not found
				logging.Errorf("Failed to get meta values for key %v, error %v", key, err)
			} else {
				logging.Debugf("CAS Value (Update) for key %v is %v flags %v value %v",
					key, uint64(cas), flags, val)
				_, err = b.cbbucket.CasWithMeta(key, int(flags), 0, uint64(cas), val)
			}

		case UPSERT:
			// blind set: no CAS check
			err = b.cbbucket.Set(key, 0, val)
		}

		if err != nil {
			if isEExistError(err) {
				// CAS mismatch: someone else modified the key concurrently
				logging.Errorf("Failed to perform update on key %s. CAS mismatch due to concurrent modifications", key)
			} else {
				logging.Errorf("Failed to perform %s on key %s for Keyspace %s Error %v",
					opToString(op), key, b.Name(), err)
			}
		} else {
			insertedKeys = append(insertedKeys, kv)
		}
	}

	if len(insertedKeys) == 0 {
		// Total failure: surface the last error seen.
		return nil, errors.NewCbDMLError(err, "Failed to perform "+opToString(op))
	}

	return insertedKeys, nil
}
// Scan walks the view in batches and streams matching entries onto the
// index connection's entry channel, honoring the connection's stop
// channel. On a view error with no rows yet sent it probes the bucket
// URI to detect a deleted bucket and, if so, evicts the keyspace from
// the namespace cache before reporting the error.
func (vi *viewIndex) Scan(requestId string, span *datastore.Span, distinct bool, limit int64,
	cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())

	// For primary indexes, bounds must always be strings, so we
	// can just enforce that directly
	viewOptions := map[string]interface{}{}
	viewOptions = generateViewOptions(cons, span) /*span.Range.Low, span.Range.High, span.Range.Inclusion) */
	viewRowChannel := make(chan cb.ViewRow)
	viewErrChannel := make(chan errors.Error)
	doneChannel := make(chan bool)
	defer close(doneChannel)

	go WalkViewInBatches(viewRowChannel, viewErrChannel, doneChannel, vi.keyspace.cbbucket,
		vi.DDocName(), vi.ViewName(), vi.IsPrimary(), viewOptions, _BATCH_SIZE, limit)

	var viewRow cb.ViewRow
	var err errors.Error
	sentRows := false
	ok := true
	numRows := 0
	for ok {
		select {
		case viewRow, ok = <-viewRowChannel:
			if ok {
				entry := datastore.IndexEntry{PrimaryKey: viewRow.ID}
				// try to add the view row key as the entry key (unless this is _all_docs)
				if vi.IsPrimary() == false {
					lookupValue, err := convertCouchbaseViewKeyToLookupValue(viewRow.Key)
					if err == nil {
						entry.EntryKey = lookupValue
					} else {
						conn.Error(errors.NewError(err, "View Row "+fmt.Sprintf("%v", viewRow.Key)))
					}
				}
				select {
				case conn.EntryChannel() <- &entry:
					sentRows = true
					numRows++
				case <-conn.StopChannel():
					logging.Debugf(" Asked to stop after sending %v rows", numRows)
					ok = false
				}
			}
		case err, ok = <-viewErrChannel:
			// NOTE(review): when this channel closes cleanly, err is nil
			// and the loop exits via ok == false.
			if err != nil {
				logging.Errorf("%v", err)
				// check to possibly detect a bucket that was already deleted
				if !sentRows {
					logging.Debugf("Checking bucket URI: %v", vi.keyspace.cbbucket.URI)
					_, err := http.Get(vi.keyspace.cbbucket.URI)
					if err != nil {
						logging.Errorf("%v", err)

						// remove this specific bucket from the pool cache
						vi.keyspace.namespace.lock.Lock()
						delete(vi.keyspace.namespace.keyspaceCache, vi.keyspace.Name())
						vi.keyspace.namespace.lock.Unlock()
						// close this bucket
						vi.keyspace.Release()
						// ask the pool to refresh
						vi.keyspace.namespace.refresh(true)
						// bucket doesnt exist any more
						conn.Error(errors.NewCbViewsAccessError(nil,
							"keyspace "+vi.keyspace.Name()+" or view index missing"))
						return
					}
				}
				conn.Error(err)
				return
			}
		}
	}

	logging.Debugf("Number of entries fetched from the index %d", numRows)
}
// n1qlError wraps a GSI client error into an errors.Error, using the
// client's own description of the error as the message.
func n1qlError(client *qclient.GsiClient, err error) errors.Error {
	description := client.DescribeError(err)
	return errors.NewError(err, description)
}
import l "github.com/couchbase/indexing/secondary/logging" import c "github.com/couchbase/indexing/secondary/common" import "github.com/couchbase/indexing/secondary/collatejson" import qclient "github.com/couchbase/indexing/secondary/queryport/client" import mclient "github.com/couchbase/indexing/secondary/manager/client" import "github.com/couchbase/query/datastore" import "github.com/couchbase/query/errors" import "github.com/couchbase/query/expression" import "github.com/couchbase/query/expression/parser" import "github.com/couchbase/query/timestamp" import "github.com/couchbase/query/value" import qlog "github.com/couchbase/query/logging" // ErrorIndexEmpty is index not initialized. var ErrorIndexEmpty = errors.NewError( fmt.Errorf("gsi.indexEmpty"), "Fatal null reference to index") // ErrorIndexNotAvailable means client indexes list needs to be // refreshed. var ErrorIndexNotAvailable = fmt.Errorf("index not available") var n1ql2GsiInclusion = map[datastore.Inclusion]qclient.Inclusion{ datastore.NEITHER: qclient.Neither, datastore.LOW: qclient.Low, datastore.HIGH: qclient.High, datastore.BOTH: qclient.Both, } var gsi2N1QLState = map[c.IndexState]datastore.IndexState{ c.INDEX_STATE_CREATED: datastore.PENDING, c.INDEX_STATE_READY: datastore.PENDING, c.INDEX_STATE_INITIAL: datastore.PENDING,