// simplest possible implementation
// 1. read all plans off plan channel
// 2. return last plan
func (this *SimpleOptimizer) Optimize(planChannel plan.PlanChannel, errChannel query.ErrorChannel) (*plan.Plan, query.Error) {
    plans := make([]plan.Plan, 0)
    var p plan.Plan
    var err query.Error
    ok := true
    for ok {
        select {
        case p, ok = <-planChannel:
            if ok {
                clog.To(optimizer.CHANNEL, "See plan %v", p)
                plans = append(plans, p)
            }
        case err, ok = <-errChannel:
            if err != nil {
                return nil, err
            }
        }
    }
    if len(plans) > 0 {
        chosenPlan := plans[len(plans)-1]
        clog.To(optimizer.CHANNEL, "Choosing plan %v", chosenPlan)
        return &chosenPlan, nil
    }
    return nil, query.NewError(nil, "No plans produced for optimizer to choose from")
}
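// A standalone sketch of the same drain-and-pick-last pattern, not part of the
// original package: the names here (pickLast, candidates, errs) are hypothetical
// and only the standard library is used. The loop reads candidate values until
// either channel closes, aborts on the first error, and returns the last value seen.
package main

import (
    "errors"
    "fmt"
)

func pickLast(candidates <-chan int, errs <-chan error) (int, error) {
    last, seen := 0, false
    ok := true
    for ok {
        select {
        case c, cok := <-candidates:
            ok = cok
            if cok {
                last, seen = c, true
            }
        case err, eok := <-errs:
            ok = eok
            if eok && err != nil {
                return 0, err
            }
        }
    }
    if !seen {
        return 0, errors.New("no candidates produced")
    }
    return last, nil
}

func main() {
    candidates := make(chan int)
    errs := make(chan error)
    go func() {
        defer close(candidates)
        defer close(errs)
        for i := 1; i <= 3; i++ {
            candidates <- i
        }
    }()
    fmt.Println(pickLast(candidates, errs)) // 3 <nil>
}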
func (this *HttpQuery) Process() {
    err := this.response.Process()
    if err != nil {
        clog.To(CHANNEL, "error writing to client, aborting query")
        this.StopProcessing()
    } else {
        clog.To(CHANNEL, "response complete")
    }
}
func (this *KeyJoin) Run(stopChannel misc.StopChannel) {
    clog.To(CHANNEL, "key join operator starting")
    if this.Base.Source != nil {
        this.Base.RunOperator(this, stopChannel)
    } else {
        this.Base.SendError(query.NewError(fmt.Errorf("missing source operator"), ""))
    }
    clog.To(CHANNEL, "key join operator finished, fetched %d", this.rowsFetched)
}
func (this *StubSource) Run(stopChannel misc.StopChannel) {
    clog.To(CHANNEL, "stub source operator starting")
    defer close(this.itemChannel)
    defer close(this.supportChannel)
    for _, item := range this.data {
        this.itemChannel <- item
    }
    clog.To(CHANNEL, "stub source operator finished")
}
func (this *InterpretedExecutor) executeInternal(optimalPlan *plan.Plan, q network.Query, timeoutStopChannel misc.StopChannel) {
    clog.To(executor.CHANNEL, "simple executor started")

    // first make the plan executable
    executablePipeline, berr := this.xpipelinebuilder.Build(optimalPlan, q)
    if berr != nil {
        q.Response().SendError(query.NewError(berr, ""))
        return
    }
    root := executablePipeline.Root

    // create a stop channel
    stopChannel := make(misc.StopChannel)
    // set it on the query object, so HTTP layer can
    // stop us if the client goes away
    q.SetStopChannel(stopChannel)
    go root.Run(stopChannel)

    // now execute it
    var item *dparval.Value
    var obj interface{}
    sourceItemChannel, supportChannel := root.GetChannels()
    ok := true
    for ok {
        select {
        case item, ok = <-sourceItemChannel:
            if ok {
                ok = this.processItem(q, item)
                clog.To(executor.CHANNEL, "simple executor sent client item: %v", item)
            }
        case obj, ok = <-supportChannel:
            if ok {
                switch obj := obj.(type) {
                case query.Error:
                    q.Response().SendError(obj)
                    clog.To(executor.CHANNEL, "simple executor sent client error: %v", obj)
                    if obj.IsFatal() {
                        return
                    }
                }
            }
        case _, ok = <-timeoutStopChannel:
            clog.To(executor.CHANNEL, "simple execution aborted, timeout")
            return
        }
    }
    q.Response().NoMoreResults()
    clog.To(executor.CHANNEL, "simple executor finished")
}
func (this *EliminateDuplicates) afterItems() {
    // write the output
    for pos, item := range this.buffer {
        // we will nil out duplicates and then skip over those entries in the buffer
        if item != nil {
            if pos < len(this.buffer) {
                // look to see if the exact same item appears later in the buffer
                for nextpos, nextitem := range this.buffer[pos+1:] {
                    itemProj, ok := item.GetAttachment("projection").(*dparval.Value)
                    if ok {
                        itemVal := itemProj.Value()
                        if nextitem != nil {
                            nextItemProj, ok := nextitem.GetAttachment("projection").(*dparval.Value)
                            if ok {
                                nextItemVal := nextItemProj.Value()
                                comp := ast.CollateJSON(itemVal, nextItemVal)
                                if comp == 0 {
                                    this.buffer[pos+nextpos+1] = nil
                                }
                            }
                        }
                    }
                }
            }
            clog.To(DEBUG_DUP_CHANNEL, "distinct: %v", item)
            this.Base.SendItem(item)
        }
    }
}
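// A minimal, self-contained sketch of the same DISTINCT strategy, with
// hypothetical names and plain strings in place of projected values: for each
// surviving entry, nil out any equal entry that appears later in the buffer,
// then emit the survivors in order. The scan is O(n^2) over the buffer.
package main

import "fmt"

func distinctInPlace(buffer []*string) []string {
    out := []string{}
    for pos, item := range buffer {
        if item == nil {
            continue // already marked as a duplicate of an earlier entry
        }
        for nextpos, nextitem := range buffer[pos+1:] {
            if nextitem != nil && *nextitem == *item {
                buffer[pos+nextpos+1] = nil // same index arithmetic as the operator above
            }
        }
        out = append(out, *item)
    }
    return out
}

func main() {
    s := func(v string) *string { return &v }
    buffer := []*string{s("a"), s("b"), s("a"), s("c"), s("b")}
    fmt.Println(distinctInPlace(buffer)) // [a b c]
}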
func newPool(s *site, name string) (*pool, query.Error) {
    clog.To(catalog.CHANNEL, "Created New Pool %s", name)
    cbpool, err := s.client.GetPool(name)
    if err != nil {
        if name == "default" {
            // if default pool is not available, try reconnecting to the server
            url := s.URL()
            client, err := cb.Connect(url)
            if err != nil {
                return nil, query.NewError(nil, fmt.Sprintf("Pool %v not found.", name))
            }
            // check if the default pool exists
            cbpool, err = client.GetPool(name)
            if err != nil {
                return nil, query.NewError(nil, fmt.Sprintf("Pool %v not found.", name))
            }
            s.client = client
        } else {
            // no reconnect retry for non-default pools
            return nil, query.NewError(nil, fmt.Sprintf("Pool %v not found.", name))
        }
    }
    rv := pool{
        site:        s,
        name:        name,
        cbpool:      cbpool,
        bucketCache: make(map[string]catalog.Bucket),
    }
    go keepPoolFresh(&rv)
    return &rv, nil
}
func newBucket(p *pool, name string) (*bucket, query.Error) {
    clog.To(catalog.CHANNEL, "Created New Bucket %s", name)
    cbbucket, err := p.cbpool.GetBucket(name)
    if err != nil {
        // go-couchbase caches the buckets
        // to be sure no such bucket exists right now
        // we trigger a refresh
        p.refresh()
        // and then check one more time
        cbbucket, err = p.cbpool.GetBucket(name)
        if err != nil {
            // really no such bucket exists
            return nil, query.NewError(nil, fmt.Sprintf("Bucket %v not found.", name))
        }
    }
    rv := &bucket{
        pool:     p,
        name:     name,
        cbbucket: cbbucket,
        indexes:  make(map[string]catalog.Index),
    }
    ierr := rv.loadIndexes()
    if ierr != nil {
        return nil, ierr
    }
    return rv, nil
}
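// A standalone sketch of the refresh-and-retry-once idea used above, with
// hypothetical names (catalogCache, lookup); this is not the go-couchbase API,
// only the shape of the retry: on a miss, force the cached view to refresh and
// check exactly one more time before declaring the entry missing.
package main

import (
    "errors"
    "fmt"
)

type catalogCache struct {
    entries map[string]string
    refresh func() map[string]string // re-reads the authoritative source
}

func (c *catalogCache) lookup(name string) (string, error) {
    if v, ok := c.entries[name]; ok {
        return v, nil
    }
    // the cache may simply be stale: refresh it and check one more time
    c.entries = c.refresh()
    if v, ok := c.entries[name]; ok {
        return v, nil
    }
    return "", errors.New("really no such entry exists")
}

func main() {
    c := &catalogCache{
        entries: map[string]string{},
        refresh: func() map[string]string { return map[string]string{"beer-sample": "bucket-uuid-123"} },
    }
    fmt.Println(c.lookup("beer-sample")) // found on the retry after refresh
}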
func (this *HttpEndpoint) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    clog.To(CHANNEL, "request received")
    q := NewHttpQuery(w, r)
    if q != nil {
        this.queryChannel <- q
        q.Process()
    }
}
func (this *Fetch) Run(stopChannel misc.StopChannel) {
    clog.To(CHANNEL, "fetch operator starting")
    if this.Base.Source != nil {
        this.Base.RunOperator(this, stopChannel)
    } else {
        defer close(this.Base.itemChannel)
        defer close(this.Base.supportChannel)
        defer close(this.Base.upstreamStopChannel)
        for _, id := range this.ids {
            doc := dparval.NewValue(map[string]interface{}{})
            doc.SetAttachment("meta", map[string]interface{}{"id": id})
            this.processItem(doc)
        }
        this.afterItems()
    }
    clog.To(CHANNEL, "fetch operator finished, fetched %d", this.rowsFetched)
}
func (this *KeyScan) Run(stopChannel misc.StopChannel) {
    defer close(this.itemChannel)
    defer close(this.supportChannel)
    // this MUST be here so that it runs before the channels are closed
    defer this.RecoverPanic()

    clog.To(CHANNEL, "key scan operator starting")
    for _, item := range this.keylist {
        this.rowsKeyScanned += 1
        // rematerialize an object from the data returned by this index entry
        doc := dparval.NewValue(map[string]interface{}{})
        // attach metadata
        doc.SetAttachment("meta", map[string]interface{}{"id": item})
        this.SendItem(doc)
    }
    clog.To(CHANNEL, "key scan operator finished, scanned %d", this.rowsKeyScanned)
}
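// The "MUST be here" comment above relies on deferred calls running in
// last-in, first-out order: a recover registered after the close runs before
// it. A standalone sketch with hypothetical names showing that a panic in the
// body is recovered and reported before the output channel is closed.
package main

import "fmt"

func produce(out chan<- int) {
    defer close(out) // registered first, runs last
    defer func() {   // registered last, runs first: recovers before the close
        if r := recover(); r != nil {
            fmt.Println("recovered:", r)
        }
    }()
    out <- 1
    panic("boom")
}

func main() {
    out := make(chan int, 1)
    produce(out)
    for v := range out { // range ends cleanly because the channel was closed
        fmt.Println("item:", v)
    }
}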
func (p *pool) refresh() {
    // trigger refresh of this pool
    clog.To(catalog.CHANNEL, "Refreshing Pool %s", p.name)
    newpool, err := p.site.client.GetPool(p.name)
    if err != nil {
        clog.Warnf("Error updating pool: %v", err)
        return
    }
    p.cbpool = newpool
}
func WalkViewInBatches(result chan cb.ViewRow, errs query.ErrorChannel, bucket *cb.Bucket, ddoc string, view string, options map[string]interface{}, batchSize int64, limit int64) {
    if limit != 0 && limit < batchSize {
        batchSize = limit
    }

    defer close(result)
    defer close(errs)

    defer func() {
        r := recover()
        if r != nil {
            clog.Error(fmt.Errorf("View Walking Panic: %v\n%s", r, debug.Stack()))
            errs <- query.NewError(nil, "Panic In View Walking")
        }
    }()

    options["limit"] = batchSize + 1

    numRead := int64(0)
    ok := true
    for ok {
        logURL, err := bucket.ViewURL(ddoc, view, options)
        if err == nil {
            clog.To(NETWORK_CHANNEL, "Request View: %v", logURL)
        }
        vres, err := bucket.View(ddoc, view, options)
        if err != nil {
            errs <- query.NewError(err, "Unable to access view")
            return
        }

        for i, row := range vres.Rows {
            if int64(i) < batchSize {
                // don't process the last row, it's just used to see if we
                // need to continue processing
                result <- row
                numRead += 1
            }
        }

        if (int64(len(vres.Rows)) > batchSize) && (limit == 0 || (limit != 0 && numRead < limit)) {
            // prepare for next run
            skey := vres.Rows[batchSize].Key
            skeydocid := vres.Rows[batchSize].ID
            options["startkey"] = skey
            options["startkey_docid"] = cb.DocID(skeydocid)
        } else {
            // stop
            ok = false
        }
    }
}
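// A standalone sketch of the batchSize+1 sentinel-row paging used above,
// against a hypothetical in-memory sorted key list rather than a Couchbase
// view: each request asks for one extra row, emits only batchSize of them, and
// uses the extra row purely as the start key of the next request.
package main

import "fmt"

// fetch returns up to limit keys starting at startKey, from a sorted list.
func fetch(keys []string, startKey string, limit int) []string {
    out := []string{}
    for _, k := range keys {
        if k >= startKey && len(out) < limit {
            out = append(out, k)
        }
    }
    return out
}

func walkInBatches(keys []string, batchSize int, emit func(string)) {
    startKey := ""
    for {
        rows := fetch(keys, startKey, batchSize+1) // one extra row as a sentinel
        for i, row := range rows {
            if i < batchSize {
                emit(row)
            }
        }
        if len(rows) > batchSize {
            startKey = rows[batchSize] // the sentinel becomes the next start key
        } else {
            return // fewer rows than asked for: we reached the end
        }
    }
}

func main() {
    keys := []string{"a", "b", "c", "d", "e"}
    walkInBatches(keys, 2, func(k string) { fmt.Println("row:", k) })
}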
func (this *DropIndex) Run(stopChannel misc.StopChannel) {
    defer close(this.itemChannel)
    defer close(this.supportChannel)
    // this MUST be here so that it runs before the channels are closed
    defer this.RecoverPanic()

    this.downstreamStopChannel = stopChannel
    clog.To(CHANNEL, "drop_index operator starting")

    err := this.index.Drop()
    if err != nil {
        this.SendError(err)
    } else {
        item := dparval.NewValue(map[string]interface{}{})
        item.SetAttachment("projection", map[string]interface{}{
            "dropped": true,
        })
        this.SendItem(item)
    }

    clog.To(CHANNEL, "drop_index operator finished")
}
func (this *CreateIndex) Run(stopChannel misc.StopChannel) {
    defer close(this.itemChannel)
    defer close(this.supportChannel)
    // this MUST be here so that it runs before the channels are closed
    defer this.RecoverPanic()

    indexType := catalog.IndexType(strings.ToLower(this.index_type))
    indexOn := make(catalog.IndexKey, len(this.on))
    for pos, key := range this.on {
        indexOn[pos] = key
    }

    this.downstreamStopChannel = stopChannel

    var index catalog.Index
    var err query.Error
    if this.primary {
        clog.To(CHANNEL, "create_index (primary) operator starting")
        index, err = this.bucket.CreatePrimaryIndex()
    } else {
        clog.To(CHANNEL, "create_index (secondary) operator starting")
        index, err = this.bucket.CreateIndex(this.name, indexOn, indexType)
    }

    if err != nil {
        this.SendError(err)
    } else {
        if index != nil {
            item := dparval.NewValue(map[string]interface{}{})
            item.SetAttachment("projection", map[string]interface{}{
                "id":   index.Id(),
                "name": index.Name(),
            })
            this.SendItem(item)
        } else {
            clog.Warn("Successfully created index, but index was nil")
        }
    }

    clog.To(CHANNEL, "create_index operator finished")
}
func (this *Scan) Run(stopChannel misc.StopChannel) {
    defer close(this.itemChannel)
    defer close(this.supportChannel)
    // this MUST be here so that it runs before the channels are closed
    defer this.RecoverPanic()

    clog.To(CHANNEL, "scan operator starting")

    if this.ranges == nil {
        this.scanRange(nil)
    } else {
        for _, scanRange := range this.ranges {
            ok := this.scanRange(scanRange)
            if !ok {
                break
            }
        }
    }

    clog.To(CHANNEL, "scan operator finished, scanned %d", this.rowsScanned)
}
func (this *Explain) Run(stopChannel misc.StopChannel) {
    defer close(this.itemChannel)
    defer close(this.supportChannel)
    // this MUST be here so that it runs before the channels are closed
    defer this.RecoverPanic()

    this.downstreamStopChannel = stopChannel
    clog.To(CHANNEL, "explain operator starting")

    item := dparval.NewValue(map[string]interface{}{})
    planBytes, err := json.Marshal(this.Plan)
    if err != nil {
        this.SendError(query.NewError(err, "error serializing plan to JSON"))
    } else {
        projection := dparval.NewValueFromBytes(planBytes)
        item.SetAttachment("projection", projection)
        this.SendItem(item)
    }

    clog.To(CHANNEL, "explain operator finished")
}
func CanIUseThisIndexForThisWhereClause(index catalog.RangeIndex, where ast.Expression, bucket string) (bool, plan.ScanRanges, ast.Expression, error) {
    // convert the index key to formal notation
    indexKeyFormal, err := IndexKeyInFormalNotation(index.Key(), bucket)
    if err != nil {
        return false, nil, nil, err
    }

    // put the where clause into conjunctive normal form
    ennf := ast.NewExpressionNNF()
    whereNNF, err := where.Accept(ennf)
    if err != nil {
        return false, nil, nil, err
    }
    ecnf := ast.NewExpressionCNF()
    whereCNF, err := whereNNF.Accept(ecnf)
    if err != nil {
        return false, nil, nil, err
    }

    switch whereCNF := whereCNF.(type) {
    case *ast.AndOperator:
        // this is an and, we can try to satisfy individual operands
        found := false
        rranges := plan.ScanRanges{}
        for _, oper := range whereCNF.Operands {
            // see if the where clause expression is sargable with respect to the index key
            es := NewExpressionSargable(indexKeyFormal[0])
            oper.Accept(es)
            if es.IsSargable() {
                found = true
                for _, ran := range es.ScanRanges() {
                    rranges = MergeRanges(rranges, ran)
                    clog.To(planner.CHANNEL, "now ranges are: %v", rranges)
                }
            }
        }
        if found {
            return true, rranges, nil, nil
        }
    default:
        // not an and, we must satisfy the whole expression
        // see if the where clause expression is sargable with respect to the index key
        es := NewExpressionSargable(indexKeyFormal[0])
        whereCNF.Accept(es)
        if es.IsSargable() {
            return true, es.ScanRanges(), nil, nil
        }
    }

    // cannot use this index
    return false, nil, nil, nil
}
// NewSite creates a new Couchbase site for the given url.
func NewSite(url string) (catalog.Site, query.Error) {
    clog.To(catalog.CHANNEL, "Created New Site %s", url)
    client, err := cb.Connect(url)
    if err != nil {
        return nil, query.NewError(err, "")
    }

    return &site{
        client:    client,
        poolCache: make(map[string]catalog.Pool),
    }, nil
}
func (this *InterpretedExecutor) Execute(optimalPlan *plan.Plan, q network.Query, timeout *time.Duration) {
    stopChannel := make(misc.StopChannel)
    if timeout.Nanoseconds() < 0 {
        this.executeInternal(optimalPlan, q, stopChannel)
    } else {
        c := make(chan error, 1)
        go func() {
            this.executeInternal(optimalPlan, q, stopChannel)
            c <- nil
        }()
        select {
        case <-c:
            return
        case <-time.After(*timeout):
            clog.To(executor.CHANNEL, "simple executor timeout trigger")
            close(stopChannel)
            clog.To(executor.CHANNEL, "stop channel closed")
        }
        <-c
        q.Response().SendError(query.NewTimeoutError(timeout))
    }
}
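// A standalone sketch of the same timeout shape with hypothetical names
// (work, runWithTimeout): run the work in a goroutine, and on timeout close a
// stop channel, then wait for the worker to acknowledge before reporting the
// timeout, so nothing is left writing after the error is sent.
package main

import (
    "fmt"
    "time"
)

func work(stop chan struct{}) string {
    for i := 0; ; i++ {
        select {
        case <-stop:
            return fmt.Sprintf("stopped after %d steps", i)
        case <-time.After(10 * time.Millisecond): // one unit of work
        }
    }
}

func runWithTimeout(timeout time.Duration) {
    stop := make(chan struct{})
    done := make(chan string, 1)
    go func() { done <- work(stop) }()

    select {
    case msg := <-done:
        fmt.Println("finished:", msg)
        return
    case <-time.After(timeout):
        close(stop) // ask the worker to wind down
    }
    fmt.Println("timed out;", <-done) // wait for the worker before reporting
}

func main() {
    runWithTimeout(35 * time.Millisecond)
}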
func ViewTotalRows(bucket *cb.Bucket, ddoc string, view string, options map[string]interface{}) (int64, query.Error) {
    options["limit"] = 0

    logURL, err := bucket.ViewURL(ddoc, view, options)
    if err == nil {
        clog.To(NETWORK_CHANNEL, "Request View: %v", logURL)
    }
    vres, err := bucket.View(ddoc, view, options)
    if err != nil {
        return 0, query.NewError(err, "Unable to access view")
    }

    return int64(vres.TotalRows), nil
}
func newPool(s *site, name string) (*pool, query.Error) {
    clog.To(catalog.CHANNEL, "Created New Pool %s", name)
    cbpool, err := s.client.GetPool(name)
    if err != nil {
        return nil, query.NewError(nil, fmt.Sprintf("Pool %v not found.", name))
    }
    rv := pool{
        site:        s,
        name:        name,
        cbpool:      cbpool,
        bucketCache: make(map[string]catalog.Bucket),
    }
    go keepPoolFresh(&rv)
    return &rv, nil
}
func (this *FastCount) Run(stopChannel misc.StopChannel) {
    defer close(this.itemChannel)
    defer close(this.supportChannel)
    // this MUST be here so that it runs before the channels are closed
    defer this.RecoverPanic()

    clog.To(CHANNEL, "fastcount operator starting")

    if this.ranges == nil {
        if this.index == nil {
            count, err := this.bucket.Count()
            if err != nil {
                this.SendError(query.NewError(err, "Error counting values in bucket"))
            } else {
                groupDoc := dparval.NewValue(map[string]interface{}{})
                aggregates := map[string]interface{}{"COUNT-true-<nil>-false": dparval.NewValue(float64(count))}
                groupDoc.SetAttachment("aggregates", aggregates)
                this.SendItem(groupDoc)
            }
        } else {
            count, err := this.index.ValueCount()
            if err != nil {
                this.SendError(query.NewError(err, "Error counting values in index"))
            } else {
                groupDoc := dparval.NewValue(map[string]interface{}{})
                aggkey := fmt.Sprintf("COUNT-false-%v-false", this.expr)
                aggregates := map[string]interface{}{aggkey: dparval.NewValue(float64(count))}
                groupDoc.SetAttachment("aggregates", aggregates)
                this.SendItem(groupDoc)
            }
        }
    }
    // FIXME eventually we can support counting ranges

    clog.To(CHANNEL, "fastcount operator finished")
}
func (this *Project) processItem(item *dparval.Value) bool {
    resultMap := map[string]interface{}{}
    for _, resultItem := range this.Result {
        val, err := this.Base.projectedValueOfResultExpression(item, resultItem)
        if err != nil {
            switch err := err.(type) {
            case *dparval.Undefined:
                // undefined contributes nothing to the result map
                continue
            default:
                return this.Base.SendError(query.NewError(err, "unexpected error projecting expression"))
            }
        }

        if resultItem.Star {
            if val != nil {
                valval := val.Value()
                switch valval := valval.(type) {
                case map[string]interface{}:
                    // then if the result was an object
                    // add its contents to the result map
                    for k, v := range valval {
                        resultMap[k] = v
                    }
                }
            }
        } else {
            resultMap[resultItem.As] = val
        }
    }

    if !this.projectEmpty && len(resultMap) == 0 {
        return true
    }

    // build a Value from the projection
    projection := dparval.NewValue(resultMap)

    // store the projection as an attachment on the main item
    item.SetAttachment("projection", projection)

    clog.To(DEBUG_PROJECT_CHANNEL, "projecting: %v", item)

    // write to the output
    return this.Base.SendItem(item)
}
func CanFastCountIndex(index catalog.CountIndex, bucket string, resultExprList ast.ResultExpressionList) ast.Expression {
    // convert the index key to formal notation
    indexKeyFormal, err := IndexKeyInFormalNotation(index.Key(), bucket)
    if err != nil {
        return nil
    }

    deps := ast.ExpressionList{indexKeyFormal[0]}
    clog.To(planner.CHANNEL, "index deps are: %v", deps)
    depChecker := ast.NewExpressionFunctionalDependencyCheckerFull(deps)

    // start looking at the projection
    for _, resultExpr := range resultExprList {
        // cannot be *
        if resultExpr.Star {
            return nil
        }
        switch resultExpr := resultExpr.Expr.(type) {
        case *ast.FunctionCallCount:
            // aggregates all take 1 operand
            operands := resultExpr.GetOperands()
            if len(operands) < 1 {
                return nil
            }
            aggOperand := operands[0]
            // must NOT be *
            if aggOperand.Star {
                return nil
            }
            // look at dependencies inside this operand
            _, err := depChecker.Visit(aggOperand.Expr)
            if err != nil {
                return nil
            }
        default:
            return nil
        }
    }

    // if we made it this far, can do fast count on bucket
    return indexKeyFormal[0]
}
func NewHttpQuery(w http.ResponseWriter, r *http.Request, info bool) *HttpQuery {
    q := HttpQuery{startTime: time.Now(), info: info}

    queryString := findQueryStringInRequest(r)

    if queryString == "" {
        showError(w, "Missing required query string", 500)
        return nil
    } else {
        clog.To(CHANNEL, "query string: %v", queryString)
    }

    q.request = network.StringQueryRequest{QueryString: queryString}

    httpResponse := &HttpResponse{query: &q, w: w, results: make(chan interface{}), returnInfo: info}
    q.response = httpResponse

    return &q
}
func (p *pool) refresh() {
    // trigger refresh of this pool
    clog.To(catalog.CHANNEL, "Refreshing Pool %s", p.name)
    newpool, err := p.site.client.GetPool(p.name)
    if err != nil {
        clog.Warnf("Error updating pool name %s: Error %v", p.name, err)
        url := p.site.URL()
        client, err := cb.Connect(url)
        if err != nil {
            clog.Warnf("Error connecting to URL %s", url)
        }
        // check if the default pool exists
        newpool, err = client.GetPool(p.name)
        if err != nil {
            clog.Warnf("Retry Failed Error updating pool name %s: Error %v", p.name, err)
        }
        p.site.client = client
        return
    }
    p.cbpool = newpool
}
func CanIUseThisIndexForThisProjectionNoWhereNoGroupClause(index catalog.RangeIndex, resultExprList ast.ResultExpressionList, bucket string) (bool, plan.ScanRanges, ast.Expression, error) {
    // convert the index key to formal notation
    indexKeyFormal, err := IndexKeyInFormalNotation(index.Key(), bucket)
    if err != nil {
        return false, nil, nil, err
    }

    // FIXME only looking at first element in key right now
    deps := ast.ExpressionList{indexKeyFormal[0]}
    clog.To(planner.CHANNEL, "index deps are: %v", deps)
    depChecker := ast.NewExpressionFunctionalDependencyCheckerFull(deps)

    // start looking at the projection
    allAggregateFunctionsMin := true
    for _, resultExpr := range resultExprList {
        // presence of * means we cannot use index on field, must see all (for this particular optimization)
        if resultExpr.Star {
            return false, nil, nil, nil
        }
        switch expr := resultExpr.Expr.(type) {
        case ast.AggregateFunctionCallExpression:
            _, isMin := expr.(*ast.FunctionCallMin)
            if !isMin {
                clog.To(planner.CHANNEL, "projection not MIN")
                allAggregateFunctionsMin = false
            }
            // aggregates all take 1 operand
            operands := expr.GetOperands()
            if len(operands) < 1 {
                return false, nil, nil, nil
            }
            aggOperand := operands[0]
            // presence of * means we cannot use this index, must see all (for this particular optimization)
            if aggOperand.Star {
                return false, nil, nil, nil
            }
            // look at dependencies inside this operand
            _, err := depChecker.Visit(aggOperand.Expr)
            if err != nil {
                return false, nil, nil, nil
            }
        default:
            // all expressions must be aggregates for this particular optimization
            return false, nil, nil, nil
        }
    }

    // if we made it this far, we can in fact use the index
    // doing a scan of all non-eliminatable items (non-NULL, non-MISSING)
    dummyOp := ast.NewIsNotNullOperator(indexKeyFormal[0])
    es := NewExpressionSargable(indexKeyFormal[0])
    dummyOp.Accept(es)
    if es.IsSargable() {
        ranges := es.ScanRanges()
        if allAggregateFunctionsMin {
            for _, r := range ranges {
                r.Limit = 1
            }
        }
        return true, ranges, nil, nil
    }
    clog.Error(fmt.Errorf("expected this to never happen"))

    // cannot use this index
    return false, nil, nil, nil
}
func DoesIndexCoverStatement(index catalog.RangeIndex, stmt *ast.SelectStatement) bool {
    if stmt.From.Over != nil {
        // index cannot cover queries containing OVER right now
        return false
    }

    // convert the index key to formal notation
    indexKeyFormal, err := IndexKeyInFormalNotation(index.Key(), stmt.From.As)
    if err != nil {
        return false
    }

    deps := ast.ExpressionList{}
    for _, indexKey := range indexKeyFormal {
        deps = append(deps, indexKey)
    }

    clog.To(planner.CHANNEL, "index deps are: %v", deps)
    depChecker := ast.NewExpressionFunctionalDependencyCheckerFull(deps)

    // first check the projection
    for _, resultExpr := range stmt.Select {
        if resultExpr.Star == true || resultExpr.Expr == nil {
            // currently cannot cover *
            return false
        }
        _, err := depChecker.Visit(resultExpr.Expr)
        if err != nil {
            return false
        }
    }

    if stmt.Where != nil {
        _, err = depChecker.Visit(stmt.Where)
        if err != nil {
            return false
        }
    }

    if stmt.GroupBy != nil {
        for _, groupExpr := range stmt.GroupBy {
            _, err = depChecker.Visit(groupExpr)
            if err != nil {
                return false
            }
        }
        if stmt.Having != nil {
            _, err = depChecker.Visit(stmt.Having)
            if err != nil {
                return false
            }
        }
    }

    if stmt.OrderBy != nil {
        for _, orderExpr := range stmt.OrderBy {
            _, err = depChecker.Visit(orderExpr.Expr)
            if err != nil {
                return false
            }
        }
    }

    // if we got this far it is covered
    return true
}
func (this *SimplePlanner) buildSelectStatementPlans(stmt *ast.SelectStatement, pc plan.PlanChannel, ec query.ErrorChannel) {
    var planHeads []plan.PlanElement

    from := stmt.GetFrom()
    if from == nil {
        // point to :system.dual
        from = &ast.From{Pool: system.POOL_NAME, Bucket: system.BUCKET_NAME_DUAL}
    }

    // get the pool
    poolName := from.Pool
    if poolName == "" {
        poolName = this.defaultPool
    }

    pool, err := this.site.PoolByName(poolName)
    if err != nil {
        ec <- query.NewPoolDoesNotExist(poolName)
        return
    }

    bucket, err := pool.BucketByName(from.Bucket)
    if err != nil {
        ec <- query.NewBucketDoesNotExist(from.Bucket)
        return
    }

    // find all docs index
    indexes, err := bucket.Indexes()
    if err != nil {
        ec <- query.NewError(err, fmt.Sprintf("No indexes found for bucket %v", from.Bucket))
        return
    }

    var keylist []string
    if stmt.Keys != nil {
        keylist = stmt.Keys.GetKeys()
    }

    clog.To(planner.CHANNEL, "Indexes in bucket %v", indexes)

    if keylist == nil {
        for _, index := range indexes {
            var lastStep plan.PlanElement

            switch index := index.(type) {
            case catalog.PrimaryIndex:
                clog.To(planner.CHANNEL, "See primary index %v", index.Name())
                // if from.Over == nil && stmt.Where == nil && stmt.GroupBy != nil && len(stmt.GroupBy) == 0 && CanFastCountBucket(stmt.Select) {
                //     lastStep = plan.NewFastCount(pool.Name(), bucket.Name(), "", nil, nil)
                // } else {
                lastStep = plan.NewScan(pool.Name(), bucket.Name(), index.Name(), nil)
                // }
            case catalog.RangeIndex:
                // see if this index can be used
                clog.To(planner.CHANNEL, "See index %v", index.Name())
                clog.To(planner.CHANNEL, "with Key %v", index.Key())
                if stmt.Where != nil && from.Projection == nil {
                    possible, ranges, _, err := CanIUseThisIndexForThisWhereClause(index, stmt.Where, stmt.From.As)
                    if err != nil {
                        clog.Error(err)
                        continue
                    }
                    clog.To(planner.CHANNEL, "Can I use it1: %v", possible)
                    if possible {
                        // great, but let's check for a min optimization too
                        if stmt.GroupBy != nil && len(stmt.GroupBy) == 0 {
                            possible, minranges, _, _ := CanIUseThisIndexForThisProjectionNoWhereNoGroupClause(index, stmt.Select, stmt.From.As)
                            if possible {
                                for _, r := range ranges {
                                    r.Limit = minranges[0].Limit
                                }
                            }
                        }

                        scan := plan.NewScan(pool.Name(), bucket.Name(), index.Name(), ranges)
                        // see if this index covers the query
                        if DoesIndexCoverStatement(index, stmt) {
                            scan.Cover = true
                            scan.As = from.As
                        }
                        lastStep = scan
                    } else {
                        continue
                    }
                } else if from.Projection == nil {
                    // try to do a fast count if it's possible
                    doingFastCount := false
                    // countIndex, isCountIndex := index.(catalog.CountIndex)
                    // if isCountIndex {
                    //     fastCountIndexOnExpr := CanFastCountIndex(countIndex, stmt.From.As, stmt.Select)
                    //     if fastCountIndexOnExpr != nil && from.Over == nil && stmt.Where == nil && stmt.GroupBy != nil && len(stmt.GroupBy) == 0 {
                    //         lastStep = plan.NewFastCount(pool.Name(), bucket.Name(), countIndex.Name(), fastCountIndexOnExpr, nil)
                    //         doingFastCount = true
                    //     }
                    // }

                    // this works for aggregates on the whole bucket
                    if !doingFastCount && stmt.GroupBy != nil && len(stmt.GroupBy) == 0 {
                        possible, ranges, _, err := CanIUseThisIndexForThisProjectionNoWhereNoGroupClause(index, stmt.Select, stmt.From.As)
                        if err != nil {
                            clog.Error(err)
                            continue
                        }
                        clog.To(planner.CHANNEL, "Can I use it2: %v", possible)
                        if possible {
                            lastStep = plan.NewScan(pool.Name(), bucket.Name(), index.Name(), ranges)
                        } else {
                            continue
                        }
                    } else if !doingFastCount {
                        continue
                    }
                }
            default:
                clog.To(planner.CHANNEL, "Unsupported type of index %T", index)
                continue
            }

            scanOp, lastStepWasScan := lastStep.(*plan.Scan)
            if lastStepWasScan {
                if !scanOp.Cover {
                    lastStep = plan.NewFetch(lastStep, pool.Name(), bucket.Name(), from.Projection, from.As)
                    nextFrom := from.Over
                    for nextFrom != nil {
                        // add document joins
                        if nextFrom.Keys != nil {
                            // This is a key-join
                            lastStep = plan.NewKeyJoin(lastStep, pool.Name(), nextFrom.Bucket, nextFrom.Projection, nextFrom.Type, nextFrom.Oper, *nextFrom.Keys, nextFrom.As)
                        } else {
                            lastStep = plan.NewUnnest(lastStep, nextFrom.Projection, nextFrom.Type, nextFrom.As)
                        }
                        nextFrom = nextFrom.Over
                    }
                }
            }
            planHeads = append(planHeads, lastStep)
        }
    } else if keylist != nil {
        // if keylist is present then we avoid a bucket scan
        var lastStep plan.PlanElement
        lastStep = plan.NewKeyScan(keylist)
        lastStep = plan.NewFetch(lastStep, pool.Name(), bucket.Name(), from.Projection, from.As)
        nextFrom := from.Over
        for nextFrom != nil {
            // add in-document joins
            if nextFrom.Keys != nil {
                lastStep = plan.NewKeyJoin(lastStep, pool.Name(), nextFrom.Bucket, nextFrom.Projection, nextFrom.Type, nextFrom.Oper, *nextFrom.Keys, nextFrom.As)
            } else {
                lastStep = plan.NewUnnest(lastStep, nextFrom.Projection, nextFrom.Type, nextFrom.As)
            }
            nextFrom = nextFrom.Over
        }
        planHeads = append(planHeads, lastStep)
    }

    if len(planHeads) == 0 {
        ec <- query.NewError(nil, fmt.Sprintf("No usable indexes found for bucket %v", from.Bucket))
        return
    }

    // now for all the plan heads, create a full plan
    for _, lastStep := range planHeads {
        if stmt.GetWhere() != nil {
            ids := WhereClauseFindById(stmt.GetWhere())
            fetch, ok := lastStep.(*plan.Fetch)
            if ids != nil && ok {
                fetch.ConvertToIds(ids)
            } else {
                lastStep = plan.NewFilter(lastStep, stmt.GetWhere())
            }
        }
        if stmt.GetGroupBy() != nil {
            _, isFastCount := lastStep.(*plan.FastCount)
            if !isFastCount {
                lastStep = plan.NewGroup(lastStep, stmt.GetGroupBy(), stmt.GetAggregateReferences())
            }
        }
        if stmt.GetHaving() != nil {
            lastStep = plan.NewFilter(lastStep, stmt.GetHaving())
        }
        lastStep = plan.NewProjector(lastStep, stmt.GetResultExpressionList(), true)
        if stmt.IsDistinct() {
            lastStep = plan.NewEliminateDuplicates(lastStep)
        }
        if stmt.GetOrderBy() != nil {
            explicitAliases := stmt.GetExplicitProjectionAliases()
            lastStep = plan.NewOrder(lastStep, stmt.GetOrderBy(), explicitAliases)
        }
        if stmt.GetOffset() != 0 {
            lastStep = plan.NewOffset(lastStep, stmt.GetOffset())
        }
        if stmt.GetLimit() >= 0 {
            lastStep = plan.NewLimit(lastStep, stmt.GetLimit())
        }
        if stmt.ExplainOnly {
            lastStep = plan.NewExplain(lastStep)
        }
        pc <- plan.Plan{Root: lastStep}
    }
}