func (this *StandardCompiler) Compile(queryString string) (*plan.Plan, query.Error) {
	ast, err := this.parser.Parse(queryString)
	if err != nil {
		return nil, query.NewParseError(err, "Parse Error")
	}

	// perform semantic verification
	err = ast.VerifySemantics()
	if err != nil {
		return nil, query.NewSemanticError(err, "Semantic Error")
	}

	// simplify the statement
	err = ast.Simplify()
	if err != nil {
		return nil, query.NewError(err, "Error Simplifying Expression")
	}

	planChannel, planErrChannel := this.planner.Plan(ast)

	optimalPlan, err := this.optimizer.Optimize(planChannel, planErrChannel)
	if err != nil {
		return nil, query.NewError(err, "Optimizer Error")
	}

	return optimalPlan, nil
}
func (s *site) loadPools() (e query.Error) {
	dirEntries, err := ioutil.ReadDir(s.path)
	if err != nil {
		return query.NewError(err, "")
	}

	s.pools = make(map[string]*pool)
	s.poolNames = make([]string, 0)

	var p *pool
	for _, dirEntry := range dirEntries {
		if dirEntry.IsDir() {
			s.poolNames = append(s.poolNames, dirEntry.Name())
			diru := strings.ToUpper(dirEntry.Name())
			if _, ok := s.pools[diru]; ok {
				return query.NewError(nil, "Duplicate pool name "+dirEntry.Name())
			}

			p, e = newPool(s, dirEntry.Name())
			if e != nil {
				return
			}
			s.pools[diru] = p
		}
	}
	return
}
func (pi *bucketIndex) Lookup(value catalog.LookupValue, ch catalog.EntryChannel, warnch, errch query.ErrorChannel) {
	defer close(ch)
	defer close(warnch)
	defer close(errch)

	if value == nil || len(value) != 1 || value[0].Type() != dparval.STRING {
		errch <- query.NewError(nil, "Invalid lookup value: string required.")
		return
	}

	val, ok := value[0].Value().(string)
	if !ok {
		errch <- query.NewError(nil, "Invalid lookup value: string required.")
		return
	}

	ids := strings.SplitN(val, "/", 2)
	if len(ids) != 2 {
		return
	}

	pool, _ := pi.bucket.pool.site.actualSite.PoolById(ids[0])
	if pool == nil {
		return
	}

	bucket, _ := pool.BucketById(ids[1])
	if bucket != nil {
		entry := catalog.IndexEntry{PrimaryKey: fmt.Sprintf("%s/%s", pool.Id(), bucket.Id())}
		ch <- &entry
	}
}
// Count sums the number of indexes across every bucket in every pool.
func (b *indexbucket) Count() (int64, query.Error) {
	count := int64(0)
	poolIds, err := b.pool.site.actualSite.PoolIds()
	if err != nil {
		return 0, query.NewError(err, "")
	}
	for _, poolId := range poolIds {
		pool, err := b.pool.site.actualSite.PoolById(poolId)
		if err != nil {
			return 0, query.NewError(err, "")
		}
		bucketIds, err := pool.BucketIds()
		if err != nil {
			return 0, query.NewError(err, "")
		}
		for _, bucketId := range bucketIds {
			bucket, err := pool.BucketById(bucketId)
			if err != nil {
				return 0, query.NewError(err, "")
			}
			indexIds, err := bucket.IndexIds()
			if err != nil {
				return 0, query.NewError(err, "")
			}
			count += int64(len(indexIds))
		}
	}
	return count, nil
}
func (pi *primaryIndex) Lookup(value catalog.LookupValue, ch catalog.EntryChannel, warnch, errch query.ErrorChannel) {
	defer close(ch)
	defer close(warnch)
	defer close(errch)

	if value == nil || len(value) != 1 || value[0].Type() != dparval.STRING {
		errch <- query.NewError(nil, "Invalid lookup value: string required.")
		return
	}

	val, ok := value[0].Value().(string)
	if !ok {
		errch <- query.NewError(nil, "Invalid lookup value: string required.")
		return
	}

	fi, err := os.Lstat(filepath.Join(pi.bucket.path(), val+".json"))
	if err != nil && !os.IsNotExist(err) {
		errch <- query.NewError(err, "IO error during lookup.")
		return
	}

	if fi != nil {
		entry := catalog.IndexEntry{EntryKey: value, PrimaryKey: val}
		ch <- &entry
	}
}
func (p *pool) loadBuckets() (e query.Error) {
	dirEntries, err := ioutil.ReadDir(p.path())
	if err != nil {
		return query.NewError(err, "")
	}

	p.buckets = make(map[string]*bucket)
	p.bucketNames = make([]string, 0)

	var b *bucket
	for _, dirEntry := range dirEntries {
		if dirEntry.IsDir() {
			diru := strings.ToUpper(dirEntry.Name())
			if _, ok := p.buckets[diru]; ok {
				return query.NewError(nil, "Duplicate bucket name "+dirEntry.Name())
			}

			b, e = newBucket(p, dirEntry.Name())
			if e != nil {
				return
			}
			p.buckets[diru] = b
			p.bucketNames = append(p.bucketNames, b.Name())
		}
	}
	return
}
func newPool(s *site, name string) (*pool, query.Error) {
	clog.To(catalog.CHANNEL, "Created New Pool %s", name)

	cbpool, err := s.client.GetPool(name)
	if err != nil {
		// only the default pool is worth a reconnect attempt;
		// anything else that fails to fetch simply does not exist
		if name != "default" {
			return nil, query.NewError(nil, fmt.Sprintf("Pool %v not found.", name))
		}
		// if the default pool is not available, try reconnecting to the server
		url := s.URL()
		client, err := cb.Connect(url)
		if err != nil {
			return nil, query.NewError(nil, fmt.Sprintf("Pool %v not found.", name))
		}
		// check if the default pool exists
		cbpool, err = client.GetPool(name)
		if err != nil {
			return nil, query.NewError(nil, fmt.Sprintf("Pool %v not found.", name))
		}
		s.client = client
	}

	rv := pool{
		site:        s,
		name:        name,
		cbpool:      cbpool,
		bucketCache: make(map[string]catalog.Bucket),
	}
	go keepPoolFresh(&rv)
	return &rv, nil
}
func WalkViewInBatches(result chan cb.ViewRow, errs query.ErrorChannel, bucket *cb.Bucket,
	ddoc string, view string, options map[string]interface{}, batchSize int64, limit int64) {

	if limit != 0 && limit < batchSize {
		batchSize = limit
	}

	defer close(result)
	defer close(errs)

	defer func() {
		r := recover()
		if r != nil {
			clog.Error(fmt.Errorf("View Walking Panic: %v\n%s", r, debug.Stack()))
			errs <- query.NewError(nil, "Panic In View Walking")
		}
	}()

	options["limit"] = batchSize + 1

	numRead := int64(0)
	ok := true
	for ok {
		logURL, err := bucket.ViewURL(ddoc, view, options)
		if err == nil {
			clog.To(NETWORK_CHANNEL, "Request View: %v", logURL)
		}
		vres, err := bucket.View(ddoc, view, options)
		if err != nil {
			errs <- query.NewError(err, "Unable to access view")
			return
		}

		for i, row := range vres.Rows {
			if int64(i) < batchSize {
				// don't process the last row; it's just used to see if we
				// need to continue processing
				result <- row
				numRead += 1
			}
		}

		if (int64(len(vres.Rows)) > batchSize) && (limit == 0 || (limit != 0 && numRead < limit)) {
			// prepare for the next run
			skey := vres.Rows[batchSize].Key
			skeydocid := vres.Rows[batchSize].ID
			options["startkey"] = skey
			options["startkey_docid"] = cb.DocID(skeydocid)
		} else {
			// stop
			ok = false
		}
	}
}
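// A minimal consumer sketch for WalkViewInBatches; this is not from the
// original source, and the design doc "ddoc" and view "by_id" names are
// hypothetical. Because the walker sends on errs before closing the row
// channel, a caller that drains only the row channel can deadlock with the
// walker blocked on the error send; selecting over both channels until
// both are closed avoids that.
func consumeViewWalk(bucket *cb.Bucket) {
	rows := make(chan cb.ViewRow)
	errs := make(query.ErrorChannel)
	go WalkViewInBatches(rows, errs, bucket, "ddoc", "by_id", map[string]interface{}{}, 1000, 0)

	for rows != nil || errs != nil {
		select {
		case row, ok := <-rows:
			if !ok {
				rows = nil // channel drained; stop selecting on it
				continue
			}
			clog.To(NETWORK_CHANNEL, "row id: %v", row.ID)
		case err, ok := <-errs:
			if !ok {
				errs = nil
				continue
			}
			clog.Error(err)
		}
	}
}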
func (vi *viewIndex) Drop() query.Error {
	bucket := vi.bucket
	if vi.IsPrimary() {
		return query.NewError(nil, "Primary index cannot be dropped.")
	}
	err := vi.DropViewIndex()
	if err != nil {
		return query.NewError(err, fmt.Sprintf("Cannot drop index %s", vi.Name()))
	}
	delete(bucket.indexes, vi.name)
	return nil
}
func (b *bucket) CreatePrimaryIndex() (catalog.PrimaryIndex, query.Error) {
	if _, exists := b.indexes[PRIMARY_INDEX]; exists {
		return nil, query.NewError(nil, "Primary index already exists")
	}
	idx, err := newPrimaryIndex(b)
	if err != nil {
		return nil, query.NewError(err, "Error creating primary index")
	}
	b.indexes[idx.Name()] = idx
	return idx, nil
}
func (b *bucket) IndexByName(name string) (catalog.Index, query.Error) {
	index, ok := b.indexes[name]
	if !ok {
		return nil, query.NewError(nil, fmt.Sprintf("Index %v not found.", name))
	}
	return index, nil
}
func (this *KeyJoin) joinItems(item *dparval.Value, keyItem *dparval.Value) bool {
	if keyItem == nil {
		if this.Type == "LEFT" {
			return this.Base.SendItem(item)
		}
		return true
	}

	newItem := item.Duplicate()

	/* join the item and ship it */
	if this.Projection != nil {
		keyProj, Error := this.Base.Evaluate(this.Projection, keyItem)
		if Error != nil {
			switch err := Error.(type) {
			case *dparval.Undefined:
				return true
			default:
				return this.Base.SendError(query.NewError(err, "Internal error in KeyJoin"))
			}
		}
		newItem.SetPath(this.As, keyProj)
	} else {
		newItem.SetPath(this.As, keyItem)
	}

	this.rowsFetched += 1
	this.Base.SendItem(newItem)
	return true
}
func (b *bucket) Count() (int64, query.Error) {
	dirEntries, err := ioutil.ReadDir(b.path())
	if err != nil {
		return 0, query.NewError(err, "")
	}
	return int64(len(dirEntries)), nil
}
func (b *bucket) BulkFetch(ids []string) (map[string]*dparval.Value, query.Error) {
	rv := make(map[string]*dparval.Value, 0)

	bulkResponse, err := b.cbbucket.GetBulk(ids)
	if err != nil {
		return nil, query.NewError(err, "Error doing bulk get")
	}

	for k, v := range bulkResponse {
		doc := dparval.NewValueFromBytes(v.Body)

		// decode the flags as a big-endian uint32 from the first four
		// extras bytes; each byte must be widened before shifting,
		// otherwise the shift overflows the byte type and the high
		// bytes are silently lost
		meta_flags := uint32(v.Extras[0])<<24 |
			uint32(v.Extras[1])<<16 |
			uint32(v.Extras[2])<<8 |
			uint32(v.Extras[3])

		meta_type := "json"
		if doc.Type() == dparval.NOT_JSON {
			meta_type = "base64"
		}

		doc.SetAttachment("meta", map[string]interface{}{
			"id":    k,
			"cas":   float64(v.Cas),
			"type":  meta_type,
			"flags": float64(meta_flags),
		})
		rv[k] = doc
	}
	return rv, nil
}
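// An equivalent decoding sketch (not from the original source): the manual
// shifts above amount to reading a big-endian uint32, which the standard
// library can do directly. Assumes the extras slice holds at least four
// bytes, as it does for memcached GET responses; requires importing
// "encoding/binary".
func decodeMetaFlags(extras []byte) uint32 {
	// interpret the first four extras bytes as a big-endian uint32
	return binary.BigEndian.Uint32(extras[:4])
}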
func (b *bucket) CreatePrimaryIndex() (catalog.PrimaryIndex, query.Error) {
	if b.primary != nil {
		return b.primary, nil
	}
	return nil, query.NewError(nil, "Not supported.")
}
func (this *FastCount) RecoverPanic() {
	r := recover()
	if r != nil {
		clog.Error(fmt.Errorf("Query Execution Panic: %v\n%s", r, debug.Stack()))
		this.SendError(query.NewError(nil, "Panic In Execution Pipeline"))
	}
}
func (this *Grouper) processItem(item *dparval.Value) bool {
	groupkey := dparval.NewValue(make([]interface{}, len(this.GroupBy)))
	for i, groupElement := range this.GroupBy {
		groupkeyval, err := this.Base.Evaluate(groupElement, item)
		if err == nil {
			groupkey.SetIndex(i, groupkeyval)
		} else {
			switch err := err.(type) {
			case *dparval.Undefined:
				// FIXME better way?
				groupkey.SetIndex(i, "__tuqtng__MISSING__")
			default:
				return this.Base.SendError(query.NewError(err, "error evaluating group by"))
			}
		}
	}

	// FIXME slow, but lets me use map to match same groups
	groupkeybytes := groupkey.Bytes()
	groupkeystring := string(groupkeybytes)

	group, ok := this.groups[groupkeystring]
	if !ok {
		// new group
		this.groups[groupkeystring] = item
		group = item
		this.setGroupDefaults(group)
	}
	this.updateGroup(group, item)
	return true
}
func (p *pool) BucketByName(name string) (b catalog.Bucket, e query.Error) {
	b, ok := p.buckets[name]
	if !ok {
		return nil, query.NewError(nil, "Bucket "+name+" not found.")
	}
	return b, nil
}
func (b *poolbucket) Count() (int64, query.Error) {
	poolIds, err := b.pool.site.actualSite.PoolIds()
	if err == nil {
		return int64(len(poolIds)), nil
	}
	return 0, query.NewError(err, "")
}
func newBucket(p *pool, name string) (*bucket, query.Error) {
	clog.To(catalog.CHANNEL, "Created New Bucket %s", name)

	cbbucket, err := p.cbpool.GetBucket(name)
	if err != nil {
		// go-couchbase caches the buckets;
		// to be sure no such bucket exists right now,
		// we trigger a refresh
		p.refresh()
		// and then check one more time
		cbbucket, err = p.cbpool.GetBucket(name)
		if err != nil {
			// really no such bucket exists
			return nil, query.NewError(nil, fmt.Sprintf("Bucket %v not found.", name))
		}
	}

	rv := &bucket{
		pool:     p,
		name:     name,
		cbbucket: cbbucket,
		indexes:  make(map[string]catalog.Index),
	}

	ierr := rv.loadIndexes()
	if ierr != nil {
		return nil, ierr
	}

	return rv, nil
}
func (s *site) PoolByName(name string) (p catalog.Pool, e query.Error) {
	p, ok := s.pools[name]
	if !ok {
		return nil, query.NewError(nil, "Pool "+name+" not found.")
	}
	return p, nil
}
// simplest possible implementation
// 1. read all plans off plan channel
// 2. return last plan
func (this *SimpleOptimizer) Optimize(planChannel plan.PlanChannel, errChannel query.ErrorChannel) (*plan.Plan, query.Error) {
	plans := make([]plan.Plan, 0)

	var p plan.Plan
	var err query.Error
	ok := true
	for ok {
		select {
		case p, ok = <-planChannel:
			if ok {
				clog.To(optimizer.CHANNEL, "See plan %v", p)
				plans = append(plans, p)
			}
		case err, ok = <-errChannel:
			if err != nil {
				return nil, err
			}
		}
	}

	if len(plans) > 0 {
		chosenPlan := plans[len(plans)-1]
		clog.To(optimizer.CHANNEL, "Choosing plan %v", chosenPlan)
		return &chosenPlan, nil
	}

	return nil, query.NewError(nil, "No plans produced for optimizer to choose from")
}
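// A hypothetical driver sketch (not from the original source) showing the
// contract Optimize relies on: the planner goroutine sends plans on the
// plan channel, then closes both channels so the select loop above can
// terminate. The helper name and the assumption that plan.PlanChannel is
// a channel of plan.Plan are illustrative only.
func chooseLastPlan(opt *SimpleOptimizer, candidate plan.Plan) (*plan.Plan, query.Error) {
	pc := make(plan.PlanChannel)
	ec := make(query.ErrorChannel)
	go func() {
		pc <- candidate // feed a single candidate plan
		close(pc)       // closing ends the optimizer's read loop
		close(ec)
	}()
	return opt.Optimize(pc, ec)
}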
func (s *site) PoolByName(name string) (p catalog.Pool, e query.Error) {
	p, ok := s.pools[strings.ToUpper(name)]
	if !ok {
		e = query.NewError(nil, "Pool "+name+" not found.")
	}
	return
}
func (p *pool) BucketByName(name string) (b catalog.Bucket, e query.Error) {
	b, ok := p.buckets[strings.ToUpper(name)]
	if !ok {
		e = query.NewError(nil, "Bucket "+name+" not found.")
	}
	return
}
func (b *bucket) Fetch(id string) (item *dparval.Value, e query.Error) {
	i, err := strconv.Atoi(id)
	if err != nil {
		return nil, query.NewError(err, fmt.Sprintf("no mock item: %v", id))
	}
	return genItem(i, b.nitems)
}
func (this *KeyNest) flushBatch(baseItem *dparval.Value, ids []string) bool {
	bulkResponse, err := this.bucket.BulkFetch(ids)
	if err != nil {
		return this.Base.SendError(query.NewError(err, "error getting bulk response"))
	}

	// now we need to emit the bulk fetched items in the correct order (from the id list)
	for _, v := range ids {
		item, ok := bulkResponse[v]
		if ok {
			if this.Projection != nil {
				projectedVal, err := this.Base.projectedValueOfResultExpression(item, ast.NewResultExpression(this.Projection))
				if err != nil {
					switch err := err.(type) {
					case *dparval.Undefined:
						// undefined contributes nothing to the result map
						continue
					default:
						return this.Base.SendError(query.NewError(err, "unexpected error projecting fetch expression"))
					}
				} else {
					this.Right = append(this.Right, projectedVal)
				}
			} else {
				this.Right = append(this.Right, item)
			}
			this.rowsFetched += 1
		}
	}

	if len(this.Right) > 0 {
		baseItem.SetPath(this.As, this.Right)
	}

	// if the length of the array is 0 and the type of join is not LEFT
	// then we return an empty result for this evaluation
	if len(this.Right) == 0 && this.Type != "LEFT" {
		return true
	}

	this.Base.SendItem(baseItem)
	return true
}
func genItem(i int, nitems int) (*dparval.Value, query.Error) {
	if i < 0 || i >= nitems {
		return nil, query.NewError(nil, fmt.Sprintf("item out of mock range: %v [0,%v)", i, nitems))
	}
	id := strconv.Itoa(i)
	doc := dparval.NewValue(map[string]interface{}{"id": id, "i": float64(i)})
	doc.SetAttachment("meta", map[string]interface{}{"id": id})
	return doc, nil
}
func (this *KeyJoin) Run(stopChannel misc.StopChannel) {
	clog.To(CHANNEL, "key join operator starting")
	if this.Base.Source != nil {
		this.Base.RunOperator(this, stopChannel)
	} else {
		this.Base.SendError(query.NewError(fmt.Errorf("missing source operator"), ""))
	}
	clog.To(CHANNEL, "key join operator finished, fetched %d", this.rowsFetched)
}
func (b *sitebucket) Fetch(id string) (item *dparval.Value, e query.Error) {
	if id == b.pool.site.actualSite.Id() {
		doc := map[string]interface{}{
			"id":  b.pool.site.actualSite.Id(),
			"url": b.pool.site.actualSite.URL(),
		}
		return dparval.NewValue(doc), nil
	}
	return nil, query.NewError(nil, "Not Found")
}
func (this *KeyJoin) flushBatch(baseItem *dparval.Value, ids []string) bool {
	bulkResponse, err := this.bucket.BulkFetch(ids)
	if err != nil {
		return this.Base.SendError(query.NewError(err, "error getting bulk response"))
	}

	// now we need to emit the bulk fetched items in the correct order (from the id list)
	for _, v := range ids {
		item, ok := bulkResponse[v]
		newItem := baseItem.Duplicate()

		if item == nil {
			if this.Type == "LEFT" {
				this.Base.SendItem(newItem)
			}
			continue
		}

		if ok {
			if this.Projection != nil {
				projectedVal, err := this.Base.projectedValueOfResultExpression(item, ast.NewResultExpression(this.Projection))
				if err != nil {
					switch err := err.(type) {
					case *dparval.Undefined:
						// undefined contributes nothing to the result map
						continue
					default:
						return this.Base.SendError(query.NewError(err, "unexpected error projecting fetch expression"))
					}
				} else {
					newItem.SetPath(this.As, projectedVal)
				}
			} else {
				newItem.SetPath(this.As, item)
			}
			this.Base.SendItem(newItem)
			this.rowsFetched += 1
		}
	}
	return true
}