Example #1
func (this *Order) Less(i, j int) bool {
	left := this.buffer[i]
	right := this.buffer[j]

	for _, oe := range this.OrderBy {
		leftVal, lerr := this.Base.Evaluate(oe.Expr, left)
		if lerr != nil {
			switch lerr := lerr.(type) {
			case *dparval.Undefined:
			default:
				clog.Error(lerr)
				return false
			}
		}
		rightVal, rerr := this.Base.Evaluate(oe.Expr, right)
		if rerr != nil {
			switch rerr := rerr.(type) {
			case *dparval.Undefined:
			default:
				clog.Error(rerr)
				return false
			}
		}

		// at this point, the only errors left should be MISSING/UNDEFINED
		if oe.Ascending && lerr != nil && rerr == nil {
			// ascending, left missing, right not, left is less
			return true
		} else if !oe.Ascending && rerr != nil && lerr == nil {
			// descending, right missing, left not, left is more
			return true
		} else if !oe.Ascending && lerr != nil && rerr == nil {
			// descending, left missing, right not, left is less
			return false
		} else if oe.Ascending && rerr != nil && lerr == nil {
			// ascending, right missing, left not, left is more
			return false
		} else if lerr == nil && rerr == nil {
			lv := leftVal.Value()
			rv := rightVal.Value()

			// both not missing, compare values
			result := ast.CollateJSON(lv, rv)
			if result != 0 {
				if oe.Ascending && result < 0 {
					return true
				} else if !oe.Ascending && result > 0 {
					return true
				} else {
					return false
				}
			}
		}
		// at this level they are the same, keep going
	}

	// if we get to this point, the order expressions could not differentiate between the elements
	return false
}
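Less, together with Len and Swap (not shown in this excerpt), presumably satisfies sort.Interface; returning false once every ORDER BY term has compared equal is exactly what sort expects for elements that are equivalent under the ordering.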
Example #2
func (this *FastCount) RecoverPanic() {
	r := recover()
	if r != nil {
		clog.Error(fmt.Errorf("Query Execution Panic: %v\n%s", r, debug.Stack()))
		this.SendError(query.NewError(nil, "Panic In Execution Pipeline"))
	}
}
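recover only intercepts a panic when it runs inside a function that was deferred by the panicking goroutine, so RecoverPanic only has an effect when installed with defer. A minimal hypothetical call site (Run and its body are illustrative, not part of this example):

func (this *FastCount) Run() {
	// the deferred call fires even if the body below panics
	defer this.RecoverPanic()
	// ... pipeline execution that may panic ...
}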
Example #3
func main() {

	flag.Parse()

	clog.EnableKey("PARSER")
	clog.EnableKey("SCANNER")

	parser := goyacc.NewN1qlParserWithDebug(*debugLevel)

	reader := bufio.NewReader(os.Stdin)

	for {
		line, err := reader.ReadString('\n')
		if err != nil {
			break
		}

		// ReadString keeps the trailing '\n', so a blank line is "\n",
		// never ""; trim before testing for empty input
		if strings.TrimSpace(line) == "" {
			continue
		}

		ast, err := parser.Parse(line)
		if err != nil {
			clog.Error(err)
		} else {
			fmt.Printf("%v\n", ast)
		}
	}

}
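The same Parse call should work for a one-shot, non-interactive parse; a hypothetical sketch (the statement text is illustrative):

stmt, err := parser.Parse("SELECT * FROM orders")
if err != nil {
	clog.Error(err)
} else {
	fmt.Printf("%v\n", stmt)
}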
Example #4
func HandleInteractiveMode(tiServer, prompt string) {

	// try to find a HOME environment variable
	homeDir := os.Getenv("HOME")
	if homeDir == "" {
		// then try USERPROFILE for Windows
		homeDir = os.Getenv("USERPROFILE")
		if homeDir == "" {
			fmt.Printf("Unable to determine home directory, history file disabled\n")
		}
	}

	var liner = liner.NewLiner()
	defer liner.Close()

	LoadHistory(liner, homeDir)

	go signalCatcher(liner)

	// state for reading a multi-line query
	queryLines := []string{}
	fullPrompt := prompt + QRY_PROMPT1
	for {
		line, err := liner.Prompt(fullPrompt)
		if err != nil {
			break
		}

		line = strings.TrimSpace(line)
		if line == "" {
			continue
		}

		// Building query string mode: set prompt, gather current line
		fullPrompt = QRY_PROMPT2
		queryLines = append(queryLines, line)

		// If the current line ends with a QRY_EOL, join all query lines,
		// trim off trailing QRY_EOL characters, and submit the query string:
		if strings.HasSuffix(line, QRY_EOL) {
			queryString := strings.Join(queryLines, " ")
			for strings.HasSuffix(queryString, QRY_EOL) {
				queryString = strings.TrimSuffix(queryString, QRY_EOL)
			}
			if queryString != "" {
				UpdateHistory(liner, homeDir, queryString+QRY_EOL)
				err = execute_internal(tiServer, queryString, os.Stdout)
				if err != nil {
					clog.Error(err)
				}
			}
			// reset state for multi-line query
			queryLines = []string{}
			fullPrompt = prompt + QRY_PROMPT1
		}
	}

}
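QRY_PROMPT1, QRY_PROMPT2, and QRY_EOL are package constants not shown in this excerpt; QRY_EOL is the statement terminator whose presence at the end of a line triggers submission (conventionally a semicolon, though its value is not visible here).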
Example #5
func WalkViewInBatches(result chan cb.ViewRow, errs query.ErrorChannel, bucket *cb.Bucket,
	ddoc string, view string, options map[string]interface{}, batchSize int64, limit int64) {

	if limit != 0 && limit < batchSize {
		batchSize = limit
	}

	defer close(result)
	defer close(errs)

	defer func() {
		r := recover()
		if r != nil {
			clog.Error(fmt.Errorf("View Walking Panic: %v\n%s", r, debug.Stack()))
			errs <- query.NewError(nil, "Panic In View Walking")
		}
	}()

	options["limit"] = batchSize + 1

	numRead := int64(0)
	ok := true
	for ok {

		logURL, err := bucket.ViewURL(ddoc, view, options)
		if err == nil {
			clog.To(NETWORK_CHANNEL, "Request View: %v", logURL)
		}
		vres, err := bucket.View(ddoc, view, options)
		if err != nil {
			errs <- query.NewError(err, "Unable to access view")
			return
		}

		for i, row := range vres.Rows {
			if int64(i) < batchSize {
				// don't process the extra last row; it's only fetched to
				// tell us whether we need to continue with another batch
				result <- row
				numRead++
			}
		}

		if int64(len(vres.Rows)) > batchSize && (limit == 0 || numRead < limit) {
			// prepare for next run
			skey := vres.Rows[batchSize].Key
			skeydocid := vres.Rows[batchSize].ID
			options["startkey"] = skey
			options["startkey_docid"] = cb.DocID(skeydocid)
		} else {
			// stop
			ok = false
		}
	}
}
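Callers must drain both channels until they close (the deferred close calls above guarantee they eventually do); consuming only the row channel risks blocking the walker on its next error send. Example #10 below shows a consumer that services both channels from a single select loop.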
Example #6
func (this *Scan) buildValue(key ast.Expression, val *dparval.Value) *dparval.Value {
	doc := dparval.NewValue(map[string]interface{}{})
	switch key := key.(type) {
	case *ast.LiteralNumber:
		doc = dparval.NewValue([]interface{}{})
		index := int(key.Val)
		doc.SetIndex(index, val)
	case *ast.Property:
		doc.SetPath(key.Path, val)
	case *ast.DotMemberOperator:
		doc = this.buildValue(key.Left, this.buildValue(key.Right, val))
	case *ast.BracketMemberOperator:
		doc = this.buildValue(key.Left, this.buildValue(key.Right, val))
	default:
		clog.Error(fmt.Errorf("Unsupported key type %T encountered while uncovering query", key))
	}
	return doc
}
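To trace the recursion through the cases above: for a DotMemberOperator key representing foo.bar, the inner call wraps val as {"bar": val} and the outer call wraps that result again, yielding {"foo": {"bar": val}}; a LiteralNumber key such as 2 instead produces an array with val placed at index 2.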
Example #7
func HandleInteractiveMode(tiServer, prompt string) {

	// try to find a HOME environment variable
	homeDir := os.Getenv("HOME")
	if homeDir == "" {
		// then try USERPROFILE for Windows
		homeDir = os.Getenv("USERPROFILE")
		if homeDir == "" {
			fmt.Printf("Unable to determine home directory, history file disabled\n")
		}
	}

	var liner = liner.NewLiner()
	defer liner.Close()

	LoadHistory(liner, homeDir)

	go signalCatcher(liner)

	for {
		line, err := liner.Prompt(prompt + "> ")
		if err != nil {
			break
		}

		if line == "" {
			continue
		}

		UpdateHistory(liner, homeDir, line)
		err = execute_internal(tiServer, line, os.Stdout)
		if err != nil {
			clog.Error(err)
		}
	}

}
Example #8
func CanIUseThisIndexForThisProjectionNoWhereNoGroupClause(index catalog.RangeIndex, resultExprList ast.ResultExpressionList, bucket string) (bool, plan.ScanRanges, ast.Expression, error) {

	// convert the index key to formal notation
	indexKeyFormal, err := IndexKeyInFormalNotation(index.Key(), bucket)
	if err != nil {
		return false, nil, nil, err
	}

	// FIXME only looking at first element in key right now
	deps := ast.ExpressionList{indexKeyFormal[0]}
	clog.To(planner.CHANNEL, "index deps are: %v", deps)
	depChecker := ast.NewExpressionFunctionalDependencyCheckerFull(deps)

	// start looking at the projection
	allAggregateFunctionsMin := true
	for _, resultExpr := range resultExprList {

		// presence of * means we cannot use index on field, must see all (for this particular optimization)
		if resultExpr.Star {
			return false, nil, nil, nil
		}

		switch expr := resultExpr.Expr.(type) {
		case ast.AggregateFunctionCallExpression:
			_, isMin := expr.(*ast.FunctionCallMin)
			if !isMin {
				clog.To(planner.CHANNEL, "projection not MIN")
				allAggregateFunctionsMin = false
			}
			// aggregates all take 1 operand
			operands := expr.GetOperands()
			if len(operands) < 1 {
				return false, nil, nil, nil
			}
			aggOperand := operands[0]
			// presence of * means we cannot use this index, must see all (for this particular optimization)
			if aggOperand.Star {
				return false, nil, nil, nil
			}
			// look at dependencies inside this operand
			_, err := depChecker.Visit(aggOperand.Expr)
			if err != nil {
				return false, nil, nil, nil
			}
		default:
			// all expressions must be aggregates for this particular optimization
			return false, nil, nil, nil
		}
	}

	// if we made it this far, we can in fact use the index
	// doing a scan of all non-eliminatable items (non-NULL, non-MISSING)
	dummyOp := ast.NewIsNotNullOperator(indexKeyFormal[0])
	es := NewExpressionSargable(indexKeyFormal[0])
	dummyOp.Accept(es)
	if es.IsSargable() {
		ranges := es.ScanRanges()
		if allAggregateFunctionsMin {
			for _, r := range ranges {
				r.Limit = 1
			}
		}
		return true, ranges, nil, nil
	}
	clog.Error(fmt.Errorf("expected this to never happen"))

	// cannot use this index
	return false, nil, nil, nil
}
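Setting r.Limit = 1 on each range relies on the range index returning entries in key order: once the IS NOT NULL sarg excludes NULL and MISSING entries, the first entry of each range is the minimum, which is all a MIN() projection needs.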
Example #9
func (this *SimplePlanner) buildSelectStatementPlans(stmt *ast.SelectStatement, pc plan.PlanChannel, ec query.ErrorChannel) {

	var planHeads []plan.PlanElement

	from := stmt.GetFrom()
	if from == nil {
		// point to :system.dual
		from = &ast.From{Pool: system.POOL_NAME, Bucket: system.BUCKET_NAME_DUAL}
	}

	// get the pool
	poolName := from.Pool
	if poolName == "" {
		poolName = this.defaultPool
	}

	pool, err := this.site.PoolByName(poolName)
	if err != nil {
		ec <- query.NewPoolDoesNotExist(poolName)
		return
	}

	bucket, err := pool.BucketByName(from.Bucket)
	if err != nil {
		ec <- query.NewBucketDoesNotExist(from.Bucket)
		return
	}

	// find all docs index
	indexes, err := bucket.Indexes()
	if err != nil {
		ec <- query.NewError(err, fmt.Sprintf("No indexes found for bucket %v", from.Bucket))
		return
	}

	var keylist []string
	if stmt.Keys != nil {
		keylist = stmt.Keys.GetKeys()
	}

	clog.To(planner.CHANNEL, "Indexes in bucket %v", indexes)

	if keylist == nil {
		for _, index := range indexes {
			var lastStep plan.PlanElement

			switch index := index.(type) {
			case catalog.PrimaryIndex:
				clog.To(planner.CHANNEL, "See primary index %v", index.Name())
				// if from.Over == nil && stmt.Where == nil && stmt.GroupBy != nil && len(stmt.GroupBy) == 0 && CanFastCountBucket(stmt.Select) {
				// 	lastStep = plan.NewFastCount(pool.Name(), bucket.Name(), "", nil, nil)
				// } else {
				lastStep = plan.NewScan(pool.Name(), bucket.Name(), index.Name(), nil)
				// }
			case catalog.RangeIndex:
				// see if this index can be used
				clog.To(planner.CHANNEL, "See index %v", index.Name())
				clog.To(planner.CHANNEL, "with Key %v", index.Key())
				if stmt.Where != nil && from.Projection == nil {
					possible, ranges, _, err := CanIUseThisIndexForThisWhereClause(index, stmt.Where, stmt.From.As)
					if err != nil {
						clog.Error(err)
						continue
					}
					clog.To(planner.CHANNEL, "Can I use it1: %v", possible)
					if possible {

						// great, but let's check for a MIN optimization too
						if stmt.GroupBy != nil && len(stmt.GroupBy) == 0 {
							possible, minranges, _, _ := CanIUseThisIndexForThisProjectionNoWhereNoGroupClause(index, stmt.Select, stmt.From.As)
							if possible {
								for _, r := range ranges {
									r.Limit = minranges[0].Limit
								}
							}
						}

						scan := plan.NewScan(pool.Name(), bucket.Name(), index.Name(), ranges)
						// see if this index covers the query
						if DoesIndexCoverStatement(index, stmt) {
							scan.Cover = true
							scan.As = from.As
						}
						lastStep = scan
					} else {
						continue
					}
				} else if from.Projection == nil {

					// try to do a fast count if it's possible
					doingFastCount := false
					// countIndex, isCountIndex := index.(catalog.CountIndex)
					// if isCountIndex {
					// 	fastCountIndexOnExpr := CanFastCountIndex(countIndex, stmt.From.As, stmt.Select)
					// 	if fastCountIndexOnExpr != nil && from.Over == nil && stmt.Where == nil && stmt.GroupBy != nil && len(stmt.GroupBy) == 0 {
					// 		lastStep = plan.NewFastCount(pool.Name(), bucket.Name(), countIndex.Name(), fastCountIndexOnExpr, nil)
					// 		doingFastCount = true
					// 	}

					// }

					// this works for aggregates on the whole bucket
					if !doingFastCount && stmt.GroupBy != nil && len(stmt.GroupBy) == 0 {
						possible, ranges, _, err := CanIUseThisIndexForThisProjectionNoWhereNoGroupClause(index, stmt.Select, stmt.From.As)
						if err != nil {
							clog.Error(err)
							continue
						}
						clog.To(planner.CHANNEL, "Can I use it2: %v", possible)
						if possible {
							lastStep = plan.NewScan(pool.Name(), bucket.Name(), index.Name(), ranges)
						} else {
							continue
						}
					} else if !doingFastCount {
						continue
					}
				}

			default:
				clog.To(planner.CHANNEL, "Unsupported type of index %T", index)
				continue
			}
			scanOp, lastStepWasScan := lastStep.(*plan.Scan)
			if lastStepWasScan {
				if !scanOp.Cover {
					lastStep = plan.NewFetch(lastStep, pool.Name(), bucket.Name(), from.Projection, from.As)
					nextFrom := from.Over
					for nextFrom != nil {
						// add document joins
						if nextFrom.Keys != nil {
							// This is a key-join
							lastStep = plan.NewKeyJoin(lastStep, pool.Name(), nextFrom.Bucket, nextFrom.Projection, nextFrom.Type, nextFrom.Oper, *nextFrom.Keys, nextFrom.As)
						} else {
							lastStep = plan.NewUnnest(lastStep, nextFrom.Projection, nextFrom.Type, nextFrom.As)
						}
						nextFrom = nextFrom.Over
					}
				}
			}
			planHeads = append(planHeads, lastStep)

		}
	} else {
		// if keylist is present then we avoid a bucket scan
		var lastStep plan.PlanElement
		lastStep = plan.NewKeyScan(keylist)
		lastStep = plan.NewFetch(lastStep, pool.Name(), bucket.Name(), from.Projection, from.As)
		nextFrom := from.Over
		for nextFrom != nil {
			// add in-document joins
			if nextFrom.Keys != nil {
				lastStep = plan.NewKeyJoin(lastStep, pool.Name(), nextFrom.Bucket, nextFrom.Projection, nextFrom.Type, nextFrom.Oper, *nextFrom.Keys, nextFrom.As)
			} else {
				lastStep = plan.NewUnnest(lastStep, nextFrom.Projection, nextFrom.Type, nextFrom.As)
			}
			nextFrom = nextFrom.Over
		}
		planHeads = append(planHeads, lastStep)
	}

	if len(planHeads) == 0 {
		ec <- query.NewError(nil, fmt.Sprintf("No usable indexes found for bucket %v", from.Bucket))
		return
	}

	// now for all the plan heads, create a full plan
	for _, lastStep := range planHeads {

		if stmt.GetWhere() != nil {
			ids := WhereClauseFindById(stmt.GetWhere())
			fetch, ok := lastStep.(*plan.Fetch)
			if ids != nil && ok {
				fetch.ConvertToIds(ids)
			} else {
				lastStep = plan.NewFilter(lastStep, stmt.GetWhere())
			}
		}

		if stmt.GetGroupBy() != nil {
			_, isFastCount := lastStep.(*plan.FastCount)
			if !isFastCount {
				lastStep = plan.NewGroup(lastStep, stmt.GetGroupBy(), stmt.GetAggregateReferences())
			}
		}

		if stmt.GetHaving() != nil {
			lastStep = plan.NewFilter(lastStep, stmt.GetHaving())
		}

		lastStep = plan.NewProjector(lastStep, stmt.GetResultExpressionList(), true)

		if stmt.IsDistinct() {
			lastStep = plan.NewEliminateDuplicates(lastStep)
		}

		if stmt.GetOrderBy() != nil {
			explicitAliases := stmt.GetExplicitProjectionAliases()
			lastStep = plan.NewOrder(lastStep, stmt.GetOrderBy(), explicitAliases)
		}

		if stmt.GetOffset() != 0 {
			lastStep = plan.NewOffset(lastStep, stmt.GetOffset())
		}

		if stmt.GetLimit() >= 0 {
			lastStep = plan.NewLimit(lastStep, stmt.GetLimit())
		}

		if stmt.ExplainOnly {
			lastStep = plan.NewExplain(lastStep)
		}

		pc <- plan.Plan{Root: lastStep}

	}

}
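Note the fixed pipeline order in the final loop: WHERE filtering (or conversion of a fetch to direct key lookup), grouping, HAVING, projection, DISTINCT elimination, ordering, offset, limit, and an optional EXPLAIN wrapper, each step wrapping the previous lastStep.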
Example #10
func (vi *viewIndex) ScanRange(low catalog.LookupValue, high catalog.LookupValue, inclusion catalog.RangeInclusion, limit int64, ch catalog.EntryChannel, warnch, errch query.ErrorChannel) {

	defer close(ch)
	defer close(warnch)
	defer close(errch)

	viewOptions := generateViewOptions(low, high, inclusion)

	viewRowChannel := make(chan cb.ViewRow)
	viewErrChannel := make(query.ErrorChannel)
	go WalkViewInBatches(viewRowChannel, viewErrChannel, vi.bucket.cbbucket, vi.DDocName(), vi.ViewName(), viewOptions, 1000, limit)

	var viewRow cb.ViewRow
	var err query.Error
	sentRows := false
	ok := true
	for ok {
		select {
		case viewRow, ok = <-viewRowChannel:
			if ok {
				entry := catalog.IndexEntry{PrimaryKey: viewRow.ID}

				// try to add the view row key as the entry key (unless this is _all_docs)
				if vi.DDocName() != "" {
					lookupValue, err := convertCouchbaseViewKeyToLookupValue(viewRow.Key)
					if err == nil {
						entry.EntryKey = lookupValue
					} else {
						clog.To(catalog.CHANNEL, "unable to convert index key to lookup value: %v", err)
					}
				}

				ch <- &entry
				sentRows = true
			}
		case err, ok = <-viewErrChannel:
			if err != nil {
				clog.Error(err)
				// check to possibly detect a bucket that was already deleted
				if !sentRows {
					clog.Printf("Checking bucket URI: %v", vi.bucket.cbbucket.URI)
					resp, err := http.Get(vi.bucket.cbbucket.URI)
					if err != nil {
						clog.Error(err)
						// remove this specific bucket from the pool cache
						delete(vi.bucket.pool.bucketCache, vi.bucket.Name())
						// close this bucket
						vi.bucket.Release()
						// ask the pool to refresh
						vi.bucket.pool.refresh()
						// bucket doesn't exist any more
						errch <- query.NewError(nil, fmt.Sprintf("Bucket %v not found.", vi.bucket.Name()))
						return
					}
					// probe succeeded: close the response body so the HTTP
					// connection is not leaked
					resp.Body.Close()

				}

				errch <- err
				return
			}
		}
	}
}