Code Example #1
File: evaluator.go Project: knz/cockroach
func newEvaluator(
	flowCtx *FlowCtx, spec *EvaluatorSpec, input RowSource, output RowReceiver,
) (*evaluator, error) {
	ev := &evaluator{
		input:  input,
		output: output,
		ctx:    log.WithLogTag(flowCtx.Context, "Evaluator", nil),
		exprs:  make([]exprHelper, len(spec.Exprs)),
		render: make([]parser.TypedExpr, len(spec.Exprs)),
		tuple:  make(parser.DTuple, len(spec.Exprs)),
	}

	for i, expr := range spec.Exprs {
		err := ev.exprs[i].init(expr, spec.Types, flowCtx.evalCtx)
		if err != nil {
			return nil, err
		}
	}

	// Loop over the expressions in our expression set and extract fully typed
	// expressions; these will later be evaluated for each input row to
	// construct our output row.
	for i := range ev.exprs {
		typedExpr, err := ev.exprs[i].expr.TypeCheck(nil, parser.NoTypePreference)
		if err != nil {
			return nil, err
		}
		ev.render[i] = typedExpr
	}

	return ev, nil
}
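
Every example on this page follows the same tagging pattern: derive a context with log.WithLogTag before doing any work, so later log calls identify the component. A minimal sketch of that pattern, using only calls that already appear in these examples:

// Derive a context whose log output carries the "Evaluator" tag.
ctx := log.WithLogTag(flowCtx.Context, "Evaluator", nil)
// Any subsequent event logged against ctx is attributed to the evaluator.
log.Eventf(ctx, "initialized %d render expressions", len(spec.Exprs))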
Code Example #2
File: joinerbase.go Project: BramGruneir/cockroach
func (jb *joinerBase) init(
	flowCtx *FlowCtx,
	inputs []RowSource,
	output RowReceiver,
	outputCols []uint32,
	jType JoinType,
	leftTypes []*sqlbase.ColumnType,
	rightTypes []*sqlbase.ColumnType,
	expr Expression,
) error {
	jb.inputs = inputs
	jb.output = output
	jb.ctx = log.WithLogTag(flowCtx.Context, "Joiner", nil)
	jb.outputCols = columns(outputCols)
	jb.joinType = joinType(jType)
	jb.emptyLeft = make(sqlbase.EncDatumRow, len(leftTypes))
	for i := range jb.emptyLeft {
		jb.emptyLeft[i].Datum = parser.DNull
	}

	jb.emptyRight = make(sqlbase.EncDatumRow, len(rightTypes))
	for i := range jb.emptyRight {
		jb.emptyRight[i].Datum = parser.DNull
	}

	return jb.filter.init(expr, append(leftTypes, rightTypes...), flowCtx.evalCtx)
}
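
One subtlety in the final line above: append(leftTypes, rightTypes...) can write into leftTypes' backing array when it has spare capacity, silently aliasing the caller's slice. A defensive sketch in plain Go that avoids the aliasing:

// Allocate a fresh slice so neither input's backing array is shared.
combined := make([]*sqlbase.ColumnType, 0, len(leftTypes)+len(rightTypes))
combined = append(combined, leftTypes...)
combined = append(combined, rightTypes...)
// combined owns its storage; mutating it cannot corrupt leftTypes.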
Code Example #3
File: sorter.go Project: EvilMcJerkface/cockroach
func newSorter(flowCtx *FlowCtx, spec *SorterSpec, input RowSource, output RowReceiver) *sorter {
	return &sorter{
		input:    input,
		output:   output,
		ordering: convertToColumnOrdering(spec.OutputOrdering),
		matchLen: spec.OrderingMatchLen,
		limit:    spec.Limit,
		ctx:      log.WithLogTag(flowCtx.Context, "Sorter", nil),
	}
}
Code Example #4
File: gossip.go Project: knz/cockroach
// bootstrap connects the node to the gossip network. Bootstrapping
// commences when there are no connected clients or the sentinel
// gossip info is not available. After a successful bootstrap
// connection, this method blocks on the stalled condvar, which
// receives notifications that gossip network connectivity has been
// lost and requires re-bootstrapping.
func (g *Gossip) bootstrap() {
	g.server.stopper.RunWorker(func() {
		ctx := g.AnnotateCtx(context.Background())
		ctx = log.WithLogTag(ctx, "bootstrap", nil)
		var bootstrapTimer timeutil.Timer
		defer bootstrapTimer.Stop()
		for {
			if g.server.stopper.RunTask(func() {
				g.mu.Lock()
				defer g.mu.Unlock()
				haveClients := g.outgoing.len() > 0
				haveSentinel := g.mu.is.getInfo(KeySentinel) != nil
				log.Eventf(ctx, "have clients: %t, have sentinel: %t", haveClients, haveSentinel)
				if !haveClients || !haveSentinel {
					// Try to get another bootstrap address from the resolvers.
					if addr := g.getNextBootstrapAddress(); addr != nil {
						g.startClient(addr, g.NodeID.Get())
					} else {
						bootstrapAddrs := make([]string, 0, len(g.bootstrapping))
						for addr := range g.bootstrapping {
							bootstrapAddrs = append(bootstrapAddrs, addr)
						}
						log.Eventf(ctx, "no next bootstrap address; currently bootstrapping: %v", bootstrapAddrs)
						// We couldn't start a client, signal that we're stalled so that
						// we'll retry.
						g.maybeSignalStatusChangeLocked()
					}
				}
			}) != nil {
				return
			}

			// Pause an interval before next possible bootstrap.
			bootstrapTimer.Reset(g.bootstrapInterval)
			log.Eventf(ctx, "sleeping %s until bootstrap", g.bootstrapInterval)
			select {
			case <-bootstrapTimer.C:
				bootstrapTimer.Read = true
			case <-g.server.stopper.ShouldStop():
				return
			}
			log.Eventf(ctx, "idling until bootstrap required")
			// Block until we need bootstrapping again.
			select {
			case <-g.stalledCh:
				log.Eventf(ctx, "detected stall; commencing bootstrap")
			case <-g.server.stopper.ShouldStop():
				return
			}
		}
	})
}
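
The loop above interleaves three signals: a retry timer, a stall notification, and a stopper. A distilled skeleton of that control flow, using only the standard library (attempt, interval, stalledCh, and stopCh are hypothetical stand-ins for the pieces above):

for {
	attempt() // one bootstrap try
	timer := time.NewTimer(interval)
	select {
	case <-timer.C: // pause before the next possible attempt
	case <-stopCh:
		timer.Stop()
		return
	}
	select {
	case <-stalledCh: // connectivity lost; bootstrap again
	case <-stopCh:
		return
	}
}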
Code Example #5
File: updates.go Project: knz/cockroach
// If the current time is greater than the timestamp stored at `key`, run `f`.
// Before running `f`, the timestamp is advanced by a small amount via
// a compare-and-swap to ensure at-most-one concurrent execution. After `f`
// executes, the timestamp is set to the next execution time.
// Returns how long until `f` should next be run (i.e. when this method
// should be called again).
func (s *Server) maybeRunPeriodicCheck(
	op string, key roachpb.Key, f func(context.Context),
) time.Duration {
	ctx, span := s.AnnotateCtxWithSpan(context.Background(), op)
	defer span.Finish()

	// Add the op name to the log context.
	ctx = log.WithLogTag(ctx, op, nil)

	resp, err := s.db.Get(ctx, key)
	if err != nil {
		log.Infof(ctx, "error reading time: %s", err)
		return updateCheckRetryFrequency
	}

	// We early-return below if either the next check time is in the
	// future or the atomic compare-and-set of that time fails (which
	// happens if two nodes try at the same time).
	if resp.Exists() {
		whenToCheck, pErr := resp.Value.GetTime()
		if pErr != nil {
			log.Warningf(ctx, "error decoding time: %s", pErr)
			return updateCheckRetryFrequency
		} else if delay := whenToCheck.Sub(timeutil.Now()); delay > 0 {
			return delay
		}

		nextRetry := whenToCheck.Add(updateCheckRetryFrequency)
		if err := s.db.CPut(ctx, key, nextRetry, whenToCheck); err != nil {
			if log.V(2) {
				log.Infof(ctx, "could not set next version check time (maybe another node checked?): %s", err)
			}
			return updateCheckRetryFrequency
		}
	} else {
		log.Infof(ctx, "No previous %s time.", op)
		nextRetry := timeutil.Now().Add(updateCheckRetryFrequency)
		// CPut with `nil` prev value to assert that no other node has checked.
		if err := s.db.CPut(ctx, key, nextRetry, nil); err != nil {
			if log.V(2) {
				log.Infof(ctx, "Could not set %s time (maybe another node checked?): %v", op, err)
			}
			return updateCheckRetryFrequency
		}
	}

	f(ctx)

	if err := s.db.Put(ctx, key, timeutil.Now().Add(updateCheckFrequency)); err != nil {
		log.Infof(ctx, "Error updating %s time: %v", op, err)
	}
	return updateCheckFrequency
}
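
The doc comment describes a compare-and-swap guard so that at most one node runs `f` per period. That guard, distilled to its core (kv stands in for the s.db handle used above; everything else mirrors the example):

next := when.Add(updateCheckRetryFrequency)
// CPut succeeds only while the stored value still equals `when`; a peer
// that already advanced the timestamp makes this fail, and we back off.
if err := kv.CPut(ctx, key, next, when); err != nil {
	return updateCheckRetryFrequency
}
f(ctx)
// Publish the next scheduled run for every node to observe.
_ = kv.Put(ctx, key, timeutil.Now().Add(updateCheckFrequency))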
Code Example #6
File: distinct.go Project: knz/cockroach
func newDistinct(
	flowCtx *FlowCtx, spec *DistinctSpec, input RowSource, output RowReceiver,
) (*distinct, error) {
	d := &distinct{
		input:       input,
		output:      output,
		ctx:         log.WithLogTag(flowCtx.Context, "Distinct", nil),
		orderedCols: make(map[uint32]struct{}),
	}
	for _, ord := range spec.Ordering.Columns {
		d.orderedCols[ord.ColIdx] = struct{}{}
	}

	return d, nil
}
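
orderedCols uses Go's idiomatic set type: a map keyed by the element with zero-byte struct{} values. A self-contained illustration:

set := make(map[uint32]struct{})
set[3] = struct{}{} // insert
if _, ok := set[3]; ok {
	// 3 is a member
}
delete(set, 3) // remove
// struct{} occupies no memory, so the map stores only its keys.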
Code Example #7
File: evaluator.go Project: jmptrader/cockroach
func newEvaluator(
	flowCtx *FlowCtx, spec *EvaluatorSpec, input RowSource, output RowReceiver,
) (*evaluator, error) {
	ev := &evaluator{
		flowCtx:   flowCtx,
		input:     input,
		output:    output,
		specExprs: spec.Exprs,
		ctx:       log.WithLogTag(flowCtx.Context, "Evaluator", nil),
		exprs:     make([]exprHelper, len(spec.Exprs)),
		exprTypes: make([]sqlbase.ColumnType_Kind, len(spec.Exprs)),
	}

	return ev, nil
}
Code Example #8
File: aggregator.go Project: BramGruneir/cockroach
func newAggregator(
	ctx *FlowCtx, spec *AggregatorSpec, input RowSource, output RowReceiver,
) (*aggregator, error) {
	ag := &aggregator{
		input:       input,
		output:      output,
		ctx:         log.WithLogTag(ctx.Context, "Agg", nil),
		rows:        &RowBuffer{},
		buckets:     make(map[string]struct{}),
		inputCols:   make(columns, len(spec.Exprs)),
		outputTypes: make([]*sqlbase.ColumnType, len(spec.Exprs)),
		groupCols:   make(columns, len(spec.GroupCols)),
	}

	inputTypes := make([]*sqlbase.ColumnType, len(spec.Exprs))
	for i, expr := range spec.Exprs {
		ag.inputCols[i] = expr.ColIdx
		inputTypes[i] = spec.Types[expr.ColIdx]
	}
	copy(ag.groupCols, spec.GroupCols)

	// Loop over the select expressions and extract any aggregate functions --
	// non-aggregation expressions are replaced with parser.NewIdentAggregate
	// (which just returns the last value added for a bucket) to provide
	// grouped-by values for each bucket. ag.funcs is updated to contain all
	// the functions which need to be fed values.
	eh := &exprHelper{types: inputTypes}
	eh.vars = parser.MakeIndexedVarHelper(eh, len(eh.types))
	for i, expr := range spec.Exprs {
		fn, retType, err := ag.extractFunc(expr, eh)
		if err != nil {
			return nil, err
		}
		ag.funcs = append(ag.funcs, fn)

		// A nil return type means the extracted function is an identity
		// function, so the output type is the i-th input type.
		if retType == nil {
			ag.outputTypes[i] = inputTypes[i]
		} else {
			typ := sqlbase.DatumTypeToColumnType(retType)
			ag.outputTypes[i] = &typ
		}
	}

	return ag, nil
}
Code Example #9
File: executor.go Project: hvaara/cockroach
// NewExecutor creates an Executor and registers a callback on the
// system config.
func NewExecutor(
	cfg ExecutorConfig, stopper *stop.Stopper, startupMemMetrics *MemoryMetrics,
) *Executor {
	exec := &Executor{
		cfg:     cfg,
		reCache: parser.NewRegexpCache(512),

		Latency:          metric.NewLatency(MetaLatency, cfg.MetricsSampleInterval),
		TxnBeginCount:    metric.NewCounter(MetaTxnBegin),
		TxnCommitCount:   metric.NewCounter(MetaTxnCommit),
		TxnAbortCount:    metric.NewCounter(MetaTxnAbort),
		TxnRollbackCount: metric.NewCounter(MetaTxnRollback),
		SelectCount:      metric.NewCounter(MetaSelect),
		UpdateCount:      metric.NewCounter(MetaUpdate),
		InsertCount:      metric.NewCounter(MetaInsert),
		DeleteCount:      metric.NewCounter(MetaDelete),
		DdlCount:         metric.NewCounter(MetaDdl),
		MiscCount:        metric.NewCounter(MetaMisc),
		QueryCount:       metric.NewCounter(MetaQuery),
	}

	exec.systemConfigCond = sync.NewCond(exec.systemConfigMu.RLocker())

	gossipUpdateC := cfg.Gossip.RegisterSystemConfigChannel()
	stopper.RunWorker(func() {
		for {
			select {
			case <-gossipUpdateC:
				sysCfg, _ := cfg.Gossip.GetSystemConfig()
				exec.updateSystemConfig(sysCfg)
			case <-stopper.ShouldStop():
				return
			}
		}
	})

	ctx := log.WithLogTag(context.Background(), "startup", nil)
	startupSession := NewSession(ctx, SessionArgs{}, exec, nil, startupMemMetrics)
	if err := exec.virtualSchemas.init(&startupSession.planner); err != nil {
		log.Fatal(ctx, err)
	}
	startupSession.Finish(exec)

	return exec
}
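
systemConfigCond is built over the read side of an RWMutex (the RLocker call above), letting many readers wait for a config change while a single writer publishes one. A minimal standalone sketch of that pairing (ready and publish are hypothetical):

var mu sync.RWMutex
cond := sync.NewCond(mu.RLocker())

// Waiter: holds the read lock; Wait releases it while blocked.
mu.RLock()
for !ready() {
	cond.Wait()
}
mu.RUnlock()

// Writer: updates under the write lock, then wakes all waiters.
mu.Lock()
publish()
mu.Unlock()
cond.Broadcast()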
Code Example #10
File: range_cache.go Project: knz/cockroach
// performRangeLookup handles delegating the range lookup to the cache's
// RangeDescriptorDB.
func (rdc *rangeDescriptorCache) performRangeLookup(
	ctx context.Context, key roachpb.RKey, useReverseScan bool,
) ([]roachpb.RangeDescriptor, []roachpb.RangeDescriptor, error) {
	// metadataKey is sent to RangeLookup to find the RangeDescriptor
	// which contains key.
	metadataKey, err := meta(key)
	if err != nil {
		return nil, nil, err
	}

	// desc is the RangeDescriptor for the range which contains metadataKey.
	var desc *roachpb.RangeDescriptor
	switch {
	case bytes.Equal(metadataKey, roachpb.RKeyMin):
		// In this case, the requested key is stored in the cluster's first
		// range. Return the first range, which is always gossiped and not
		// queried from the datastore.
		var err error
		if desc, err = rdc.db.FirstRange(); err != nil {
			return nil, nil, err
		}
		return []roachpb.RangeDescriptor{*desc}, nil, nil
	case bytes.HasPrefix(metadataKey, keys.Meta1Prefix):
		// In this case, desc is the cluster's first range.
		var err error
		if desc, err = rdc.db.FirstRange(); err != nil {
			return nil, nil, err
		}
	default:
		// Look up desc from the cache, which will recursively call into
		// this function if it is not cached.
		var err error
		if desc, _, err = rdc.LookupRangeDescriptor(
			ctx, metadataKey, nil, useReverseScan,
		); err != nil {
			return nil, nil, err
		}
	}
	// Tag inner operations.
	ctx = log.WithLogTag(ctx, "range-lookup", nil)
	descs, prefetched, pErr := rdc.db.RangeLookup(ctx, metadataKey, desc, useReverseScan)
	return descs, prefetched, pErr.GoError()
}
Code Example #11
func newAggregator(
	ctx *FlowCtx, spec *AggregatorSpec, input RowSource, output RowReceiver,
) (*aggregator, error) {
	ag := &aggregator{
		input:       input,
		output:      output,
		ctx:         log.WithLogTag(ctx.Context, "Agg", nil),
		rows:        &RowBuffer{},
		buckets:     make(map[string]struct{}),
		inputCols:   make(columns, len(spec.Aggregations)),
		funcs:       make([]*aggregateFuncHolder, len(spec.Aggregations)),
		outputTypes: make([]sqlbase.ColumnType, len(spec.Aggregations)),
		groupCols:   make(columns, len(spec.GroupCols)),
	}

	for i, aggInfo := range spec.Aggregations {
		ag.inputCols[i] = aggInfo.ColIdx
	}
	copy(ag.groupCols, spec.GroupCols)

	// Loop over the select expressions and extract any aggregate functions --
	// non-aggregation expressions are replaced with parser.NewIdentAggregate
	// (which just returns the last value added for a bucket) to provide
	// grouped-by values for each bucket. ag.funcs is updated to contain all
	// the functions which need to be fed values.
	inputTypes := input.Types()
	for i, aggInfo := range spec.Aggregations {
		aggConstructor, retType, err := GetAggregateInfo(aggInfo.Func, inputTypes[aggInfo.ColIdx])
		if err != nil {
			return nil, err
		}

		ag.funcs[i] = ag.newAggregateFuncHolder(aggConstructor)
		if aggInfo.Distinct {
			ag.funcs[i].seen = make(map[string]struct{})
		}

		ag.outputTypes[i] = retType
	}

	return ag, nil
}
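
When aggInfo.Distinct is set, the function's `seen` set ensures each distinct input value feeds the aggregate only once. The dedup step, distilled (encode stands in for whatever row-value encoding produces the set key):

key := encode(value)
if _, dup := fn.seen[key]; dup {
	return // this value was already fed to the aggregate
}
fn.seen[key] = struct{}{}
fn.add(value)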
Code Example #12
File: node.go Project: knz/cockroach
// startWriteSummaries begins periodically persisting status summaries for the
// node and its stores.
func (n *Node) startWriteSummaries(frequency time.Duration) {
	ctx := log.WithLogTag(n.AnnotateCtx(context.Background()), "summaries", nil)
	// Immediately record summaries once on server startup.
	n.stopper.RunWorker(func() {
		// Write a status summary immediately; this helps the UI remain
		// responsive when new nodes are added.
		if err := n.writeSummaries(ctx); err != nil {
			log.Warningf(ctx, "error recording initial status summaries: %s", err)
		}
		ticker := time.NewTicker(frequency)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				if err := n.writeSummaries(ctx); err != nil {
					log.Warningf(ctx, "error recording status summaries: %s", err)
				}
			case <-n.stopper.ShouldStop():
				return
			}
		}
	})
}