Code example #1
File: sql_util.go  Project: EvilMcJerkface/cockroach
// printQueryOutput takes a list of column names and a list of row contents
// and writes them to 'w', either as a pretty table or as tab-separated
// lines; if there are no columns, it prints just the command tag.
func printQueryOutput(w io.Writer, cols []string, allRows [][]string, tag string, pretty bool) {
	if len(cols) == 0 {
		// This operation did not return rows, just show the tag.
		fmt.Fprintln(w, tag)
		return
	}

	if pretty {
		// Initialize tablewriter and set column names as the header row.
		table := tablewriter.NewWriter(w)
		table.SetAutoFormatHeaders(false)
		table.SetAutoWrapText(false)
		table.SetHeader(cols)
		for _, row := range allRows {
			for i, r := range row {
				row[i] = expandTabsAndNewLines(r)
				if firstChar, _ := utf8.DecodeRuneInString(row[i]); unicode.IsSpace(firstChar) {
					// table.Append below trims whitespace in order to compute
					// the padding. This is unfortunate, since leading spaces
					// can be usefully exploited for formatting. Disable
					// trimming by inserting an invisible Unicode character
					// (a zero-width non-joiner, U+200C) at the start.
					// TODO(knz) We can revisit this once
					// https://github.com/olekukonko/tablewriter/issues/52 is
					// fixed (or we use another table writer).
					row[i] = "\u200C" + row[i]
				}
			}
			table.Append(row)
		}
		table.Render()
		nRows := len(allRows)
		fmt.Fprintf(w, "(%d row%s)\n", nRows, util.Pluralize(int64(nRows)))
	} else {
		// Results were selected; inform the user about how much data to
		// expect. len(cols) > 0 is guaranteed by the early return above.
		fmt.Fprintf(w, "%d row%s\n", len(allRows),
			util.Pluralize(int64(len(allRows))))

		// Then print the results themselves.
		fmt.Fprintln(w, strings.Join(cols, "\t"))
		for _, row := range allRows {
			fmt.Fprintln(w, strings.TrimRight(strings.Join(row, "\t"), "\t "))
		}
	}
}
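Every example on this page routes a count through util.Pluralize. The helper itself is not reproduced here; a minimal implementation consistent with all of the call sites on this page would be (a sketch inferred from the call sites, not copied from the repository):

// Pluralize returns an empty string if n is 1 and "s" otherwise, so that
// callers can write fmt.Sprintf("%d row%s", n, Pluralize(int64(n))).
func Pluralize(n int64) string {
	if n == 1 {
		return ""
	}
	return "s"
}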
Code example #2
File: sql_util.go  Project: knz/cockroach
// printQueryOutput takes a list of column names and a list of row contents
// and writes them to 'w', either as a pretty table or as tab-separated
// lines; if there are no columns, it prints just the command tag.
func printQueryOutput(w io.Writer, cols []string, allRows [][]string, tag string, pretty bool) {
	if len(cols) == 0 {
		// This operation did not return rows, just show the tag.
		fmt.Fprintln(w, tag)
		return
	}

	if pretty {
		// Initialize tablewriter and set column names as the header row.
		table := tablewriter.NewWriter(w)
		table.SetAutoFormatHeaders(false)
		table.SetAutoWrapText(false)
		table.SetHeader(cols)
		for _, row := range allRows {
			for i, r := range row {
				row[i] = expandTabsAndNewLines(r)
			}
			table.Append(row)
		}
		table.Render()
		nRows := len(allRows)
		fmt.Fprintf(w, "(%d row%s)\n", nRows, util.Pluralize(int64(nRows)))
	} else {
		// Results were selected; inform the user about how much data to
		// expect. len(cols) > 0 is guaranteed by the early return above.
		fmt.Fprintf(w, "%d row%s\n", len(allRows),
			util.Pluralize(int64(len(allRows))))

		// Then print the results themselves.
		fmt.Fprintln(w, strings.Join(cols, "\t"))
		for _, row := range allRows {
			fmt.Fprintln(w, strings.Join(row, "\t"))
		}
	}
}
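For reference, here is a self-contained sketch of the plain (non-pretty) output path above; pluralize is a hypothetical stand-in for util.Pluralize and the sample rows are made up:

package main

import (
	"fmt"
	"os"
	"strings"
)

// pluralize mimics util.Pluralize: "" for a count of 1, "s" otherwise.
func pluralize(n int64) string {
	if n == 1 {
		return ""
	}
	return "s"
}

func main() {
	cols := []string{"id", "name"}
	rows := [][]string{{"1", "carl"}, {"2", "petee"}}
	fmt.Fprintf(os.Stdout, "%d row%s\n", len(rows), pluralize(int64(len(rows))))
	fmt.Fprintln(os.Stdout, strings.Join(cols, "\t"))
	for _, row := range rows {
		fmt.Fprintln(os.Stdout, strings.Join(row, "\t"))
	}
	// Prints:
	// 2 rows
	// id	name
	// 1	carl
	// 2	petee
}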
Code example #3
File: gossip.go  Project: knz/cockroach
// LogStatus logs the current status of gossip such as the incoming and
// outgoing connections.
func (g *Gossip) LogStatus() {
	g.mu.Lock()
	n := len(g.nodeDescs)
	status := "ok"
	if g.mu.is.getInfo(KeySentinel) == nil {
		status = "stalled"
	}
	g.mu.Unlock()

	ctx := g.AnnotateCtx(context.TODO())
	log.Infof(
		ctx, "gossip status (%s, %d node%s)\n%s%s", status, n, util.Pluralize(int64(n)),
		g.clientStatus(), g.server.status(),
	)
}
Code example #4
File: allocator.go  Project: BramGruneir/cockroach
func (ae *allocatorError) Error() string {
	anyAll := "all attributes"
	if ae.relaxConstraints {
		anyAll = "an attribute"
	}
	var auxInfo string
	// Whenever the likely problem is not having enough nodes up, make the
	// message really clear.
	if ae.relaxConstraints || len(ae.required) == 0 {
		auxInfo = "; likely not enough nodes in cluster"
	}
	return fmt.Sprintf("0 of %d store%s with %s matching %s%s",
		ae.aliveStoreCount, util.Pluralize(int64(ae.aliveStoreCount)),
		anyAll, ae.required, auxInfo)
}
Code example #5
// AssertAllAssigned ensures that all placeholders that are used also
// have a value assigned.
func (p *PlaceholderInfo) AssertAllAssigned() error {
	var missing []string
	for pn := range p.Types {
		if _, ok := p.Values[pn]; !ok {
			missing = append(missing, "$"+pn)
		}
	}
	if len(missing) > 0 {
		sort.Strings(missing)
		return fmt.Errorf("no value provided for placeholder%s: %s",
			util.Pluralize(int64(len(missing))),
			strings.Join(missing, ", "),
		)
	}
	return nil
}
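A quick sketch of the message this produces; the placeholder names are hypothetical and pluralize stands in for util.Pluralize:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// pluralize mimics util.Pluralize: "" for a count of 1, "s" otherwise.
func pluralize(n int64) string {
	if n == 1 {
		return ""
	}
	return "s"
}

func main() {
	// Suppose $1 and $2 were given types but never assigned values.
	missing := []string{"$2", "$1"}
	sort.Strings(missing)
	fmt.Printf("no value provided for placeholder%s: %s\n",
		pluralize(int64(len(missing))), strings.Join(missing, ", "))
	// Prints: no value provided for placeholders: $1, $2
}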
Code example #6
// waitForStoreFrozen polls the given stores until they all report the
// desired freeze state (or an error or timeout occurs).
func (s *adminServer) waitForStoreFrozen(
	stream serverpb.Admin_ClusterFreezeServer,
	stores map[roachpb.StoreID]roachpb.NodeID,
	wantFrozen bool,
) error {
	mu := struct {
		syncutil.Mutex
		oks map[roachpb.StoreID]bool
	}{
		oks: make(map[roachpb.StoreID]bool),
	}

	opts := base.DefaultRetryOptions()
	opts.Closer = s.server.stopper.ShouldQuiesce()
	opts.MaxRetries = 20
	sem := make(chan struct{}, 256)
	errChan := make(chan error, 1)
	sendErr := func(err error) {
		select {
		case errChan <- err:
		default:
		}
	}

	numWaiting := len(stores) // loop until this drops to zero
	var err error
	for r := retry.Start(opts); r.Next(); {
		mu.Lock()
		for storeID, nodeID := range stores {
			storeID, nodeID := storeID, nodeID // loop-local copies for goroutine
			var nodeDesc roachpb.NodeDescriptor
			if err := s.server.gossip.GetInfoProto(gossip.MakeNodeIDKey(nodeID), &nodeDesc); err != nil {
				sendErr(err)
				break
			}
			addr := nodeDesc.Address.String()

			if _, inflightOrSucceeded := mu.oks[storeID]; inflightOrSucceeded {
				continue
			}
			mu.oks[storeID] = false // mark as inflight
			action := func() (err error) {
				var resp *storage.PollFrozenResponse
				defer func() {
					message := fmt.Sprintf("node %d, store %d: ", nodeID, storeID)

					if err != nil {
						message += err.Error()
					} else {
						numMismatching := len(resp.Results)
						mu.Lock()
						if numMismatching == 0 {
							// If the Store is in the right state, mark it as such.
							// This means we won't try it again.
							message += "ready"
							mu.oks[storeID] = true
						} else {
							// Otherwise, forget that we tried the Store so that
							// the retry loop picks it up again.
							message += fmt.Sprintf("%d replicas report wrong status", numMismatching)
							if limit := 10; numMismatching > limit {
								message += " [truncated]: "
								resp.Results = resp.Results[:limit]
							} else {
								message += ": "
							}
							message += fmt.Sprintf("%+v", resp.Results)
							delete(mu.oks, storeID)
						}
						mu.Unlock()
					}
					err = stream.Send(&serverpb.ClusterFreezeResponse{
						Message: message,
					})
				}()
				conn, err := s.server.rpcContext.GRPCDial(addr)
				if err != nil {
					return err
				}
				client := storage.NewFreezeClient(conn)
				resp, err = client.PollFrozen(context.TODO(),
					&storage.PollFrozenRequest{
						StoreRequestHeader: storage.StoreRequestHeader{
							NodeID:  nodeID,
							StoreID: storeID,
						},
						// If we are looking to freeze everything, we want to
						// collect thawed Replicas, and vice versa.
						CollectFrozen: !wantFrozen,
					})
				return err
			}
			// Run a limited async task; with wait=true this blocks until a
			// semaphore slot frees up, and it fails only if the node is
			// draining, which is handled by the surrounding retry loop.
			if err := s.server.stopper.RunLimitedAsyncTask(
				context.TODO(), sem, true /* wait */, func(_ context.Context) {
					if err := action(); err != nil {
						sendErr(err)
					}
				}); err != nil {
				// Node draining.
				sendErr(err)
				break
			}
		}

		numWaiting = len(stores)
		for _, ok := range mu.oks {
			if ok {
				// Store has reported the desired freeze state.
				numWaiting--
			}
		}
		mu.Unlock()

		select {
		case err = <-errChan:
		default:
		}

		// Keep going unless there's been an error or everyone's frozen.
		if err != nil || numWaiting == 0 {
			break
		}
		if err := stream.Send(&serverpb.ClusterFreezeResponse{
			Message: fmt.Sprintf("waiting for %d store%s to apply operation",
				numWaiting, util.Pluralize(int64(numWaiting))),
		}); err != nil {
			return err
		}
	}
	if err != nil {
		return err
	}
	if numWaiting > 0 {
		err = fmt.Errorf("timed out waiting for %d store%s to report freeze",
			numWaiting, util.Pluralize(int64(numWaiting)))
	}
	return err
}
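The sem channel above is the classic buffered-channel semaphore. Stripped of the stopper and retry machinery, the bounded-concurrency pattern looks like this (a generic sketch, not cockroach's RunLimitedAsyncTask):

package main

import (
	"fmt"
	"sync"
)

func main() {
	sem := make(chan struct{}, 4) // capacity bounds the number of in-flight tasks
	var wg sync.WaitGroup
	for i := 0; i < 16; i++ {
		i := i             // loop-local copy for the goroutine
		sem <- struct{}{} // acquire a slot; blocks while 4 tasks are running
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-sem }() // release the slot
			fmt.Println("task", i)
		}()
	}
	wg.Wait()
}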
Code example #7
// visit is the recursive function that supports walkPlan().
func (v *planVisitor) visit(plan planNode) {
	if v.err != nil {
		return
	}
	if plan == nil {
		return
	}

	lv := *v
	lv.nodeName = nodeName(plan)
	recurse := v.observer.enterNode(lv.nodeName, plan)
	defer lv.observer.leaveNode(lv.nodeName)

	if !recurse {
		return
	}

	switch n := plan.(type) {
	case *valuesNode:
		suffix := "not yet populated"
		if n.rows != nil {
			suffix = fmt.Sprintf("%d row%s",
				n.rows.Len(), util.Pluralize(int64(n.rows.Len())))
		} else if n.tuples != nil {
			suffix = fmt.Sprintf("%d row%s",
				len(n.tuples), util.Pluralize(int64(len(n.tuples))))
		}
		description := fmt.Sprintf("%d column%s, %s",
			len(n.columns), util.Pluralize(int64(len(n.columns))), suffix)
		lv.attr("size", description)

		var subplans []planNode
		for i, tuple := range n.tuples {
			for j, expr := range tuple {
				if n.columns[j].omitted {
					continue
				}
				subplans = lv.expr(fmt.Sprintf("row %d, expr", i), j, expr, subplans)
			}
		}
		lv.subqueries(subplans)

	case *valueGenerator:
		subplans := lv.expr("expr", -1, n.expr, nil)
		lv.subqueries(subplans)

	case *scanNode:
		lv.attr("table", fmt.Sprintf("%s@%s", n.desc.Name, n.index.Name))
		if n.noIndexJoin {
			lv.attr("hint", "no index join")
		}
		if n.specifiedIndex != nil {
			lv.attr("hint", fmt.Sprintf("force index @%s", n.specifiedIndex.Name))
		}
		spans := sqlbase.PrettySpans(n.spans, 2)
		if spans != "" {
			if spans == "-" {
				spans = "ALL"
			}
			lv.attr("spans", spans)
		}
		if n.limitHint > 0 && !n.limitSoft {
			lv.attr("limit", fmt.Sprintf("%d", n.limitHint))
		}
		subplans := lv.expr("filter", -1, n.filter, nil)
		lv.subqueries(subplans)

	case *filterNode:
		subplans := lv.expr("filter", -1, n.filter, nil)
		if n.explain != explainNone {
			lv.attr("mode", explainStrings[n.explain])
		}
		lv.subqueries(subplans)
		lv.visit(n.source.plan)

	case *renderNode:
		var subplans []planNode
		for i, r := range n.render {
			subplans = lv.expr("render", i, r, subplans)
		}
		lv.subqueries(subplans)
		lv.visit(n.source.plan)

	case *indexJoinNode:
		lv.visit(n.index)
		lv.visit(n.table)

	case *joinNode:
		jType := ""
		switch n.joinType {
		case joinTypeInner:
			jType = "inner"
			if len(n.pred.leftColNames) == 0 && n.pred.onCond == nil {
				jType = "cross"
			}
		case joinTypeLeftOuter:
			jType = "left outer"
		case joinTypeRightOuter:
			jType = "right outer"
		case joinTypeFullOuter:
			jType = "full outer"
		}
		lv.attr("type", jType)

		if len(n.pred.leftColNames) > 0 {
			var buf bytes.Buffer
			buf.WriteByte('(')
			n.pred.leftColNames.Format(&buf, parser.FmtSimple)
			buf.WriteString(") = (")
			n.pred.rightColNames.Format(&buf, parser.FmtSimple)
			buf.WriteByte(')')
			lv.attr("equality", buf.String())
		}
		subplans := lv.expr("pred", -1, n.pred.onCond, nil)
		lv.subqueries(subplans)
		lv.visit(n.left.plan)
		lv.visit(n.right.plan)

	case *selectTopNode:
		if n.plan != nil {
			lv.visit(n.plan)
		} else {
			if n.limit != nil {
				lv.visit(n.limit)
			}
			if n.distinct != nil {
				lv.visit(n.distinct)
			}
			if n.sort != nil {
				lv.visit(n.sort)
			}
			if n.window != nil {
				lv.visit(n.window)
			}
			if n.group != nil {
				lv.visit(n.group)
			}
			lv.visit(n.source)
		}

	case *limitNode:
		subplans := lv.expr("count", -1, n.countExpr, nil)
		subplans = lv.expr("offset", -1, n.offsetExpr, subplans)
		lv.subqueries(subplans)
		lv.visit(n.plan)

	case *distinctNode:
		if n.columnsInOrder != nil {
			var buf bytes.Buffer
			prefix := ""
			columns := n.Columns()
			for i, key := range n.columnsInOrder {
				if key {
					buf.WriteString(prefix)
					buf.WriteString(columns[i].Name)
					prefix = ", "
				}
			}
			lv.attr("key", buf.String())
		}
		lv.visit(n.plan)

	case *sortNode:
		var columns ResultColumns
		if n.plan != nil {
			columns = n.plan.Columns()
		}
		// We use n.ordering and not plan.Ordering() because
		// plan.Ordering() does not include the added sort columns not
		// present in the output.
		order := orderingInfo{ordering: n.ordering}
		lv.attr("order", order.AsString(columns))
		switch ss := n.sortStrategy.(type) {
		case *iterativeSortStrategy:
			lv.attr("strategy", "iterative")
		case *sortTopKStrategy:
			lv.attr("strategy", fmt.Sprintf("top %d", ss.topK))
		}
		lv.visit(n.plan)

	case *groupNode:
		var subplans []planNode
		for i, agg := range n.funcs {
			subplans = lv.expr("aggregate", i, agg.expr, subplans)
		}
		for i, rexpr := range n.render {
			subplans = lv.expr("render", i, rexpr, subplans)
		}
		subplans = lv.expr("having", -1, n.having, subplans)
		lv.subqueries(subplans)
		lv.visit(n.plan)

	case *windowNode:
		var subplans []planNode
		for i, agg := range n.funcs {
			subplans = lv.expr("window", i, agg.expr, subplans)
		}
		for i, rexpr := range n.windowRender {
			subplans = lv.expr("render", i, rexpr, subplans)
		}
		lv.subqueries(subplans)
		lv.visit(n.plan)

	case *unionNode:
		lv.visit(n.left)
		lv.visit(n.right)

	case *splitNode:
		var subplans []planNode
		for i, e := range n.exprs {
			subplans = lv.expr("expr", i, e, subplans)
		}
		lv.subqueries(subplans)

	case *insertNode:
		var buf bytes.Buffer
		buf.WriteString(n.tableDesc.Name)
		buf.WriteByte('(')
		for i, col := range n.insertCols {
			if i > 0 {
				buf.WriteString(", ")
			}
			buf.WriteString(col.Name)
		}
		buf.WriteByte(')')
		lv.attr("into", buf.String())

		var subplans []planNode
		for i, dexpr := range n.defaultExprs {
			subplans = lv.expr("default", i, dexpr, subplans)
		}
		for i, cexpr := range n.checkHelper.exprs {
			subplans = lv.expr("check", i, cexpr, subplans)
		}
		for i, rexpr := range n.rh.exprs {
			subplans = lv.expr("returning", i, rexpr, subplans)
		}
		n.tw.walkExprs(func(d string, i int, e parser.TypedExpr) {
			subplans = lv.expr(d, i, e, subplans)
		})
		lv.subqueries(subplans)
		lv.visit(n.run.rows)

	case *updateNode:
		lv.attr("table", n.tableDesc.Name)
		if len(n.tw.ru.updateCols) > 0 {
			var buf bytes.Buffer
			for i, col := range n.tw.ru.updateCols {
				if i > 0 {
					buf.WriteString(", ")
				}
				buf.WriteString(col.Name)
			}
			lv.attr("set", buf.String())
		}
		var subplans []planNode
		for i, rexpr := range n.rh.exprs {
			subplans = lv.expr("returning", i, rexpr, subplans)
		}
		n.tw.walkExprs(func(d string, i int, e parser.TypedExpr) {
			subplans = lv.expr(d, i, e, subplans)
		})
		lv.subqueries(subplans)
		lv.visit(n.run.rows)

	case *deleteNode:
		lv.attr("from", n.tableDesc.Name)
		var subplans []planNode
		for i, rexpr := range n.rh.exprs {
			subplans = lv.expr("returning", i, rexpr, subplans)
		}
		n.tw.walkExprs(func(d string, i int, e parser.TypedExpr) {
			subplans = lv.expr(d, i, e, subplans)
		})
		lv.subqueries(subplans)
		lv.visit(n.run.rows)

	case *createTableNode:
		if n.n.As() {
			lv.visit(n.sourcePlan)
		}

	case *createViewNode:
		lv.attr("query", n.sourceQuery)
		lv.visit(n.sourcePlan)

	case *delayedNode:
		lv.attr("source", n.name)
		lv.visit(n.plan)

	case *explainDebugNode:
		lv.visit(n.plan)

	case *ordinalityNode:
		lv.visit(n.source)

	case *explainTraceNode:
		lv.visit(n.plan)

	case *explainPlanNode:
		lv.attr("expanded", strconv.FormatBool(n.expanded))
		lv.visit(n.plan)
	}
}
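The lv := *v copy at the top of visit is what keeps per-node state (here nodeName) from leaking between sibling subtrees as the recursion unwinds. A stripped-down sketch of the same shape, with a hypothetical node type in place of planNode:

package main

import "fmt"

type node struct {
	name     string
	children []*node
}

type visitor struct {
	depth int // per-node state, playing the role of nodeName above
}

func (v *visitor) visit(n *node) {
	if n == nil {
		return
	}
	// Work on a local copy so mutations never propagate back to the
	// caller's visitor, mirroring lv := *v in planVisitor.visit.
	lv := *v
	lv.depth++
	fmt.Printf("%*s%s\n", lv.depth*2, "", n.name)
	for _, c := range n.children {
		lv.visit(c)
	}
}

func main() {
	root := &node{name: "render", children: []*node{
		{name: "scan"},
		{name: "values"},
	}}
	(&visitor{}).visit(root)
}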