Example #1
File: sql.go  Project: JKhawaja/cockroach
// runStatements executes the given SQL statements in order and terminates
// on the first error.
func runStatements(conn *sqlConn, stmts []string) error {
	for _, stmt := range stmts {
		q := makeQuery(stmt)
		for {
			cols, allRows, tag, err := runQuery(conn, q)
			if err != nil {
				if err == pq.ErrNoMoreResults {
					break
				}
				return err
			}

			if len(cols) == 0 {
				// No result selected, inform the user.
				fmt.Fprintln(os.Stdout, tag)
			} else {
				// Some results selected, inform the user about how much data to expect.
				fmt.Fprintf(os.Stdout, "%d row%s\n", len(allRows),
					util.Pluralize(int64(len(allRows))))

				// Then print the results themselves.
				fmt.Fprintln(os.Stdout, strings.Join(cols, "\t"))
				for _, row := range allRows {
					fmt.Fprintln(os.Stdout, strings.Join(row, "\t"))
				}
			}
			// Ask for the next result set of the same statement, if any.
			q = nextResult
		}
	}
	return nil
}
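
Every example on this page formats a count with util.Pluralize. As a minimal sketch, inferred purely from the call sites above (the actual cockroach implementation may differ), the helper presumably looks like this:

// Pluralize returns "" when n is exactly 1 and "s" otherwise, so that
// callers can write fmt.Sprintf("%d row%s", n, Pluralize(int64(n))).
// Sketch only, inferred from usage.
func Pluralize(n int64) string {
	if n == 1 {
		return ""
	}
	return "s"
}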
Example #2
// printQueryOutput takes a list of column names and a list of row contents,
// and writes a pretty table to 'w', or just the command tag if there are no
// result columns.
func printQueryOutput(
	w io.Writer, cols []string, allRows [][]string, tag string, pretty bool,
) {
	if len(cols) == 0 {
		// This operation did not return rows, just show the tag.
		fmt.Fprintln(w, tag)
		return
	}

	if pretty {
		// Initialize tablewriter and set column names as the header row.
		table := tablewriter.NewWriter(w)
		table.SetAutoFormatHeaders(false)
		table.SetAutoWrapText(false)
		table.SetHeader(cols)
		for _, row := range allRows {
			for i, r := range row {
				row[i] = expandTabsAndNewLines(r)
			}
			table.Append(row)
		}
		table.Render()
		nRows := len(allRows)
		fmt.Fprintf(w, "(%d row%s)\n", nRows, util.Pluralize(int64(nRows)))
	} else {
		// Some results selected, inform the user about how much data to expect.
		fmt.Fprintf(w, "%d row%s\n", len(allRows),
			util.Pluralize(int64(len(allRows))))

		// Then print the results themselves.
		fmt.Fprintln(w, strings.Join(cols, "\t"))
		for _, row := range allRows {
			fmt.Fprintln(w, strings.Join(row, "\t"))
		}
	}
}
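
A hypothetical call to printQueryOutput, showing both output modes (the columns, rows, and tag below are invented for illustration):

// Example usage; assumes the same package and imports as printQueryOutput.
func main() {
	cols := []string{"id", "name"}
	rows := [][]string{{"1", "alpha"}, {"2", "beta"}}
	// Pretty mode renders an ASCII table followed by "(2 rows)".
	printQueryOutput(os.Stdout, cols, rows, "SELECT 2", true)
	// Plain mode prints the row count, a header line, and tab-separated rows.
	printQueryOutput(os.Stdout, cols, rows, "SELECT 2", false)
}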
Example #3
// LogStatus logs the current status of gossip, such as the number of known
// nodes and the status of incoming and outgoing connections.
func (g *Gossip) LogStatus() {
	g.mu.Lock()
	n := len(g.nodeDescs)
	status := "ok"
	if g.is.getInfo(KeySentinel) == nil {
		status = "stalled"
	}
	g.mu.Unlock()

	log.Infof("gossip status (%s, %d node%s)\n%s%s",
		status, n, util.Pluralize(int64(n)),
		g.clientStatus(), g.server.status())
}
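
Note the shape of LogStatus: it snapshots shared state under the mutex and only formats and logs after unlocking, which keeps the slow log write out of the critical section. A stripped-down sketch of the same idiom, with a hypothetical type and the standard library's log package:

type counter struct {
	mu sync.Mutex
	n  int
}

// logCount snapshots n under the lock and logs after releasing it, so the
// (potentially slow) log write never blocks other lock holders.
func (c *counter) logCount() {
	c.mu.Lock()
	n := c.n
	c.mu.Unlock()
	log.Printf("%d item%s", n, Pluralize(int64(n)))
}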
Example #4
func (ae *allocatorError) Error() string {
	anyAll := "all attributes"
	if ae.relaxConstraints {
		anyAll = "an attribute"
	}
	var auxInfo string
	// Whenever the likely problem is not having enough nodes up, make the
	// message really clear.
	if ae.relaxConstraints || len(ae.required.Attrs) == 0 {
		auxInfo = "; likely not enough nodes in cluster"
	}
	return fmt.Sprintf("0 of %d store%s with %s matching [%s]%s",
		ae.aliveStoreCount, util.Pluralize(int64(ae.aliveStoreCount)),
		anyAll, ae.required, auxInfo)
}
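
For illustration, with three live stores, relaxed constraints, and a required attribute of "ssd" (values invented), the formatted message would read:

0 of 3 stores with an attribute matching [ssd]; likely not enough nodes in cluster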
Example #5
File: admin.go  Project: csdigi/cockroach
// waitForStoreFrozen polls the given stores until they all report having no
// Replicas in the undesired freeze state (or an error or timeout occurs).
func (s *adminServer) waitForStoreFrozen(
	stream serverpb.Admin_ClusterFreezeServer,
	stores map[roachpb.StoreID]roachpb.NodeID,
	wantFrozen bool,
) error {
	mu := struct {
		sync.Mutex
		oks map[roachpb.StoreID]bool
	}{
		oks: make(map[roachpb.StoreID]bool),
	}

	opts := base.DefaultRetryOptions()
	opts.Closer = s.server.stopper.ShouldDrain()
	opts.MaxRetries = 20
	sem := make(chan struct{}, 256)
	errChan := make(chan error, 1)
	sendErr := func(err error) {
		select {
		case errChan <- err:
		default:
		}
	}

	numWaiting := len(stores) // loop until this drops to zero
	var err error
	for r := retry.Start(opts); r.Next(); {
		mu.Lock()
		for storeID, nodeID := range stores {
			storeID, nodeID := storeID, nodeID // loop-local copies for goroutine
			var nodeDesc roachpb.NodeDescriptor
			if err := s.server.gossip.GetInfoProto(gossip.MakeNodeIDKey(nodeID), &nodeDesc); err != nil {
				sendErr(err)
				break
			}
			addr := nodeDesc.Address.String()

			if _, inflightOrSucceeded := mu.oks[storeID]; inflightOrSucceeded {
				continue
			}
			mu.oks[storeID] = false // mark as inflight
			action := func() (err error) {
				var resp *roachpb.PollFrozenResponse
				defer func() {
					message := fmt.Sprintf("node %d, store %d: ", nodeID, storeID)

					if err != nil {
						message += err.Error()
					} else {
						numMismatching := len(resp.Results)
						mu.Lock()
						if numMismatching == 0 {
							// If the Store is in the right state, mark it as such.
							// This means we won't try it again.
							message += "ready"
							mu.oks[storeID] = true
						} else {
							// Otherwise, forget that we tried the Store so that
							// the retry loop picks it up again.
							message += fmt.Sprintf("%d replicas report wrong status", numMismatching)
							if limit := 10; numMismatching > limit {
								message += " [truncated]: "
								resp.Results = resp.Results[:limit]
							} else {
								message += ": "
							}
							message += fmt.Sprintf("%+v", resp.Results)
							delete(mu.oks, storeID)
						}
						mu.Unlock()
						err = stream.Send(&serverpb.ClusterFreezeResponse{
							Message: message,
						})
					}
				}()
				conn, err := s.server.rpcContext.GRPCDial(addr)
				if err != nil {
					return err
				}
				client := roachpb.NewInternalClient(conn)
				resp, err = client.PollFrozen(context.Background(),
					&roachpb.PollFrozenRequest{
						StoreRequestHeader: roachpb.StoreRequestHeader{
							NodeID:  nodeID,
							StoreID: storeID,
						},
						// If we are looking to freeze everything, we want to
						// collect thawed Replicas, and vice versa.
						CollectFrozen: !wantFrozen,
					})
				return err
			}
			// Run a limited, non-blocking task. That means the task simply
			// won't run if the semaphore is full (or the node is draining).
			// Both are handled by the surrounding retry loop.
			if !s.server.stopper.RunLimitedAsyncTask(sem, func() {
				if err := action(); err != nil {
					sendErr(err)
				}
			}) {
				// Node draining.
				sendErr(errors.New("node is shutting down"))
				break
			}
		}

		numWaiting = len(stores)
		for _, ok := range mu.oks {
			if ok {
				// Store has reported that it is in the desired state.
				numWaiting--
			}
		}
		mu.Unlock()

		select {
		case err = <-errChan:
		default:
		}

		// Keep going unless there's been an error or everyone's frozen.
		if err != nil || numWaiting == 0 {
			break
		}
		if err := stream.Send(&serverpb.ClusterFreezeResponse{
			Message: fmt.Sprintf("waiting for %d store%s to apply operation",
				numWaiting, util.Pluralize(int64(numWaiting))),
		}); err != nil {
			return err
		}
	}
	if err != nil {
		return err
	}
	if numWaiting > 0 {
		err = fmt.Errorf("timed out waiting for %d store%s to report freeze",
			numWaiting, util.Pluralize(int64(numWaiting)))
	}
	return err
}
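
The interesting primitive above is RunLimitedAsyncTask: it starts the task only when the semaphore has a free slot and reports failure instead of blocking, leaving retries to the surrounding loop. A standalone sketch of that pattern (hypothetical helper; the real stopper also checks for draining):

// tryRunLimited starts f in a goroutine only if sem has capacity, returning
// false rather than blocking when it is full; the caller's retry loop is
// expected to try again later, as waitForStoreFrozen does above.
func tryRunLimited(sem chan struct{}, wg *sync.WaitGroup, f func()) bool {
	select {
	case sem <- struct{}{}:
	default:
		return false // semaphore full
	}
	wg.Add(1)
	go func() {
		defer wg.Done()
		defer func() { <-sem }() // release the slot
		f()
	}()
	return true
}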