Example #1
// MakeSplitKey transforms an SQL table key such that it is a valid split key
// (i.e. does not occur in the middle of a row).
func MakeSplitKey(key roachpb.Key) (roachpb.Key, error) {
	if encoding.PeekType(key) != encoding.Int {
		// Not a table key, so already a split key.
		return key, nil
	}

	n := len(key)
	// The column ID length is encoded as a varint and we take advantage of the
	// fact that the column ID itself will be encoded in 0-9 bytes and thus the
	// length of the column ID data will fit in a single byte.
	buf := key[n-1:]
	if encoding.PeekType(buf) != encoding.Int {
		// The last byte is not a valid column ID suffix.
		return nil, errors.Errorf("%s: not a valid table key", key)
	}

	// Strip off the column ID suffix from the key. The last byte of the key
	// contains the length of the column ID suffix (which might be 0 if the key
	// does not contain a column ID suffix).
	_, colIDLen, err := encoding.DecodeUvarintAscending(buf)
	if err != nil {
		return nil, err
	}
	if int(colIDLen)+1 > n {
		// The column ID length is impossible. colIDLen is the length of the
		// encoded column ID suffix. We add 1 to account for the byte holding the
		// length of the encoded column ID, and if that total (colIDLen+1) is
		// greater than the length of the key (n == len(key)) we bail. This can
		// happen when MakeSplitKey is called on keys that look like table keys
		// but which do not have a column ID length suffix (e.g.
		// SystemConfig.ComputeSplitKeys).
		return nil, errors.Errorf("%s: malformed table key", key)
	}
	return key[:len(key)-int(colIDLen)-1], nil
}
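The final slice expression is easier to see in isolation. The following stand-alone sketch is a hypothetical helper, not part of the package above: it pretends the last byte of the key directly stores the suffix length instead of a varint, uses the standard library's fmt.Errorf rather than the errors package, and strips both the suffix and the length byte the same way MakeSplitKey does.

// stripLengthSuffixedTail is an illustrative helper (hypothetical, not part of
// the code above). It assumes the last byte of key directly holds the length
// of a trailing suffix and removes both the suffix and the length byte,
// mirroring the key[:len(key)-int(colIDLen)-1] slice in MakeSplitKey.
func stripLengthSuffixedTail(key []byte) ([]byte, error) {
	n := len(key)
	if n == 0 {
		return nil, fmt.Errorf("empty key")
	}
	suffixLen := int(key[n-1])
	if suffixLen+1 > n {
		return nil, fmt.Errorf("%x: malformed key", key)
	}
	return key[:n-suffixLen-1], nil
}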
Example #2
// parseOptions reads key/value session options (e.g. "database", "user") from
// a client startup message, stopping at the empty key that terminates the
// option list.
func parseOptions(data []byte) (sql.SessionArgs, error) {
	args := sql.SessionArgs{}
	buf := readBuffer{msg: data}
	for {
		key, err := buf.getString()
		if err != nil {
			return sql.SessionArgs{}, errors.Errorf("error reading option key: %s", err)
		}
		if len(key) == 0 {
			break
		}
		value, err := buf.getString()
		if err != nil {
			return sql.SessionArgs{}, errors.Errorf("error reading option value: %s", err)
		}
		switch key {
		case "database":
			args.Database = value
		case "user":
			args.User = value
		default:
			if log.V(1) {
				log.Warningf("unrecognized configuration parameter %q", key)
			}
		}
	}
	return args, nil
}
Example #3
// Addr returns the address for the key, used to lookup the range containing
// the key. In the normal case, this is simply the key's value. However, for
// local keys, such as transaction records or range-spanning binary tree node
// pointers, the address is the inner encoded key, with the local key prefix,
// the suffix, and any optional detail removed. This address unwrapping is
// performed repeatedly in the case of doubly-local keys. In this way, local
// keys address to the same range as non-local keys, but are stored separately
// so that they don't collide with user-space or global system keys.
//
// However, not all local keys are addressable in the global map. Only range
// local keys incorporating a range key (start key or transaction key) are
// addressable (e.g. range metadata and txn records). Range local keys
// incorporating the Range ID are not (e.g. abort cache entries and range
// stats).
func Addr(k roachpb.Key) (roachpb.RKey, error) {
	if !IsLocal(k) {
		return roachpb.RKey(k), nil
	}

	for {
		if bytes.HasPrefix(k, localStorePrefix) {
			return nil, errors.Errorf("store-local key %q is not addressable", k)
		}
		if bytes.HasPrefix(k, LocalRangeIDPrefix) {
			return nil, errors.Errorf("local range ID key %q is not addressable", k)
		}
		if !bytes.HasPrefix(k, LocalRangePrefix) {
			return nil, errors.Errorf("local key %q malformed; should contain prefix %q",
				k, LocalRangePrefix)
		}
		k = k[len(LocalRangePrefix):]
		var err error
		// Decode the encoded key, throw away the suffix and detail.
		if _, k, err = encoding.DecodeBytesAscending(k, nil); err != nil {
			return nil, err
		}
		if !bytes.HasPrefix(k, localPrefix) {
			break
		}
	}
	return roachpb.RKey(k), nil
}
Example #4
func (hsm *HTTPStateManager) getManifestEtag(m *Manifest) (string, string, error) {
	u, err := hsm.manifestURL(m)
	if err != nil {
		return "", "", err
	}

	Log.Debug.Printf("Getting manifest from %s", u)

	grq, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return "", "", err
	}
	grz, err := hsm.httpRequest(grq)
	if err != nil {
		return "", "", err
	}
	defer grz.Body.Close()
	if !(grz.StatusCode >= 200 && grz.StatusCode < 300) {
		return "", "", errors.Errorf("GET %s, %s: %#v", u, grz.Status, m)
	}
	rm := hsm.jsonManifest(grz.Body)
	different, differences := rm.Diff(m)
	if different {
		return "", "", errors.Errorf("Remote and local versions of manifest don't match: %#v", differences)
	}
	return u, grz.Header.Get("Etag"), nil
}
Example #5
// colIndex takes an expression that refers to a column using an integer, verifies it refers to a
// valid render target and returns the corresponding column index. For example:
//    SELECT a from T ORDER by 1
// Here "1" refers to the first render target "a". The returned index is 0.
func (p *planner) colIndex(numOriginalCols int, expr parser.Expr, context string) (int, error) {
	ord := int64(-1)
	switch i := expr.(type) {
	case *parser.NumVal:
		if i.ShouldBeInt64() {
			val, err := i.AsInt64()
			if err != nil {
				return -1, err
			}
			ord = val
		} else {
			return -1, errors.Errorf("non-integer constant in %s: %s", context, expr)
		}
	case *parser.DInt:
		if *i >= 0 {
			ord = int64(*i)
		}
	case *parser.StrVal:
		return -1, errors.Errorf("non-integer constant in %s: %s", context, expr)
	case parser.Datum:
		return -1, errors.Errorf("non-integer constant in %s: %s", context, expr)
	}
	if ord != -1 {
		if ord < 1 || ord > int64(numOriginalCols) {
			return -1, errors.Errorf("%s position %s is not in select list", context, expr)
		}
		ord--
	}
	return int(ord), nil
}
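The ordinal branch above boils down to a 1-based bounds check followed by conversion to a 0-based index. Below is a minimal stand-alone sketch of just that step; ordinalToIndex is a hypothetical helper using only the standard library.

// ordinalToIndex mirrors the final bounds check in colIndex: a 1-based
// ORDER BY ordinal must fall inside the select list and is then converted to a
// 0-based column index. Illustrative only.
func ordinalToIndex(ord, numCols int, context string) (int, error) {
	if ord < 1 || ord > numCols {
		return -1, fmt.Errorf("%s position %d is not in select list", context, ord)
	}
	return ord - 1, nil
}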
Example #6
func addLogs(ws *websocket.Conn) {
	var err error
	defer func() {
		msg := &errMsg{}
		if err != nil {
			msg.Error = err.Error()
			log.Errorf("failure in logs webservice: %s", err)
		}
		websocket.JSON.Send(ws, msg)
		ws.Close()
	}()
	req := ws.Request()
	t := context.GetAuthToken(req)
	if t == nil {
		err = errors.Errorf("wslogs: no token")
		return
	}
	if t.GetAppName() != app.InternalAppName {
		err = errors.Errorf("wslogs: invalid token app name: %q", t.GetAppName())
		return
	}
	err = scanLogs(ws)
	if err != nil {
		return
	}
}
Example #7
func newBlueprint(n *parser.AstNode) (string, error) {
	if n.Type != parser.AstBlueprint {
		return "", errors.Errorf("expected script node, got %s", n.Type)
	}

	var bprint string

	for _, child := range n.Children {
		switch child.Type {
		case parser.AstScript:
			if bprint != "" {
				return "", errors.Errorf("only one blueprint node allowed!")
			}
			bscript, ok := child.Value.(*parser.BashScript)
			if !ok {
				return "", errors.Errorf("expected a string value, got %T", child.Value)
			}
			bprint = bscript.Script
		case parser.AstText:
			// ignore
		default:
			return "", errors.Errorf("unexpected node seen: %s", child.Type)
		}
	}

	return bprint, nil
}
Example #8
// DecodeVarintAscending decodes a value encoded by EncodeVarintAscending.
func DecodeVarintAscending(b []byte) ([]byte, int64, error) {
	if len(b) == 0 {
		return nil, 0, errors.Errorf("insufficient bytes to decode uvarint value")
	}
	length := int(b[0]) - intZero
	if length < 0 {
		length = -length
		remB := b[1:]
		if len(remB) < length {
			return nil, 0, errors.Errorf("insufficient bytes to decode uvarint value: %s", remB)
		}
		var v int64
		// Use the ones-complement of each encoded byte in order to build
		// up a positive number, then take the ones-complement again to
		// arrive at our negative value.
		for _, t := range remB[:length] {
			v = (v << 8) | int64(^t)
		}
		return remB[length:], ^v, nil
	}

	remB, v, err := DecodeUvarintAscending(b)
	if err != nil {
		return remB, 0, err
	}
	if v > math.MaxInt64 {
		return nil, 0, errors.Errorf("varint %d overflows int64", v)
	}
	return remB, int64(v), nil
}
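The ones-complement loop in the negative branch can be exercised on its own. The sketch below uses a hypothetical helper name and omits the tag byte and length handling; it only shows how complementing each payload byte while accumulating, then complementing the result, recovers the negative value.

// decodeNegative isolates the ones-complement loop from the negative branch of
// DecodeVarintAscending (tag byte and length handling omitted): each payload
// byte is complemented while accumulating big-endian, and the accumulator is
// complemented once more to arrive at the negative value.
func decodeNegative(payload []byte) int64 {
	var v int64
	for _, t := range payload {
		v = (v << 8) | int64(^t)
	}
	return ^v
}

For instance, decodeNegative([]byte{0xFB, 0x2E}) returns -1234, whose low two two's-complement bytes are exactly 0xFB 0x2E.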
Example #9
func decodeBytesInternal(b []byte, r []byte, e escapes, expectMarker bool) ([]byte, []byte, error) {
	if expectMarker {
		if len(b) == 0 || b[0] != e.marker {
			return nil, nil, errors.Errorf("did not find marker %#x in buffer %#x", e.marker, b)
		}
		b = b[1:]
	}

	for {
		i := bytes.IndexByte(b, e.escape)
		if i == -1 {
			return nil, nil, errors.Errorf("did not find terminator %#x in buffer %#x", e.escape, b)
		}
		if i+1 >= len(b) {
			return nil, nil, errors.Errorf("malformed escape in buffer %#x", b)
		}
		v := b[i+1]
		if v == e.escapedTerm {
			if r == nil {
				r = b[:i]
			} else {
				r = append(r, b[:i]...)
			}
			return b[i+2:], r, nil
		}

		if v != e.escaped00 {
			return nil, nil, errors.Errorf("unknown escape sequence: %#x %#x", e.escape, v)
		}

		r = append(r, b[:i]...)
		r = append(r, e.escapedFF)
		b = b[i+2:]
	}
}
Example #10
// GetServerNets returns the networks associated with the server @s.
func (c *Client) GetServerNets(s Server) (nets []Network, err error) {
	var seen = make(map[string]bool) /* map { networkId -> bool } */

	// The GetNetworks() call returns only the networks visible to the current account.
	// In practice, the current account may be a sub-account, and the server may be
	// using a network owned by the parent account. If that is the case, the results
	// will be empty, and the credentials of the parent account are needed to obtain
	// the details.
	networks, err := c.GetNetworks(s.LocationId)
	if err != nil {
		return nil, errors.Errorf("failed to query networks in %s: %s", s.LocationId, err)
	} else if len(networks) == 0 {
		return nil, errors.Errorf("failed to query network information in %s", s.LocationId)
	}

	for idx := range s.Details.IpAddresses {
		if ip := s.Details.IpAddresses[idx].Internal; ip == "" {
			/* only use the internal IPs */
		} else if net, err := NetworkByIP(ip, networks); err != nil {
			return nil, errors.Errorf("failed to identify network for %s: %s", ip, err)
		} else if net != nil && !seen[net.Id] {
			seen[net.Id] = true
			nets = append(nets, *net)
		}
	}
	return
}
Example #11
func (ds *ServerImpl) flowStreamInt(ctx context.Context, stream DistSQL_FlowStreamServer) error {
	// Receive the first message.
	msg, err := stream.Recv()
	if err != nil {
		if err == io.EOF {
			return errors.Errorf("empty stream")
		}
		return err
	}
	if msg.Header == nil {
		return errors.Errorf("no header in first message")
	}
	flowID := msg.Header.FlowID
	streamID := msg.Header.StreamID
	if log.V(1) {
		log.Infof(ctx, "connecting inbound stream %s/%d", flowID.Short(), streamID)
	}
	f, streamInfo, err := ds.flowRegistry.ConnectInboundStream(flowID, streamID)
	if err != nil {
		return err
	}
	log.VEventf(ctx, 1, "connected inbound stream %s/%d", flowID.Short(), streamID)
	defer ds.flowRegistry.FinishInboundStream(streamInfo)
	return ProcessInboundStream(&f.FlowCtx, stream, msg, streamInfo.receiver)
}
Example #12
// IncrementEpoch is called to increment the current liveness epoch,
// thereby invalidating anything relying on the liveness of the
// previous epoch. This method does a conditional put on the node
// liveness record, and if successful, stores the updated liveness
// record in the nodes map. If this method is called on a node ID
// which is considered live according to the most recent information
// gathered through gossip, an error is returned.
func (nl *NodeLiveness) IncrementEpoch(ctx context.Context, liveness *Liveness) error {
	if liveness.isLive(nl.clock.Now(), nl.clock.MaxOffset()) {
		return errors.Errorf("cannot increment epoch on live node: %+v", liveness)
	}
	newLiveness := *liveness
	newLiveness.Epoch++
	if err := nl.updateLiveness(ctx, &newLiveness, liveness, func(actual Liveness) error {
		if actual.Epoch > liveness.Epoch {
			newLiveness = actual
			return nil
		} else if actual.Epoch < liveness.Epoch {
			return errors.Errorf("unexpected liveness epoch %d; expected >= %d", actual.Epoch, liveness.Epoch)
		}
		nl.mu.Lock()
		defer nl.mu.Unlock()
		nl.mu.nodes[actual.NodeID] = actual
		return errors.Errorf("mismatch incrementing epoch for %+v; actual is %+v", liveness, actual)
	}); err != nil {
		return err
	}

	log.VEventf(ctx, 1, "incremented node %d liveness epoch to %d",
		newLiveness.NodeID, newLiveness.Epoch)
	nl.mu.Lock()
	defer nl.mu.Unlock()
	if nodeID := nl.gossip.NodeID.Get(); nodeID == liveness.NodeID {
		nl.mu.self = newLiveness
	} else {
		nl.mu.nodes[newLiveness.NodeID] = newLiveness
	}
	nl.metrics.EpochIncrements.Inc(1)
	return nil
}
Example #13
// ContainerSetsEqual returns nil if the two container slices contain the same
// containers (keyed by ID), or an error describing the first mismatch.
func ContainerSetsEqual(a []*beacon.Container, b []*beacon.Container) error {
	if len(a) != len(b) {
		return errors.Errorf("container sets inequal length: %d != %d", len(a), len(b))
	}

	newSet := func(arr []*beacon.Container) map[string]*beacon.Container {
		set := make(map[string]*beacon.Container, len(arr))
		for _, cntr := range arr {
			set[cntr.ID] = cntr
		}
		return set
	}

	aSet := newSet(a)
	bSet := newSet(b)

	for id, c1 := range aSet {
		if c2, ok := bSet[id]; !ok {
			return errors.Errorf("container[%s] not in both sets", id)
		} else if err := ContainersEqual(c1, c2); err != nil {
			return errors.Wrapf(err, "container[%s] inequal", id)
		}
	}
	return nil
}
Example #14
// makeStreamMerger builds a streamMerger over exactly two sources whose
// orderings must have matching lengths and directions.
func makeStreamMerger(
	orderings []sqlbase.ColumnOrdering, sources []RowSource,
) (streamMerger, error) {
	if len(sources) != 2 {
		return streamMerger{}, errors.Errorf("only 2 sources allowed, %d provided", len(sources))
	}
	if len(sources) != len(orderings) {
		return streamMerger{}, errors.Errorf(
			"orderings count %d doesn't match source count %d", len(orderings), len(sources))
	}
	if len(orderings[0]) != len(orderings[1]) {
		return streamMerger{}, errors.Errorf(
			"ordering lengths don't match: %d and %d", len(orderings[0]), len(orderings[1]))
	}
	for i, ord := range orderings[0] {
		if ord.Direction != orderings[1][i].Direction {
			return streamMerger{}, errors.New("Ordering mismatch")
		}
	}

	return streamMerger{
		left: streamCacher{
			src:      sources[0],
			ordering: orderings[0],
		},
		right: streamCacher{
			src:      sources[1],
			ordering: orderings[1],
		},
	}, nil
}
Example #15
// StopWithError will send a SIGINT every second and wait for the daemon to stop.
// If it times out, a SIGKILL is sent.
// Stop will not delete the daemon directory. If a purged daemon is needed,
// instantiate a new one with NewDaemon.
func (d *Daemon) StopWithError() error {
	if d.cmd == nil || d.Wait == nil {
		return errDaemonNotStarted
	}

	defer func() {
		d.logFile.Close()
		d.cmd = nil
	}()

	i := 1
	tick := time.Tick(time.Second)

	if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
		if strings.Contains(err.Error(), "os: process already finished") {
			return errDaemonNotStarted
		}
		return errors.Errorf("could not send signal: %v", err)
	}
out1:
	for {
		select {
		case err := <-d.Wait:
			return err
		case <-time.After(20 * time.Second):
			// Allow time for stopping jobs and running onShutdown hooks.
			d.log.Logf("[%s] daemon did not stop after SIGINT within 20s; escalating", d.id)
			break out1
		}
	}

out2:
	for {
		select {
		case err := <-d.Wait:
			return err
		case <-tick:
			i++
			if i > 5 {
				d.log.Logf("tried to interrupt daemon for %d times, now try to kill it", i)
				break out2
			}
			d.log.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid)
			if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
				return errors.Errorf("could not send signal: %v", err)
			}
		}
	}

	if err := d.cmd.Process.Kill(); err != nil {
		d.log.Logf("Could not kill daemon: %v", err)
		return err
	}

	if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder)); err != nil {
		return err
	}

	return nil
}
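The shutdown strategy above (interrupt first, kill only after a deadline) can be condensed into a small generic helper. The following is an illustrative sketch, not the Daemon API: stopGracefully is a hypothetical function that assumes an already-started exec.Cmd, a channel delivering the result of cmd.Wait exactly once, and the standard fmt, os, os/exec, and time imports.

// stopGracefully is a condensed, hypothetical sketch of the same shutdown
// strategy: ask politely with SIGINT, wait up to deadline for the process to
// exit, then fall back to SIGKILL. cmd must already have been started and wait
// must deliver the result of cmd.Wait exactly once.
func stopGracefully(cmd *exec.Cmd, wait <-chan error, deadline time.Duration) error {
	if err := cmd.Process.Signal(os.Interrupt); err != nil {
		return fmt.Errorf("could not send signal: %v", err)
	}
	select {
	case err := <-wait:
		return err
	case <-time.After(deadline):
		// The process ignored the interrupt; force it down and report the
		// eventual exit error.
		if err := cmd.Process.Kill(); err != nil {
			return fmt.Errorf("could not kill process: %v", err)
		}
		return <-wait
	}
}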
Example #16
// PeekLength returns the length of the encoded value at the start of b.  Note:
// if this function succeeds, it's not a guarantee that decoding the value will
// succeed.
func PeekLength(b []byte) (int, error) {
	if len(b) == 0 {
		return 0, errors.Errorf("empty slice")
	}
	m := b[0]
	switch m {
	case encodedNull, encodedNullDesc, encodedNotNull, encodedNotNullDesc,
		floatNaN, floatNaNDesc, floatZero, decimalZero:
		return 1, nil
	case bytesMarker:
		return getBytesLength(b, ascendingEscapes)
	case bytesDescMarker:
		return getBytesLength(b, descendingEscapes)
	case timeMarker:
		return GetMultiVarintLen(b, 2)
	case durationBigNegMarker, durationMarker, durationBigPosMarker:
		return GetMultiVarintLen(b, 3)
	case floatNeg, floatPos:
		// the marker is followed by 8 bytes
		if len(b) < 9 {
			return 0, errors.Errorf("slice too short for float (%d)", len(b))
		}
		return 9, nil
	}
	if m >= IntMin && m <= IntMax {
		return getVarintLen(b)
	}
	if m >= decimalNaN && m <= decimalNaNDesc {
		return getDecimalLen(b)
	}
	return 0, errors.Errorf("unknown tag %d", m)
}
Example #17
// WaitInspectWithArgs waits for the specified expression to equal the expected
// string within the given timeout.
// FIXME(vdemeester) Attach this to the Daemon struct
func WaitInspectWithArgs(dockerBinary, name, expr, expected string, timeout time.Duration, arg ...string) error {
	after := time.After(timeout)

	args := append(arg, "inspect", "-f", expr, name)
	for {
		result := icmd.RunCommand(dockerBinary, args...)
		if result.Error != nil {
			if !strings.Contains(result.Stderr(), "No such") {
				return errors.Errorf("error executing docker inspect: %v\n%s",
					result.Stderr(), result.Stdout())
			}
			select {
			case <-after:
				return result.Error
			default:
				time.Sleep(10 * time.Millisecond)
				continue
			}
		}

		out := strings.TrimSpace(result.Stdout())
		if out == expected {
			break
		}

		select {
		case <-after:
			return errors.Errorf("condition \"%q == %q\" not true in time (%v)", out, expected, timeout)
		default:
		}

		time.Sleep(100 * time.Millisecond)
	}
	return nil
}
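The polling shape used here (re-check, sleep, give up at a deadline) generalizes cleanly. Below is a small sketch of that pattern on its own; pollUntil is a hypothetical helper using only the standard library.

// pollUntil is a generic version of the polling shape used by
// WaitInspectWithArgs: re-evaluate cond every interval until it reports done
// or the timeout elapses. Illustrative only.
func pollUntil(cond func() (done bool, err error), timeout, interval time.Duration) error {
	deadline := time.After(timeout)
	for {
		done, err := cond()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		select {
		case <-deadline:
			return fmt.Errorf("condition not true within %v", timeout)
		case <-time.After(interval):
		}
	}
}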
Example #18
// checkNode checks all the endpoints of the status server hosted by node and
// requests info for the node with otherNodeID. That node could be another
// node, the same node, or "local".
func checkNode(
	ctx context.Context,
	t *testing.T,
	c cluster.Cluster,
	i int,
	nodeID,
	otherNodeID, expectedNodeID roachpb.NodeID,
) {
	urlIDs := []string{otherNodeID.String()}
	if nodeID == otherNodeID {
		urlIDs = append(urlIDs, "local")
	}
	var details serverpb.DetailsResponse
	for _, urlID := range urlIDs {
		if err := httputil.GetJSON(cluster.HTTPClient, c.URL(ctx, i)+"/_status/details/"+urlID, &details); err != nil {
			t.Fatal(errors.Errorf("unable to parse details - %s", err))
		}
		if details.NodeID != expectedNodeID {
			t.Fatal(errors.Errorf("%d calling %s: node ids don't match - expected %d, actual %d", nodeID, urlID, expectedNodeID, details.NodeID))
		}

		get(ctx, t, c.URL(ctx, i), fmt.Sprintf("/_status/gossip/%s", urlID))
		get(ctx, t, c.URL(ctx, i), fmt.Sprintf("/_status/nodes/%s", urlID))
		get(ctx, t, c.URL(ctx, i), fmt.Sprintf("/_status/logfiles/%s", urlID))
		get(ctx, t, c.URL(ctx, i), fmt.Sprintf("/_status/logs/%s", urlID))
		get(ctx, t, c.URL(ctx, i), fmt.Sprintf("/_status/stacks/%s", urlID))
	}
}
Example #19
// TypeCheck implements the Expr interface.
func (expr *IndirectionExpr) TypeCheck(ctx *SemaContext, desired Type) (TypedExpr, error) {
	for i, part := range expr.Indirection {
		switch t := part.(type) {
		case *ArraySubscript:
			if t.Slice {
				return nil, util.UnimplementedWithIssueErrorf(2115, "ARRAY slicing in %s", expr)
			}
			if i > 0 {
				return nil, util.UnimplementedWithIssueErrorf(2115, "multidimensional ARRAY %s", expr)
			}

			beginExpr, err := typeCheckAndRequire(ctx, t.Begin, TypeInt, "ARRAY subscript")
			if err != nil {
				return nil, err
			}
			t.Begin = beginExpr
		default:
			return nil, errors.Errorf("syntax not yet supported: %s", expr.Indirection)
		}
	}

	subExpr, err := expr.Expr.TypeCheck(ctx, tArray{desired})
	if err != nil {
		return nil, err
	}
	typ := subExpr.ResolvedType()
	arrType, ok := typ.(tArray)
	if !ok {
		return nil, errors.Errorf("cannot subscript type %s because it is not an array", typ)
	}
	expr.Expr = subExpr
	expr.typ = arrType.Typ
	return expr, nil
}
Example #20
// AllocateTarget returns a suitable store for a new allocation with the
// required attributes. Nodes already accommodating existing replicas are ruled
// out as targets. The range ID of the replica being allocated for is also
// passed in to ensure that we don't try to replace an existing dead replica on
// a store. If relaxConstraints is true, then the required attributes will be
// relaxed as necessary, from least specific to most specific, in order to
// allocate a target.
func (a *Allocator) AllocateTarget(
	constraints config.Constraints,
	existing []roachpb.ReplicaDescriptor,
	rangeID roachpb.RangeID,
	relaxConstraints bool,
) (*roachpb.StoreDescriptor, error) {
	sl, aliveStoreCount, throttledStoreCount := a.storePool.getStoreList(rangeID)
	if a.options.UseRuleSolver {
		candidates := allocateCandidates(
			sl,
			constraints,
			existing,
			a.storePool.getNodeLocalities(existing),
			a.storePool.deterministic,
		)
		if log.V(3) {
			log.Infof(context.TODO(), "allocate candidates: %s", candidates)
		}
		if target := candidates.selectGood(a.randGen); target != nil {
			return target, nil
		}

		// When there are throttled stores that do match, we shouldn't send
		// the replica to purgatory.
		if throttledStoreCount > 0 {
			return nil, errors.Errorf("%d matching stores are currently throttled", throttledStoreCount)
		}
		return nil, &allocatorError{
			required: constraints.Constraints,
		}
	}

	existingNodes := make(nodeIDSet, len(existing))
	for _, repl := range existing {
		existingNodes[repl.NodeID] = struct{}{}
	}

	// Because more redundancy is better than less, if relaxConstraints, the
	// matching here is lenient, and tries to find a target by relaxing an
	// attribute constraint, from last attribute to first.
	for attrs := constraints.Constraints; ; attrs = attrs[:len(attrs)-1] {
		filteredSL := sl.filter(config.Constraints{Constraints: attrs})
		if target := a.selectGood(filteredSL, existingNodes); target != nil {
			return target, nil
		}

		// When there are throttled stores that do match, we shouldn't send
		// the replica to purgatory or even consider relaxing the constraints.
		if throttledStoreCount > 0 {
			return nil, errors.Errorf("%d matching stores are currently throttled", throttledStoreCount)
		}
		if len(attrs) == 0 || !relaxConstraints {
			return nil, &allocatorError{
				required:         constraints.Constraints,
				relaxConstraints: relaxConstraints,
				aliveStoreCount:  aliveStoreCount,
			}
		}
	}
}
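The constraint-relaxation loop near the end of AllocateTarget is a general pattern: try with the full constraint list, and on failure retry with the last constraint dropped. The sketch below restates that idea with hypothetical types (string candidates and a caller-supplied match function), independent of the store pool machinery above.

// firstMatchRelaxing is a stand-alone sketch of the relaxation loop in
// AllocateTarget, with hypothetical types: try the full constraint list first
// and, on each failure, retry with the last constraint dropped until a
// candidate matches or no constraints remain.
func firstMatchRelaxing(
	candidates []string,
	constraints []string,
	match func(candidate string, constraints []string) bool,
) (string, bool) {
	for attrs := constraints; ; attrs = attrs[:len(attrs)-1] {
		for _, c := range candidates {
			if match(c, attrs) {
				return c, true
			}
		}
		if len(attrs) == 0 {
			return "", false
		}
	}
}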
Example #21
// verifyStoreList ensures that the returned list of stores is correct.
func verifyStoreList(
	sp *StorePool,
	constraints config.Constraints,
	rangeID roachpb.RangeID,
	expected []int,
	expectedAliveStoreCount int,
	expectedThrottledStoreCount int,
) error {
	var actual []int
	sl, aliveStoreCount, throttledStoreCount := sp.getStoreList(rangeID)
	sl = sl.filter(constraints)
	if aliveStoreCount != expectedAliveStoreCount {
		return errors.Errorf("expected AliveStoreCount %d does not match actual %d",
			expectedAliveStoreCount, aliveStoreCount)
	}
	if throttledStoreCount != expectedThrottledStoreCount {
		return errors.Errorf("expected ThrottledStoreCount %d does not match actual %d",
			expectedThrottledStoreCount, throttledStoreCount)
	}
	for _, store := range sl.stores {
		actual = append(actual, int(store.StoreID))
	}
	sort.Ints(expected)
	sort.Ints(actual)
	if !reflect.DeepEqual(expected, actual) {
		return errors.Errorf("expected %+v stores, actual %+v", expected, actual)
	}
	return nil
}
Example #22
// CheckValuesOnNodes checks that all the nodes in the cluster have the same
// replicated data, generally used to check if a node can catch up with the logs
// correctly
func CheckValuesOnNodes(t *testing.T, clockSource *fakeclock.FakeClock, checkNodes map[uint64]*TestNode, ids []string, values []*api.Node) {
	iteration := 0
	for checkNodeID, node := range checkNodes {
		assert.NoError(t, PollFunc(clockSource, func() error {
			var err error
			node.MemoryStore().View(func(tx store.ReadTx) {
				var allNodes []*api.Node
				allNodes, err = store.FindNodes(tx, store.All)
				if err != nil {
					return
				}
				for i, id := range ids {
					n := store.GetNode(tx, id)
					if n == nil {
						err = errors.Errorf("node %s not found on %d (iteration %d)", id, checkNodeID, iteration)
						return
					}
					if !reflect.DeepEqual(values[i], n) {
						err = errors.Errorf("node %s did not match expected value on %d (iteration %d)", id, checkNodeID, iteration)
						return
					}
				}
				if len(allNodes) != len(ids) {
					err = errors.Errorf("expected %d nodes, got %d (iteration %d)", len(ids), len(allNodes), iteration)
					return
				}
			})
			return err
		}))
		iteration++
	}
}
Example #23
// setupOutboundStream sets up an output stream; if the stream is local, the
// RowChannel is looked up in the localStreams map; otherwise an outgoing
// mailbox is created.
func (f *Flow) setupOutboundStream(spec StreamEndpointSpec) (RowReceiver, error) {
	sid := spec.StreamID
	switch spec.Type {
	case StreamEndpointSpec_SYNC_RESPONSE:
		return f.syncFlowConsumer, nil

	case StreamEndpointSpec_REMOTE:
		outbox := newOutbox(&f.FlowCtx, spec.TargetAddr, f.id, sid)
		f.outboxes = append(f.outboxes, outbox)
		return outbox, nil

	case StreamEndpointSpec_LOCAL:
		rowChan, found := f.localStreams[sid]
		if !found {
			return nil, errors.Errorf("unconnected inbound stream %d", sid)
		}
		// Once we "connect" a stream, we set the value in the map to nil.
		if rowChan == nil {
			return nil, errors.Errorf("stream %d has multiple connections", sid)
		}
		f.localStreams[sid] = nil
		return rowChan, nil
	default:
		return nil, errors.Errorf("invalid stream type %d", spec.Type)
	}
}
Example #24
func checkResultType(typ parser.Type) error {
	// Compare all types that cannot rely on == equality.
	istype := typ.FamilyEqual
	switch {
	case istype(parser.TypeCollatedString):
		return nil
	case istype(parser.TypePlaceholder):
		return errors.Errorf("could not determine data type of %s", typ)
	case istype(parser.TypeTuple):
		return nil
	}
	// Compare all types that can rely on == equality.
	switch typ {
	case parser.TypeNull:
	case parser.TypeBool:
	case parser.TypeInt:
	case parser.TypeFloat:
	case parser.TypeDecimal:
	case parser.TypeBytes:
	case parser.TypeString:
	case parser.TypeDate:
	case parser.TypeTimestamp:
	case parser.TypeTimestampTZ:
	case parser.TypeInterval:
	case parser.TypeArray:
	default:
		return errors.Errorf("unsupported result type: %s", typ)
	}
	return nil
}
Example #25
func makeBSONRaw(in interface{}) (bson.Raw, error) {
	if in == nil {
		return bson.Raw{}, nil
	}
	var kind byte
	v := reflect.ValueOf(in)
	if v.Kind() == reflect.Ptr {
		if v.IsNil() {
			return bson.Raw{}, nil
		}
		v = v.Elem()
	}
	switch v.Kind() {
	case reflect.Map, reflect.Struct:
		kind = 3 // BSON "Document" kind
	case reflect.Array, reflect.Slice:
		kind = 4 // BSON "Array" kind
	default:
		return bson.Raw{}, errors.Errorf("cannot use type %T as event custom data", in)
	}
	data, err := bson.Marshal(in)
	if err != nil {
		return bson.Raw{}, err
	}
	if len(data) == 0 {
		return bson.Raw{}, errors.Errorf("invalid empty bson object for object %#v", in)
	}
	return bson.Raw{
		Kind: kind,
		Data: data,
	}, nil
}
Example #26
// GetUserHashedPassword returns the hashedPassword for the given username if
// found in system.users.
func GetUserHashedPassword(
	ctx context.Context, executor *Executor, metrics *MemoryMetrics, username string,
) ([]byte, error) {
	normalizedUsername := parser.Name(username).Normalize()
	// The root user is not in system.users.
	if normalizedUsername == security.RootUser {
		return nil, nil
	}

	var hashedPassword []byte
	if err := executor.cfg.DB.Txn(ctx, func(txn *client.Txn) error {
		p := makeInternalPlanner("get-pwd", txn, security.RootUser, metrics)
		defer finishInternalPlanner(p)
		const getHashedPassword = `SELECT hashedPassword FROM system.users ` +
			`WHERE username=$1`
		values, err := p.queryRow(getHashedPassword, normalizedUsername)
		if err != nil {
			return errors.Errorf("error looking up user %s", normalizedUsername)
		}
		if len(values) == 0 {
			return errors.Errorf("user %s does not exist", normalizedUsername)
		}
		hashedPassword = []byte(*(values[0].(*parser.DBytes)))
		return nil
	}); err != nil {
		return nil, err
	}

	return hashedPassword, nil
}
Example #27
// checkGossip fetches the gossip infoStore from each node and invokes the given
// function. The test passes if the function returns 0 for every node,
// retrying for up to the given duration.
func checkGossip(t *testing.T, c cluster.Cluster, d time.Duration, f checkGossipFunc) {
	err := util.RetryForDuration(d, func() error {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
			return nil
		case <-time.After(1 * time.Second):
		}

		var infoStatus gossip.InfoStatus
		for i := 0; i < c.NumNodes(); i++ {
			if err := util.GetJSON(cluster.HTTPClient, c.URL(i)+"/_status/gossip/local", &infoStatus); err != nil {
				return err
			}
			if err := f(infoStatus.Infos); err != nil {
				return errors.Errorf("node %d: %s", i, err)
			}
		}

		return nil
	})
	if err != nil {
		t.Fatal(errors.Errorf("condition failed to evaluate within %s: %s", d, err))
	}
}
Example #28
// getTableSpan returns a span stored at a checkpoint idx, or in the absence
// of a checkpoint, the span over all keys within a table.
func (sc *SchemaChanger) getTableSpan(mutationIdx int) (roachpb.Span, error) {
	var tableDesc *sqlbase.TableDescriptor
	if err := sc.db.Txn(context.TODO(), func(txn *client.Txn) error {
		var err error
		tableDesc, err = sqlbase.GetTableDescFromID(txn, sc.tableID)
		return err
	}); err != nil {
		return roachpb.Span{}, err
	}
	if len(tableDesc.Mutations) <= mutationIdx {
		return roachpb.Span{},
			errors.Errorf("cannot find idx %d among %d mutations", mutationIdx, len(tableDesc.Mutations))
	}
	if mutationID := tableDesc.Mutations[mutationIdx].MutationID; mutationID != sc.mutationID {
		return roachpb.Span{},
			errors.Errorf("mutation index pointing to the wrong schema change, %d vs expected %d", mutationID, sc.mutationID)
	}
	resumeSpan := tableDesc.Mutations[mutationIdx].ResumeSpan
	if resumeSpan.Key != nil {
		return resumeSpan, nil
	}
	prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.PrimaryIndex.ID))
	return roachpb.Span{
		Key:    prefix,
		EndKey: prefix.PrefixEnd(),
	}, nil
}
Example #29
func translatePullError(err error, ref reference.Named) error {
	switch v := err.(type) {
	case errcode.Errors:
		if len(v) != 0 {
			for _, extra := range v[1:] {
				logrus.Infof("Ignoring extra error returned from registry: %v", extra)
			}
			return translatePullError(v[0], ref)
		}
	case errcode.Error:
		var newErr error
		switch v.Code {
		case errcode.ErrorCodeDenied:
			// ErrorCodeDenied is used when access to the repository was denied
			newErr = errors.Errorf("repository %s not found: does not exist or no read access", ref.Name())
		case v2.ErrorCodeManifestUnknown:
			newErr = errors.Errorf("manifest for %s not found", ref.String())
		case v2.ErrorCodeNameUnknown:
			newErr = errors.Errorf("repository %s not found", ref.Name())
		}
		if newErr != nil {
			logrus.Infof("Translating %q to %q", err, newErr)
			return newErr
		}
	case xfer.DoNotRetry:
		return translatePullError(v.Err, ref)
	}

	return err
}
Example #30
// RemoveTarget returns a suitable replica to remove from the provided replica
// set. It attempts to consider which of the provided replicas would be the best
// candidate for removal. It also will exclude any replica that belongs to the
// range lease holder's store ID.
//
// TODO(mrtracy): removeTarget eventually needs to accept the attributes from
// the zone config associated with the provided replicas. This will allow it to
// make correct decisions in the case of ranges with heterogeneous replica
// requirements (i.e. multiple data centers).
func (a Allocator) RemoveTarget(
	existing []roachpb.ReplicaDescriptor, leaseStoreID roachpb.StoreID,
) (roachpb.ReplicaDescriptor, error) {
	if len(existing) == 0 {
		return roachpb.ReplicaDescriptor{}, errors.Errorf("must supply at least one replica to allocator.RemoveTarget()")
	}

	// Retrieve store descriptors for the provided replicas from the StorePool.
	sl := StoreList{}
	for _, exist := range existing {
		if exist.StoreID == leaseStoreID {
			continue
		}
		desc, ok := a.storePool.getStoreDescriptor(exist.StoreID)
		if !ok {
			continue
		}
		sl.add(desc)
	}

	if bad := a.selectBad(sl); bad != nil {
		for _, exist := range existing {
			if exist.StoreID == bad.StoreID {
				return exist, nil
			}
		}
	}
	return roachpb.ReplicaDescriptor{}, errors.Errorf("RemoveTarget() could not select an appropriate replica to be remove")
}