Code Example #1
File: server.go Project: danieldeb/cockroach
// serve connections on this listener until it is closed.
func (s *Server) serve(ln net.Listener) {
	for {
		conn, err := ln.Accept()
		if err != nil {
			if !s.isClosing() {
				log.Error(err)
			}
			return
		}

		s.mu.Lock()
		s.conns[conn] = struct{}{}
		s.mu.Unlock()

		go func() {
			defer func() {
				s.mu.Lock()
				delete(s.conns, conn)
				s.mu.Unlock()
				conn.Close()
			}()

			if err := s.serveConn(conn); err != nil {
				if err != io.EOF && !s.isClosing() {
					log.Error(err)
				}
			}
		}()
	}
}
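
A self-contained sketch of the same accept-loop pattern — accept, register the connection under a mutex, clean up in a deferred block — using only the standard library. The echoServer type and its echo handler are hypothetical stand-ins for Server and serveConn:

package main

import (
	"io"
	"log"
	"net"
	"sync"
)

// echoServer is a hypothetical stand-in for Server: it tracks live
// connections under a mutex, as the example above does.
type echoServer struct {
	mu    sync.Mutex
	conns map[net.Conn]struct{}
}

func (s *echoServer) serve(ln net.Listener) {
	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Print(err)
			return
		}
		s.mu.Lock()
		s.conns[conn] = struct{}{}
		s.mu.Unlock()

		go func(conn net.Conn) {
			// Deregister and close no matter how the handler exits.
			defer func() {
				s.mu.Lock()
				delete(s.conns, conn)
				s.mu.Unlock()
				conn.Close()
			}()
			// Echo bytes back until the client closes the connection;
			// io.Copy returns a nil error on a clean EOF.
			if _, err := io.Copy(conn, conn); err != nil {
				log.Print(err)
			}
		}(conn)
	}
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("echoing on %s", ln.Addr())
	(&echoServer{conns: map[net.Conn]struct{}{}}).serve(ln)
}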
Code Example #2
File: status.go Project: backend2use/cockroachdb
// handleLocalLogFile handles GET requests for a single log. If no filename is
// available, it returns 404. The log contents are returned in structured
// format as JSON.
func (s *statusServer) handleLocalLogFile(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	log.Flush()
	file := ps.ByName("file")
	reader, err := log.GetLogReader(file, false /* !allowAbsolute */)
	if reader == nil || err != nil {
		log.Errorf("unable to open log file %s: %s", file, err)
		http.NotFound(w, r)
		return
	}
	defer reader.Close()

	entry := log.LogEntry{}
	var entries []log.LogEntry
	decoder := log.NewEntryDecoder(reader)
	for {
		if err := decoder.Decode(&entry); err != nil {
			if err == io.EOF {
				break
			}
			log.Error(err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		entries = append(entries, entry)
	}

	b, contentType, err := util.MarshalResponse(r, entries, []util.EncodingType{util.JSONEncoding})
	if err != nil {
		log.Error(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Set(util.ContentTypeHeader, contentType)
	w.Write(b)
}
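
The decode-until-EOF loop generalizes to any streaming decoder. A minimal sketch of the same loop using encoding/json from the standard library; the entry type here is a hypothetical stand-in for log.LogEntry:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"strings"
)

// entry is a hypothetical record shape; the example above decodes
// log.LogEntry values instead.
type entry struct {
	Msg string `json:"msg"`
}

func main() {
	// A stream of concatenated JSON values, as a decoder would see
	// when reading a structured log file.
	r := strings.NewReader(`{"msg":"a"} {"msg":"b"}`)
	dec := json.NewDecoder(r)

	var entries []entry
	for {
		var e entry
		if err := dec.Decode(&e); err != nil {
			if err == io.EOF {
				break // end of stream: expected, not an error
			}
			log.Fatal(err) // anything else is a real decode failure
		}
		entries = append(entries, e)
	}
	fmt.Println(entries)
}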
Code Example #3
File: status.go Project: backend2use/cockroachdb
// handleStoreStatus handles GET requests for a single store's status. If the
// id cannot be parsed, it responds with an internal server error.
func (s *statusServer) handleStoreStatus(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	id, err := strconv.ParseInt(ps.ByName("id"), 10, 32)
	if err != nil {
		log.Error(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	key := keys.StoreStatusKey(int32(id))

	storeStatus := &storage.StoreStatus{}
	if err := s.db.GetProto(key, storeStatus); err != nil {
		log.Error(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	b, contentType, err := util.MarshalResponse(r, storeStatus, []util.EncodingType{util.JSONEncoding})
	if err != nil {
		log.Error(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Set(util.ContentTypeHeader, contentType)
	w.Write(b)
}
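
The handler's shape — parse an id, fetch and marshal the status, answer 500 on any failure — can be sketched with plain net/http. The storeStatus payload and the query-parameter routing are assumptions standing in for storage.StoreStatus and httprouter:

package main

import (
	"encoding/json"
	"log"
	"net/http"
	"strconv"
)

// storeStatus is a hypothetical payload standing in for storage.StoreStatus.
type storeStatus struct {
	ID int32 `json:"id"`
}

func handleStoreStatus(w http.ResponseWriter, r *http.Request) {
	// Mirroring the example, any failure answers with a 500.
	id, err := strconv.ParseInt(r.URL.Query().Get("id"), 10, 32)
	if err != nil {
		log.Print(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	b, err := json.Marshal(storeStatus{ID: int32(id)})
	if err != nil {
		log.Print(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(b)
}

func main() {
	http.HandleFunc("/status/store", handleStoreStatus)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}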
Code Example #4
File: split_queue.go Project: yosiat/cockroach
// shouldQueue determines whether a range should be queued for
// splitting. This is true if the range is intersected by a zone config
// prefix or if the range's size in bytes exceeds the limit for the zone.
func (sq *splitQueue) shouldQueue(now proto.Timestamp, rng *Replica) (shouldQ bool, priority float64) {
	// Load the system config.
	cfg, err := sq.gossip.GetSystemConfig()
	if err != nil {
		log.Error(err)
		return
	}

	desc := rng.Desc()
	if len(cfg.ComputeSplitKeys(desc.StartKey, desc.EndKey)) > 0 {
		// Set priority to 1 in the event the range is split by zone configs.
		priority = 1
		shouldQ = true
	}

	// Add priority based on the size of range compared to the max
	// size for the zone it's in.
	zone, err := cfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		log.Error(err)
		return
	}

	if ratio := float64(rng.stats.GetSize()) / float64(zone.RangeMaxBytes); ratio > 1 {
		priority += ratio
		shouldQ = true
	}
	return
}
Code Example #5
File: status.go Project: backend2use/cockroachdb
// handleStoresStatus handles GET requests for all store statuses.
func (s *statusServer) handleStoresStatus(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	startKey := keys.StatusStorePrefix
	endKey := startKey.PrefixEnd()

	rows, err := s.db.Scan(startKey, endKey, 0)
	if err != nil {
		log.Error(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	storeStatuses := []storage.StoreStatus{}
	for _, row := range rows {
		storeStatus := &storage.StoreStatus{}
		if err := row.ValueProto(storeStatus); err != nil {
			log.Error(err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		storeStatuses = append(storeStatuses, *storeStatus)
	}
	b, contentType, err := util.MarshalResponse(r, storeStatuses, []util.EncodingType{util.JSONEncoding})
	if err != nil {
		log.Error(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Set(util.ContentTypeHeader, contentType)
	w.Write(b)
}
Code Example #6
File: replicate_queue.go Project: yosiat/cockroach
func (rq replicateQueue) shouldQueue(now proto.Timestamp, repl *Replica) (shouldQ bool, priority float64) {
	// Load the system config.
	cfg, err := rq.gossip.GetSystemConfig()
	if err != nil {
		log.Error(err)
		return
	}

	desc := repl.Desc()
	if len(cfg.ComputeSplitKeys(desc.StartKey, desc.EndKey)) > 0 {
		// If the replica's range needs splitting, wait until done.
		return
	}

	// Find the zone config for this range.
	zone, err := cfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		log.Error(err)
		return
	}

	action, priority := rq.allocator.ComputeAction(*zone, repl.Desc())
	if action == aaNoop {
		return false, 0
	}
	return true, priority
}
Code Example #7
File: user.go Project: greener98103/cockroach
// runSetUser invokes the REST API with POST action and username as
// path. Prompts for the password twice on stdin.
// TODO(marc): once we have more fields in the user config, we will need
// to allow changing just some of them (eg: change email, but leave password).
func runSetUser(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		cmd.Usage()
		return
	}
	hashed, err := security.PromptForPasswordAndHash()
	if err != nil {
		log.Error(err)
		return
	}
	// Build a UserConfig object. RunSetUser expects Yaml.
	// TODO(marc): re-work admin client library to take other encodings.
	pb := &proto.UserConfig{HashedPassword: hashed}
	contents, err := yaml.Marshal(pb)
	if err != nil {
		log.Error(err)
		return
	}
	admin := client.NewAdminClient(&Context.Context, Context.Addr, client.User)
	if err := admin.SetYAML(args[0], string(contents)); err != nil {
		log.Error(err)
		return
	}
	fmt.Printf("Wrote user config for %q\n", args[0])
}
Code Example #8
File: log.go Project: mbertschler/cockroach
// runLog creates a term log entry reader for each
// log file named in the arguments.
func runLog(cmd *cobra.Command, args []string) {
	for _, arg := range args {
		reader, err := log.GetLogReader(arg, false /* !restricted */)
		if err != nil {
			log.Error(err)
			break
		}
		if _, err := io.Copy(os.Stdout, log.NewTermEntryReader(reader)); err != nil {
			log.Error(err)
			break
		}
		reader.Close()
	}
}
Code Example #9
File: status.go Project: zhengchen1208/cockroach
// newStatusServer allocates and returns a statusServer.
func newStatusServer(db *client.DB, gossip *gossip.Gossip, ctx *Context) *statusServer {
	// Create an http client with a timeout
	tlsConfig, err := ctx.GetClientTLSConfig()
	if err != nil {
		log.Error(err)
		return nil
	}
	httpClient := &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsConfig},
		Timeout:   logEntriesTimeout,
	}

	server := &statusServer{
		db:          db,
		gossip:      gossip,
		router:      httprouter.New(),
		ctx:         ctx,
		proxyClient: httpClient,
	}

	server.router.GET(statusGossipKeyPrefix, server.handleGossipStatus)
	server.router.GET(statusLocalKeyPrefix, server.handleLocalStatus)
	server.router.GET(statusLocalLogFileKeyPrefix, server.handleLocalLogFiles)
	server.router.GET(statusLocalLogFileKeyPattern, server.handleLocalLogFile)
	server.router.GET(statusLogKeyPrefix, server.handleLocalLog)
	server.router.GET(statusLogKeyPattern, server.handleLogs)
	server.router.GET(statusLocalStacksKey, server.handleLocalStacks)
	server.router.GET(statusNodeKeyPrefix, server.handleNodesStatus)
	server.router.GET(statusNodeKeyPattern, server.handleNodeStatus)
	server.router.GET(statusStoreKeyPrefix, server.handleStoresStatus)
	server.router.GET(statusStoreKeyPattern, server.handleStoreStatus)
	server.router.GET(statusTransactionsKeyPrefix, server.handleTransactionStatus)

	return server
}
Code Example #10
func (rq *replicateQueue) shouldQueue(now roachpb.Timestamp, repl *Replica,
	sysCfg config.SystemConfig) (shouldQ bool, priority float64) {

	if repl.needsSplitBySize() {
		// If the range exceeds the split threshold, let that finish
		// first. Ranges must fit in memory on both sender and receiver
		// nodes while being replicated. This supplements the check
		// provided by acceptsUnsplitRanges, which looks at zone config
		// boundaries rather than data size.
		return
	}

	// Find the zone config for this range.
	desc := repl.Desc()
	zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		log.Error(err)
		return
	}

	action, priority := rq.allocator.ComputeAction(*zone, desc)
	if action != AllocatorNoop {
		return true, priority
	}
	// See if there is a rebalancing opportunity present.
	shouldRebalance := rq.allocator.ShouldRebalance(repl.store.StoreID())
	return shouldRebalance, 0
}
Code Example #11
File: server.go Project: mbertschler/cockroach
// serve connections on this listener until it is closed.
func (s *Server) serve(ln net.Listener) {
	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Error(err)
			return
		}

		s.conns = append(s.conns, conn)
		go func() {
			if err := s.serveConn(conn); err != nil {
				log.Error(err)
			}
		}()
	}
}
Code Example #12
File: zone.go Project: alaypatel07/cockroach
// runGetZone retrieves the zone config for a given object id,
// and if present, outputs its YAML representation.
// TODO(marc): accept db/table names rather than IDs.
func runGetZone(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		mustUsage(cmd)
		return
	}
	id, err := strconv.Atoi(args[0])
	if err != nil {
		log.Errorf("could not parse object ID %s", args[0])
		return
	}

	db, _ := makeSQLClient()
	defer func() { _ = db.Close() }()
	// TODO(marc): switch to placeholders once they work with pgwire.
	_, rows, err := runQueryWithFormat(db, fmtMap{"config": formatZone},
		fmt.Sprintf(`SELECT * FROM system.zones WHERE id=%d`, id))
	if err != nil {
		log.Error(err)
		return
	}

	if len(rows) == 0 {
		log.Errorf("Object %d: no zone config found", id)
		return
	}
	fmt.Println(rows[0][1])
}
Code Example #13
File: main.go Project: yangxuanjia/cockroach
func (z *zeroSum) maybeLogError(err error) {
	if strings.Contains(err.Error(), "range is frozen") {
		return
	}
	log.Error(context.Background(), err)
	atomic.AddUint64(&z.stats.errors, 1)
}
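
Swallowing a known-benign error while counting everything else needs only strings and sync/atomic. This sketch mirrors maybeLogError with a plain fmt.Println in place of log.Error:

package main

import (
	"errors"
	"fmt"
	"strings"
	"sync/atomic"
)

var errCount uint64

// maybeLogError ignores a known-benign error and counts the rest,
// as in the example above.
func maybeLogError(err error) {
	if strings.Contains(err.Error(), "range is frozen") {
		return
	}
	fmt.Println("error:", err)
	atomic.AddUint64(&errCount, 1)
}

func main() {
	maybeLogError(errors.New("range is frozen"))    // ignored
	maybeLogError(errors.New("connection refused")) // counted
	fmt.Println("errors:", atomic.LoadUint64(&errCount))
}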
Code Example #14
func (r *Replica) verifyChecksumTrigger(
	ctx context.Context, args roachpb.VerifyChecksumRequest,
) {
	id := args.ChecksumID
	c, ok := r.getChecksum(ctx, id)
	if !ok {
		log.Errorf(ctx, "consistency check skipped: checksum for id = %v doesn't exist", id)
		// Return success because a checksum might be missing only on
		// this replica. A checksum might be missing because of a
		// number of reasons: GC-ed, server restart, and ComputeChecksum
		// version incompatibility.
		return
	}
	if c.checksum != nil && !bytes.Equal(c.checksum, args.Checksum) {
		// Replication consistency problem!
		logFunc := log.Errorf

		// Collect some more debug information.
		if args.Snapshot == nil {
			// No debug information; run another consistency check to deliver
			// more debug information.
			if err := r.store.stopper.RunAsyncTask(func() {
				log.Errorf(ctx, "%s: consistency check failed; fetching details", r)
				desc := r.Desc()
				startKey := desc.StartKey.AsRawKey()
				// Can't use a start key less than LocalMax.
				if bytes.Compare(startKey, keys.LocalMax) < 0 {
					startKey = keys.LocalMax
				}
				if err := r.store.db.CheckConsistency(startKey, desc.EndKey.AsRawKey(), true /* withDiff */); err != nil {
					log.Errorf(ctx, "couldn't rerun consistency check: %s", err)
				}
			}); err != nil {
				log.Error(ctx, errors.Wrap(err, "could not rerun consistency check"))
			}
		} else {
			// Compute diff.
			diff := diffRange(args.Snapshot, c.snapshot)
			if diff != nil {
				for _, d := range diff {
					l := "leader"
					if d.LeaseHolder {
						l = "replica"
					}
					log.Errorf(ctx, "consistency check failed: k:v = (%s (%x), %s, %x) not present on %s",
						d.Key, d.Key, d.Timestamp, d.Value, l)
				}
			}
			if r.store.ctx.ConsistencyCheckPanicOnFailure {
				if p := r.store.ctx.TestingKnobs.BadChecksumPanic; p != nil {
					p(diff)
				} else {
					logFunc = log.Fatalf
				}
			}
		}

	logFunc(ctx, "consistency check failed on replica: %s, checksum mismatch: e = %x, v = %x", r, args.Checksum, c.checksum)
	}
}
Code Example #15
File: localcluster.go Project: duguruiyuan/cockroach
func (l *LocalCluster) runDockerSpy() {
	l.panicOnStop()

	create := func() (*Container, error) {
		return createContainer(l,
			container.Config{
				Image: dockerspyImage + ":" + dockerspyTag,
				Cmd:   strslice.New("--dns-domain=" + domain),
			}, container.HostConfig{
				Binds:           []string{"/var/run/docker.sock:/var/run/docker.sock"},
				PublishAllPorts: true,
			},
			"docker-spy",
		)
	}
	c, err := create()
	if dockerclient.IsErrImageNotFound(err) {
		if err := pullImage(l, types.ImagePullOptions{ImageID: dockerspyImage, Tag: dockerspyTag}); err != nil {
			log.Fatal(err)
		}

		c, err = create()
	}
	maybePanic(err)
	maybePanic(c.Start())
	l.dns = c
	if ci, err := c.Inspect(); err != nil {
		log.Error(err)
	} else {
		log.Infof("started %s: %s", c.Name(), ci.NetworkSettings.IPAddress)
	}
}
Code Example #16
func (rq replicateQueue) shouldQueue(now proto.Timestamp, repl *Replica) (shouldQ bool, priority float64) {
	// If the replica's range spans multiple zones, ignore it until the split
	// queue has processed it.
	if len(computeSplitKeys(rq.gossip, repl)) > 0 {
		return
	}

	// Load the zone config to find the desired replica attributes.
	zone, err := lookupZoneConfig(rq.gossip, repl)
	if err != nil {
		log.Error(err)
		return
	}

	delta := rq.replicaDelta(zone, repl, repl.Desc())
	if delta == 0 {
		if log.V(1) {
			log.Infof("%s has the correct number of nodes", repl)
		}
		return false, 0
	}
	if delta > 0 {
		if log.V(1) {
			log.Infof("%s needs to add %d nodes", repl, delta)
		}
		// For ranges which need additional replicas, increase the priority.
		return true, float64(delta + 10)
	}
	if log.V(1) {
		log.Infof("%s needs to remove %d nodes", repl, -delta)
	}
	// For ranges which have too many replicas, the priority is the
	// absolute value of the delta.
	return true, float64(-delta)
}
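
The delta-to-priority arithmetic can be isolated: a replica deficit earns a +10 boost so that under-replicated ranges always outrank over-replicated ones, which score only the absolute value of the delta. A minimal sketch of just that mapping:

package main

import "fmt"

// queuePriority reproduces the example's arithmetic: delta is the
// number of replicas to add (positive) or remove (negative).
func queuePriority(delta int) (shouldQ bool, priority float64) {
	switch {
	case delta == 0:
		return false, 0
	case delta > 0:
		return true, float64(delta + 10) // additions outrank removals
	default:
		return true, float64(-delta)
	}
}

func main() {
	for _, d := range []int{-2, 0, 1} {
		q, p := queuePriority(d)
		fmt.Printf("delta=%d shouldQ=%t priority=%.0f\n", d, q, p)
	}
}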
Code Example #17
File: gossip.go Project: liugangnhm/cockroach
// SetStorage provides an instance of the Storage interface
// for reading and writing gossip bootstrap data from persistent
// storage. This should be invoked as early in the lifecycle of a
// gossip instance as possible, but can be called at any time.
func (g *Gossip) SetStorage(storage Storage) error {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.storage = storage

	// Read the bootstrap info from the persistent store.
	var storedBI BootstrapInfo
	err := storage.ReadBootstrapInfo(&storedBI)
	if err != nil {
		log.Warningf("failed to read gossip bootstrap info: %s", err)
	}

	// Merge the stored bootstrap info addresses with any we've become
	// aware of through gossip.
	if len(g.bootstrapInfo.Addresses) > 0 {
		existing := map[string]struct{}{}
		makeKey := func(a util.UnresolvedAddr) string { return fmt.Sprintf("%s,%s", a.Network(), a.String()) }
		for _, addr := range g.bootstrapInfo.Addresses {
			existing[makeKey(addr)] = struct{}{}
		}
		for _, addr := range storedBI.Addresses {
			// If the address is new, and isn't our own address, add it.
			if _, ok := existing[makeKey(addr)]; !ok && addr != g.is.NodeAddr {
				g.maybeAddBootstrapAddress(addr)
			}
		}
		// Persist merged addresses.
		if numAddrs := len(g.bootstrapInfo.Addresses); numAddrs > len(storedBI.Addresses) {
			if err := g.storage.WriteBootstrapInfo(&g.bootstrapInfo); err != nil {
				log.Error(err)
			}
		}
	} else {
		g.bootstrapInfo = storedBI
	}

	// Cycle through all persisted bootstrap hosts and add resolvers for
	// any which haven't already been added.
	newResolverFound := false
	for _, addr := range g.bootstrapInfo.Addresses {
		if !g.maybeAddResolver(addr) {
			continue
		}
		// If we find a new resolver, reset the resolver index so that the
		// next resolver we try is the first of the new resolvers.
		if !newResolverFound {
			newResolverFound = true
			g.resolverIdx = len(g.resolvers) - 1
		}
	}

	// If a new resolver was found, immediately signal bootstrap.
	if newResolverFound {
		if log.V(1) {
			log.Infof("found new resolvers from storage; signalling bootstrap")
		}
		g.signalStalled()
	}
	return nil
}
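
The merge step hinges on deduplicating addresses by a composite network-plus-address key. A standalone sketch, with a hypothetical addr type standing in for util.UnresolvedAddr:

package main

import "fmt"

// addr is a hypothetical stand-in for util.UnresolvedAddr.
type addr struct{ network, value string }

// mergeAddrs appends any stored address not already known, keyed by
// network plus address exactly as SetStorage does.
func mergeAddrs(known, stored []addr) []addr {
	existing := map[string]struct{}{}
	key := func(a addr) string { return a.network + "," + a.value }
	for _, a := range known {
		existing[key(a)] = struct{}{}
	}
	for _, a := range stored {
		if _, ok := existing[key(a)]; !ok {
			known = append(known, a)
			existing[key(a)] = struct{}{}
		}
	}
	return known
}

func main() {
	known := []addr{{"tcp", "1.1.1.1:26257"}}
	stored := []addr{{"tcp", "1.1.1.1:26257"}, {"tcp", "2.2.2.2:26257"}}
	fmt.Println(mergeAddrs(known, stored))
}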
Code Example #18
File: status.go Project: JKhawaja/cockroach
// newStatusServer allocates and returns a statusServer.
func newStatusServer(
	db *client.DB,
	gossip *gossip.Gossip,
	metricSource json.Marshaler,
	ctx *base.Context,
	rpcCtx *rpc.Context,
	stores *storage.Stores,
) *statusServer {
	// Create an http client with a timeout
	httpClient, err := ctx.GetHTTPClient()
	if err != nil {
		log.Error(err)
		return nil
	}

	server := &statusServer{
		db:           db,
		gossip:       gossip,
		metricSource: metricSource,
		router:       httprouter.New(),
		rpcCtx:       rpcCtx,
		proxyClient:  httpClient,
		stores:       stores,
	}

	server.router.GET(statusLogFilesListPattern, server.handleLogFilesList)
	server.router.GET(statusLogFilePattern, server.handleLogFile)
	server.router.GET(statusLogsPattern, server.handleLogs)
	// TODO(tschottdorf): significant overlap with /debug/pprof/goroutine,
	// except that this one allows querying by NodeID.
	server.router.GET(statusStacksPattern, server.handleStacks)
	server.router.GET(statusMetricsPattern, server.handleMetrics)

	return server
}
Code Example #19
func (rq *replicateQueue) shouldQueue(
	now hlc.Timestamp,
	repl *Replica,
	sysCfg config.SystemConfig,
) (shouldQ bool, priority float64) {
	if !repl.store.splitQueue.Disabled() && repl.needsSplitBySize() {
		// If the range exceeds the split threshold, let that finish first.
		// Ranges must fit in memory on both sender and receiver nodes while
		// being replicated. This supplements the check provided by
		// acceptsUnsplitRanges, which looks at zone config boundaries rather
		// than data size.
		//
		// This check is ignored if the split queue is disabled, since in that
		// case, the split will never come.
		return
	}

	// Find the zone config for this range.
	desc := repl.Desc()
	zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		log.Error(err)
		return
	}

	action, priority := rq.allocator.ComputeAction(*zone, desc)
	if action != AllocatorNoop {
		return true, priority
	}
	// See if there is a rebalancing opportunity present.
	shouldRebalance := rq.allocator.ShouldRebalance(repl.store.StoreID())
	return shouldRebalance, 0
}
Code Example #20
File: transport.go Project: yangxuanjia/cockroach
// SendNext invokes the specified RPC on the supplied client when the
// client is ready. On success, the reply is sent on the channel;
// otherwise an error is sent.
func (gt *grpcTransport) SendNext(done chan BatchCall) {
	client := gt.orderedClients[0]
	gt.orderedClients = gt.orderedClients[1:]

	addr := client.remoteAddr
	if log.V(2) {
		log.Infof(gt.opts.Context, "sending request to %s: %+v", addr, client.args)
	}

	if localServer := gt.rpcContext.GetLocalInternalServerForAddr(addr); enableLocalCalls && localServer != nil {
		ctx, cancel := gt.opts.contextWithTimeout()
		defer cancel()

		reply, err := localServer.Batch(ctx, &client.args)
		done <- BatchCall{Reply: reply, Err: err}
		return
	}

	go func() {
		ctx, cancel := gt.opts.contextWithTimeout()
		defer cancel()

		reply, err := client.client.Batch(ctx, &client.args)
		if reply != nil {
			for i := range reply.Responses {
				if err := reply.Responses[i].GetInner().Verify(client.args.Requests[i].GetInner()); err != nil {
					log.Error(ctx, err)
				}
			}
		}
		done <- BatchCall{Reply: reply, Err: err}
	}()
}
Code Example #21
File: client.go Project: liugangnhm/cockroach
// start dials the remote addr and commences gossip once connected.
// Upon exit, the client is sent on the disconnected channel.
// If the client experienced an error, its err field will
// be set. This method starts client processing in a goroutine and
// returns immediately.
func (c *client) start(g *Gossip, disconnected chan *client, ctx *rpc.Context, stopper *stop.Stopper) {
	stopper.RunWorker(func() {
		defer func() {
			disconnected <- c
		}()

		conn, err := ctx.GRPCDial(c.addr.String())
		if err != nil {
			log.Errorf("failed to dial: %v", err)
			return
		}
		defer func() {
			if err := conn.Close(); err != nil {
				log.Error(err)
			}
		}()

		// Start gossiping.
		if err := c.gossip(g, NewGossipClient(conn), stopper); err != nil {
			if !grpcutil.IsClosedConnection(err) {
				g.mu.Lock()
				peerID := c.peerID
				g.mu.Unlock()
				if peerID != 0 {
					log.Infof("closing client to node %d (%s): %s", peerID, c.addr, err)
				} else {
					log.Infof("closing client to %s: %s", c.addr, err)
				}
			}
		}
	})
}
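
The defer-then-notify shape — guaranteeing the disconnected signal no matter how the worker exits — is the core of client.start. A minimal sketch with a hypothetical client type and a bare goroutine in place of the stopper:

package main

import (
	"errors"
	"fmt"
)

// client is a hypothetical stand-in for the gossip client above.
type client struct{ addr string }

// run performs work in a goroutine and always sends c on disconnected
// when it returns, whether the work failed or not.
func (c *client) run(disconnected chan<- *client, work func() error) {
	go func() {
		defer func() { disconnected <- c }()
		if err := work(); err != nil {
			fmt.Printf("closing client to %s: %s\n", c.addr, err)
		}
	}()
}

func main() {
	disconnected := make(chan *client)
	c := &client{addr: "node1:26257"}
	c.run(disconnected, func() error { return errors.New("connection reset") })
	fmt.Printf("disconnected: %s\n", (<-disconnected).addr)
}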
Code Example #22
File: localcluster.go Project: danieldeb/cockroach
func (l *LocalCluster) runDockerSpy() {
	l.panicOnStop()

	create := func() (*Container, error) {
		return createContainer(l, dockerclient.ContainerConfig{
			Image: dockerspyImage,
			Cmd:   []string{"--dns-domain=" + domain},
		})
	}
	c, err := create()
	if err == dockerclient.ErrImageNotFound {
		log.Infof("pulling %s", dockerspyImage)
		err = l.client.PullImage(dockerspyImage, nil)
		if err == nil {
			c, err = create()
		}
	}
	if err != nil {
		panic(err)
	}
	maybePanic(c.Start([]string{"/var/run/docker.sock:/var/run/docker.sock"}, nil, nil))
	c.Name = "docker-spy"
	l.dns = c
	if ci, err := c.Inspect(); err != nil {
		log.Error(err)
	} else {
		log.Infof("started %s: %s", c.Name, ci.NetworkSettings.IPAddress)
	}
}
Code Example #23
File: zone.go Project: harryyeh/cockroach
// runGetZone retrieves the zone config for a given object id,
// and if present, outputs its YAML representation.
// TODO(marc): accept db/table names rather than IDs.
func runGetZone(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		mustUsage(cmd)
		return
	}
	id, err := strconv.Atoi(args[0])
	if err != nil {
		log.Errorf("could not parse object ID %s", args[0])
		return
	}

	db := makeSQLClient()
	_, rows, err := runQueryWithFormat(db, fmtMap{"config": formatZone},
		`SELECT * FROM system.zones WHERE id=$1`, id)
	if err != nil {
		log.Error(err)
		return
	}

	if len(rows) == 0 {
		log.Errorf("Object %d: no zone config found", id)
		return
	}
	fmt.Fprintln(osStdout, rows[0][1])
}
Code Example #24
File: v3.go Project: JKhawaja/cockroach
func (c *v3Conn) finish() {
	// This is better than always flushing on error.
	if err := c.wr.Flush(); err != nil {
		log.Error(err)
	}
	_ = c.conn.Close()
	c.session.Finish()
}
Code Example #25
File: user.go Project: husttom/cockroach
// runSetUser inserts a new user into the system.users table.
// Prompts for the password twice on stdin.
// TODO(marc): once we have more fields in the user config, we will need
// to allow changing just some of them (eg: change email, but leave password).
func runSetUser(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		cmd.Usage()
		return
	}
	hashed, err := security.PromptForPasswordAndHash()
	if err != nil {
		log.Error(err)
		return
	}
	db := makeSQLClient()
	err = runQuery(db, `INSERT INTO system.users VALUES ($1, $2)`, args[0], hashed)
	if err != nil {
		log.Error(err)
		return
	}
}
Code Example #26
File: zone.go Project: harryyeh/cockroach
// runSetZone parses the yaml input file, converts it to proto,
// and inserts it in the system.zones table.
// TODO(marc): accept db/table names rather than IDs.
func runSetZone(cmd *cobra.Command, args []string) {
	if len(args) != 2 {
		mustUsage(cmd)
		return
	}
	id, err := strconv.Atoi(args[0])
	if err != nil {
		log.Errorf("could not parse object ID %s", args[0])
		return
	}

	// Read in the config file.
	body, err := ioutil.ReadFile(args[1])
	if err != nil {
		log.Errorf("unable to read zone config file %q: %s", args[1], err)
		return
	}

	// Convert it to proto and marshal it again to put into the table.
	// This is a bit more tedious than taking protos directly,
	// but yaml is a more widely understood format.
	var pbZoneConfig config.ZoneConfig
	if err := yaml.Unmarshal(body, &pbZoneConfig); err != nil {
		log.Errorf("unable to parse zone config file %q: %s", args[1], err)
		return
	}

	if err := pbZoneConfig.Validate(); err != nil {
		log.Error(err)
		return
	}

	buf, err := gogoproto.Marshal(&pbZoneConfig)
	if err != nil {
		log.Errorf("unable to parse zone config file %q: %s", args[1], err)
		return
	}

	db := makeSQLClient()
	// TODO(marc): switch to UPSERT.
	err = runPrettyQuery(db, `INSERT INTO system.zones VALUES ($1, $2)`, id, buf)
	if err != nil {
		log.Error(err)
		return
	}
}
Code Example #27
File: status.go Project: yangxuanjia/cockroach
func (s *statusServer) handleVars(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	w.Header().Set(util.ContentTypeHeader, util.PlaintextContentType)
	err := s.metricSource.PrintAsText(w)
	if err != nil {
		log.Error(context.TODO(), err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
Code Example #28
File: status.go Project: JKhawaja/cockroach
func (s *statusServer) handleMetrics(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	resp, err := s.Metrics(context.TODO(), &MetricsRequest{NodeId: ps.ByName("node_id")})
	if err != nil {
		log.Error(err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
	writeJSONResponse(w, resp)
}
Code Example #29
File: gossip.go Project: binlijin/cockroach
// updateNodeAddress is a gossip callback which fires with each
// update to the node address. This allows us to compute the
// total size of the gossip network (for determining max peers
// each gossip node is allowed to have), as well as to create
// new resolvers for each encountered host and to write the
// set of gossip node addresses to persistent storage when it
// changes.
func (g *Gossip) updateNodeAddress(_ string, content roachpb.Value) {
	var desc roachpb.NodeDescriptor
	if err := content.GetProto(&desc); err != nil {
		log.Error(err)
		return
	}

	g.mu.Lock()
	defer g.mu.Unlock()

	// Recompute max peers based on size of network and set the max
	// sizes for incoming and outgoing node sets.
	defer func() {
		maxPeers := g.maxPeers(len(g.nodeDescs))
		g.incoming.setMaxSize(maxPeers)
		g.outgoing.setMaxSize(maxPeers)
	}()

	// Skip if the node has already been seen or it's our own address.
	if _, ok := g.nodeDescs[desc.NodeID]; ok || desc.Address == g.is.NodeAddr {
		return
	}
	g.nodeDescs[desc.NodeID] = &desc

	// Add this new node to our list of resolvers so we can keep
	// connecting to gossip if the original resolvers go offline.
	r, err := resolver.NewResolverFromUnresolvedAddr(desc.Address)
	if err != nil {
		log.Warningf("bad address from gossip node %s: %s", desc, err)
		return
	}
	if !g.haveResolver(r) {
		g.resolvers = append(g.resolvers, r)
	}
	// Add new address to bootstrap info and persist if possible.
	if !g.haveBootstrapAddress(desc.Address) {
		g.bootstrapInfo.Addresses = append(g.bootstrapInfo.Addresses, desc.Address)
		if g.storage != nil {
			// TODO(spencer): need to clean up ancient gossip nodes, which
			//   will otherwise stick around in the bootstrap info forever.
			if err := g.storage.WriteBootstrapInfo(&g.bootstrapInfo); err != nil {
				log.Error(err)
			}
		}
	}
}
Code Example #30
File: user.go Project: knorwood/cockroach
// runSetUser inserts a new user into the system.users table.
// Prompts for the password twice on stdin.
// TODO(marc): once we have more fields in the user config, we will need
// to allow changing just some of them (eg: change email, but leave password).
func runSetUser(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		cmd.Usage()
		return
	}
	hashed, err := security.PromptForPasswordAndHash()
	if err != nil {
		log.Error(err)
		return
	}
	db := makeSQLClient()
	err = processOneLine(db, fmt.Sprintf(`INSERT INTO system.users VALUES ('%s','%s')`, args[0], hashed))
	if err != nil {
		log.Error(err)
		return
	}
}