// createTestAbortCache creates an in-memory engine and
// returns an abort cache using the supplied Range ID.
func createTestAbortCache(
	t *testing.T, rangeID roachpb.RangeID, stopper *stop.Stopper,
) (*AbortCache, engine.Engine) {
	eng := engine.NewInMem(roachpb.Attributes{}, 1<<20)
	stopper.AddCloser(eng)
	return NewAbortCache(rangeID), eng
}
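// Hypothetical usage sketch, not part of the original source: a test that
// builds an abort cache over the in-memory engine and relies on the stopper
// to close it. The test name and range ID are placeholders, and the
// no-argument form of stopper.Stop() is an assumption about this vintage of
// the stop package.
func TestCreateTestAbortCacheExample(t *testing.T) {
	stopper := stop.NewStopper()
	defer stopper.Stop()

	cache, eng := createTestAbortCache(t, roachpb.RangeID(1), stopper)
	if cache == nil || eng == nil {
		t.Fatal("expected a non-nil abort cache and engine")
	}
	// The engine was registered as a closer inside createTestAbortCache, so
	// the deferred Stop above releases it; no explicit eng.Close() is needed.
}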
// TestingSetupZoneConfigHook initializes the zone config hook
// to 'testingZoneConfigHook', which uses 'testingZoneConfig'.
// Settings go back to their previous values when the stopper runs our closer.
func TestingSetupZoneConfigHook(stopper *stop.Stopper) {
	stopper.AddCloser(stop.CloserFn(testingResetZoneConfigHook))

	testingLock.Lock()
	defer testingLock.Unlock()
	if testingHasHook {
		panic("TestingSetupZoneConfigHook called without restoring state")
	}
	testingHasHook = true
	testingZoneConfig = make(zoneConfigMap)
	testingPreviousHook = ZoneConfigHook
	ZoneConfigHook = testingZoneConfigHook
	testingLargestIDHook = func(maxID uint32) (max uint32) {
		testingLock.Lock()
		defer testingLock.Unlock()
		for id := range testingZoneConfig {
			if maxID > 0 && id > maxID {
				continue
			}
			if id > max {
				max = id
			}
		}
		return
	}
}
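// Hypothetical test sketch, not part of the original source: it shows the
// intended setup/teardown pattern around the hook. The test name and object
// ID are placeholders, and TestingSetZoneConfig and DefaultZoneConfig are
// assumed to be the package's companion helpers for populating
// testingZoneConfig; substitute the actual setter if it differs.
func TestZoneConfigHookExample(t *testing.T) {
	stopper := stop.NewStopper()
	// Stopping the stopper runs testingResetZoneConfigHook, restoring the
	// previous ZoneConfigHook and clearing the testing state.
	defer stopper.Stop()

	TestingSetupZoneConfigHook(stopper)
	TestingSetZoneConfig(1000, DefaultZoneConfig())

	// ZoneConfigHook and testingLargestIDHook now see the injected entry.
}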
// New creates an instance of a gossip node.
// The higher level manages the NodeIDContainer instance (which can be shared by
// various server components). The ambient context is expected to already
// contain the node ID.
func New(
	ambient log.AmbientContext,
	nodeID *base.NodeIDContainer,
	rpcContext *rpc.Context,
	grpcServer *grpc.Server,
	resolvers []resolver.Resolver,
	stopper *stop.Stopper,
	registry *metric.Registry,
) *Gossip {
	ambient.SetEventLog("gossip", "gossip")
	g := &Gossip{
		server:            newServer(ambient, nodeID, stopper, registry),
		Connected:         make(chan struct{}),
		rpcContext:        rpcContext,
		outgoing:          makeNodeSet(minPeers, metric.NewGauge(MetaConnectionsOutgoingGauge)),
		bootstrapping:     map[string]struct{}{},
		disconnected:      make(chan *client, 10),
		stalledCh:         make(chan struct{}, 1),
		stallInterval:     defaultStallInterval,
		bootstrapInterval: defaultBootstrapInterval,
		cullInterval:      defaultCullInterval,
		nodeDescs:         map[roachpb.NodeID]*roachpb.NodeDescriptor{},
		resolverAddrs:     map[util.UnresolvedAddr]resolver.Resolver{},
		bootstrapAddrs:    map[util.UnresolvedAddr]roachpb.NodeID{},
	}
	stopper.AddCloser(stop.CloserFn(g.server.AmbientContext.FinishEventLog))

	registry.AddMetric(g.outgoing.gauge)
	g.clientsMu.breakers = map[string]*circuit.Breaker{}

	resolverAddrs := make([]string, len(resolvers))
	for i, resolver := range resolvers {
		resolverAddrs[i] = resolver.Addr()
	}
	ctx := g.AnnotateCtx(context.Background())
	if log.V(1) {
		log.Infof(ctx, "initial resolvers: %v", resolverAddrs)
	}

	g.SetResolvers(resolvers)

	g.mu.Lock()
	// Add ourselves as a SystemConfig watcher.
	g.mu.is.registerCallback(KeySystemConfig, g.updateSystemConfig)
	// Add ourselves as a node descriptor watcher.
	g.mu.is.registerCallback(MakePrefixPattern(KeyNodeIDPrefix), g.updateNodeAddress)
	g.mu.Unlock()

	RegisterGossipServer(grpcServer, g.server)
	return g
}
// openStore opens a RocksDB instance in dir, using a 512 MiB block cache and
// the per-store open file limit, and registers it with the stopper so the
// engine is closed on shutdown.
func openStore(cmd *cobra.Command, dir string, stopper *stop.Stopper) (*engine.RocksDB, error) {
	cache := engine.NewRocksDBCache(512 << 20)
	defer cache.Release()
	maxOpenFiles, err := server.SetOpenFileLimitForOneStore()
	if err != nil {
		return nil, err
	}

	db, err := engine.NewRocksDB(
		roachpb.Attributes{},
		dir,
		cache,
		0,
		maxOpenFiles,
	)
	if err != nil {
		return nil, err
	}
	stopper.AddCloser(db)
	return db, nil
}
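// Hypothetical usage sketch, not part of the original source: a debug
// command handler that opens the store under a caller-supplied directory and
// leaves cleanup to the stopper. runDebugExample and the args layout are
// placeholders, and the no-argument form of stopper.Stop() is an assumption.
func runDebugExample(cmd *cobra.Command, args []string) error {
	stopper := stop.NewStopper()
	defer stopper.Stop()

	db, err := openStore(cmd, args[0], stopper)
	if err != nil {
		return err
	}
	// Inspection of db would go here; the stopper's closer calls db.Close()
	// when Stop runs, so there is no explicit Close in this function.
	_ = db
	return nil
}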