// GetResponse looks up a response matching the specified cmdID and
// returns true if found. The response is deserialized into the
// supplied reply parameter. If no response is found, returns
// false. If a command is pending already for the cmdID, then this
// method will block until the command is completed or the
// response cache is cleared.
func (rc *ResponseCache) GetResponse(cmdID ClientCmdID, reply interface{}) (bool, error) {
	// Do nothing if command ID is empty.
	if cmdID.IsEmpty() {
		return false, nil
	}
	// If the command is inflight, wait for it to complete.
	rc.Lock()
	for {
		if cond, ok := rc.inflight[cmdID]; ok {
			cond.Wait()
		} else {
			break
		}
	}
	// Adding inflight here is preemptive; we don't want to hold the lock
	// while fetching from the on-disk cache. The vast majority of calls
	// to GetResponse will be cache misses, so this saves us from
	// acquiring the lock twice: once here and once below in the event we
	// experience a cache miss.
	rc.addInflightLocked(cmdID)
	rc.Unlock()

	// If the response is in the cache or we experienced an error, return.
	if ok, err := engine.GetI(rc.engine, rc.makeKey(cmdID), reply); ok || err != nil {
		rc.Lock() // Take lock after fetching response from cache.
		defer rc.Unlock()
		rc.removeInflightLocked(cmdID)
		return ok, err
	}
	// There's no command result cached for this ID; but inflight was added above.
	return false, nil
}
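// The inflight bookkeeping that GetResponse depends on is not shown
// here. Below is a minimal sketch of what it might look like, assuming
// ResponseCache embeds a sync.Mutex and keeps a *sync.Cond per inflight
// command ID; the struct layout is an assumption, and only the helper
// names and the cond.Wait usage come from GetResponse above. Requires
// the "sync" package.
type ResponseCache struct {
	sync.Mutex
	inflight map[ClientCmdID]*sync.Cond
	// engine and other fields elided.
}

// addInflightLocked marks cmdID as inflight. The caller must hold the
// lock. The cond shares the cache's mutex, so waiters in GetResponse
// can call cond.Wait directly while holding it.
func (rc *ResponseCache) addInflightLocked(cmdID ClientCmdID) {
	rc.inflight[cmdID] = sync.NewCond(&rc.Mutex)
}

// removeInflightLocked wakes any waiters blocked on cmdID and deletes
// the inflight entry. The caller must hold the lock.
func (rc *ResponseCache) removeInflightLocked(cmdID ClientCmdID) {
	if cond, ok := rc.inflight[cmdID]; ok {
		cond.Broadcast()
		delete(rc.inflight, cmdID)
	}
}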
// CreateRange allocates a new range ID and stores range metadata.
// On success, returns the new range.
func (s *Store) CreateRange(startKey, endKey engine.Key, replicas []Replica) (*Range, error) {
	rangeID, err := engine.Increment(s.engine, engine.KeyLocalRangeIDGenerator, 1)
	if err != nil {
		return nil, err
	}
	if ok, _ := engine.GetI(s.engine, makeRangeKey(rangeID), nil); ok {
		return nil, util.Error("newly allocated range ID already in use")
	}
	// RangeMetadata is stored locally on this store only. It is neither
	// replicated via raft nor available via the global kv store.
	meta := RangeMetadata{
		ClusterID: s.Ident.ClusterID,
		RangeID:   rangeID,
		RangeDescriptor: RangeDescriptor{
			StartKey: startKey,
			EndKey:   endKey,
			Replicas: replicas,
		},
	}
	err = engine.PutI(s.engine, makeRangeKey(rangeID), meta)
	if err != nil {
		return nil, err
	}
	rng := NewRange(meta, s.clock, s.engine, s.allocator, s.gossip)
	rng.Start()
	s.mu.Lock()
	defer s.mu.Unlock()
	s.ranges[rangeID] = rng
	return rng, nil
}
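// A hypothetical bootstrap-time use of CreateRange, creating a single
// range spanning the whole key space on this store. engine.KeyMin,
// engine.KeyMax, and the Replica fields shown are assumptions for
// illustration, not confirmed API.
func createFirstRange(s *Store) (*Range, error) {
	// A single replica living on this node/store; field names assumed.
	replicas := []Replica{{NodeID: 1, StoreID: 1}}
	return s.CreateRange(engine.KeyMin, engine.KeyMax, replicas)
}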
// IsBootstrapped returns true if the store has already been
// bootstrapped. If the store ident is corrupt, IsBootstrapped will
// return true; the exact error can be retrieved via a call to Init().
func (s *Store) IsBootstrapped() bool {
	ok, err := engine.GetI(s.engine, engine.KeyLocalIdent, &s.Ident)
	return ok || err != nil
}
// Init reads the StoreIdent from the underlying engine.
func (s *Store) Init() error {
	ok, err := engine.GetI(s.engine, engine.KeyLocalIdent, &s.Ident)
	if err != nil {
		return err
	} else if !ok {
		return util.Error("store has not been bootstrapped")
	}

	// TODO(spencer): scan through all range metadata and instantiate
	// ranges. Right now we just get range ID hardcoded as 1.
	var meta RangeMetadata
	ok, err = engine.GetI(s.engine, makeRangeKey(1), &meta)
	if err != nil || !ok {
		return err
	}
	rng := NewRange(meta, s.clock, s.engine, s.allocator, s.gossip)
	rng.Start()
	s.mu.Lock()
	defer s.mu.Unlock()
	s.ranges[meta.RangeID] = rng
	return nil
}
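// A sketch of how IsBootstrapped and Init might be sequenced at node
// startup. The Bootstrap method and the StoreIdent fields other than
// ClusterID are hypothetical stand-ins for whatever the surrounding
// server code actually does.
func startStore(s *Store) error {
	if !s.IsBootstrapped() {
		// First boot: persist a store ident before initializing.
		ident := StoreIdent{ClusterID: "cluster-1"} // remaining fields assumed
		if err := s.Bootstrap(ident); err != nil {
			return err
		}
	}
	// Init re-reads the ident and instantiates the store's ranges; it
	// also surfaces any ident corruption that IsBootstrapped reported
	// only as "bootstrapped".
	return s.Init()
}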
// HeartbeatTransaction updates the transaction status and heartbeat
// timestamp on receipt of a heartbeat message from a txn coordinator.
// The range will return the current status of this transaction to the
// coordinator.
func (r *Range) HeartbeatTransaction(args *HeartbeatTransactionRequest, reply *HeartbeatTransactionResponse) {
	var txn Transaction
	if _, err := engine.GetI(r.engine, args.Key, &txn); err != nil {
		reply.Error = err
		return
	}
	if txn.Status == PENDING {
		if !args.Timestamp.Less(txn.LastHeartbeat) {
			txn.LastHeartbeat = args.Timestamp
		}
		if err := engine.PutI(r.engine, args.Key, txn); err != nil {
			reply.Error = err
			return
		}
	}
	reply.Status = txn.Status
}
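// A hypothetical coordinator-side loop that keeps a pending transaction
// alive by heartbeating it periodically. The request/response types,
// the PENDING status, and the Key/Timestamp fields appear above; the
// interval, the Timestamp type name, the now() clock source, and the
// direct method call standing in for an RPC are all assumptions.
// Requires the "time" package.
func heartbeatLoop(r *Range, txnKey engine.Key, now func() Timestamp, done <-chan struct{}) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-done:
			return
		case <-ticker.C:
			args := &HeartbeatTransactionRequest{}
			args.Key = txnKey
			args.Timestamp = now()
			var reply HeartbeatTransactionResponse
			r.HeartbeatTransaction(args, &reply)
			// Stop on error or once the txn is no longer pending
			// (i.e. it has committed or aborted).
			if reply.Error != nil || reply.Status != PENDING {
				return
			}
		}
	}
}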