func newRangeDataIterator(r *Range, e engine.Engine) *rangeDataIterator { r.RLock() startKey := r.Desc().StartKey if startKey.Equal(engine.KeyMin) { startKey = engine.KeyLocalMax } endKey := r.Desc().EndKey r.RUnlock() ri := &rangeDataIterator{ ranges: []keyRange{ { start: engine.MVCCEncodeKey(engine.MakeKey(engine.KeyLocalRangeIDPrefix, encoding.EncodeUvarint(nil, uint64(r.Desc().RaftID)))), end: engine.MVCCEncodeKey(engine.MakeKey(engine.KeyLocalRangeIDPrefix, encoding.EncodeUvarint(nil, uint64(r.Desc().RaftID+1)))), }, { start: engine.MVCCEncodeKey(engine.MakeKey(engine.KeyLocalRangeKeyPrefix, encoding.EncodeBytes(nil, startKey))), end: engine.MVCCEncodeKey(engine.MakeKey(engine.KeyLocalRangeKeyPrefix, encoding.EncodeBytes(nil, endKey))), }, { start: engine.MVCCEncodeKey(startKey), end: engine.MVCCEncodeKey(endKey), }, }, iter: e.NewIterator(), } ri.iter.Seek(ri.ranges[ri.curIndex].start) ri.advance() return ri }
// TestInternalPushTxnAlreadyCommittedOrAborted verifies success // (noop) in event that pushee is already committed or aborted. func TestInternalPushTxnAlreadyCommittedOrAborted(t *testing.T) { rng, _, clock, _ := createTestRangeWithClock(t) defer rng.Stop() for i, status := range []proto.TransactionStatus{proto.COMMITTED, proto.ABORTED} { key := engine.Key(fmt.Sprintf("key-%d", i)) pusher := NewTransaction(engine.MakeKey(key, []byte{1}), 1, proto.SERIALIZABLE, clock) pushee := NewTransaction(engine.MakeKey(key, []byte{2}), 1, proto.SERIALIZABLE, clock) pusher.Priority = 1 pushee.Priority = 2 // pusher will lose, meaning we shouldn't push unless pushee is already ended. // End the pushee's transaction. etArgs, etReply := endTxnArgs(pushee, status == proto.COMMITTED, 0) etArgs.Timestamp = pushee.Timestamp if err := rng.ReadWriteCmd("EndTransaction", etArgs, etReply); err != nil { t.Fatal(err) } // Now try to push what's already committed or aborted. args, reply := pushTxnArgs(pusher, pushee, true, 0) if err := rng.ReadWriteCmd("InternalPushTxn", args, reply); err != nil { t.Fatal(err) } if reply.PusheeTxn.Status != status { t.Errorf("expected push txn to return with status == %s; got %+v", status, reply.PusheeTxn) } } }
// BootstrapConfigs sets default configurations for accounting, // permissions, and zones. All configs are specified for the empty key // prefix, meaning they apply to the entire database. Permissions are // granted to all users and the zone requires three replicas with no // other specifications. func BootstrapConfigs(db DB, timestamp proto.Timestamp) error { // Accounting config. acctConfig := &proto.AcctConfig{} key := engine.MakeKey(engine.KeyConfigAccountingPrefix, engine.KeyMin) if err := PutProto(db, key, acctConfig, timestamp); err != nil { return err } // Permission config. permConfig := &proto.PermConfig{ Read: []string{UserRoot}, // root user Write: []string{UserRoot}, // root user } key = engine.MakeKey(engine.KeyConfigPermissionPrefix, engine.KeyMin) if err := PutProto(db, key, permConfig, timestamp); err != nil { return err } // Zone config. // TODO(spencer): change this when zone specifications change to elect for three // replicas with no specific features set. zoneConfig := &proto.ZoneConfig{ Replicas: []proto.Attributes{ proto.Attributes{}, proto.Attributes{}, proto.Attributes{}, }, RangeMinBytes: 1048576, RangeMaxBytes: 67108864, } key = engine.MakeKey(engine.KeyConfigZonePrefix, engine.KeyMin) if err := PutProto(db, key, zoneConfig, timestamp); err != nil { return err } return nil }
// TestInternalPushTxnUpgradeExistingTxn verifies that pushing
// a transaction record with a new epoch upgrades the pushee's
// epoch and timestamp if greater. In all test cases, the
// priorities are set such that the push will succeed.
func TestInternalPushTxnUpgradeExistingTxn(t *testing.T) {
	rng, _, clock, _ := createTestRangeWithClock(t)
	defer rng.Stop()
	ts1 := proto.Timestamp{WallTime: 1}
	ts2 := proto.Timestamp{WallTime: 2}
	// Each case records the pushee at (startEpoch, startTS) via a
	// heartbeat, then pushes with (epoch, ts) and expects the stored
	// txn to end up at (expEpoch, expTS) — i.e. max of the two.
	testCases := []struct {
		startEpoch, epoch, expEpoch int32
		startTS, ts, expTS          proto.Timestamp
	}{
		// Move epoch forward.
		{0, 1, 1, ts1, ts1, ts1},
		// Move timestamp forward.
		{0, 0, 0, ts1, ts2, ts2},
		// Move epoch backwards (has no effect).
		{1, 0, 1, ts1, ts1, ts1},
		// Move timestamp backwards (has no effect).
		{0, 0, 0, ts2, ts1, ts2},
		// Move both epoch & timestamp forward.
		{0, 1, 1, ts1, ts2, ts2},
		// Move both epoch & timestamp backward (has no effect).
		{1, 0, 1, ts2, ts1, ts2},
	}
	for i, test := range testCases {
		key := engine.Key(fmt.Sprintf("key-%d", i))
		pusher := NewTransaction(engine.MakeKey(key, []byte{1}), 1, proto.SERIALIZABLE, clock)
		pushee := NewTransaction(engine.MakeKey(key, []byte{2}), 1, proto.SERIALIZABLE, clock)
		pushee.Priority = 1
		pusher.Priority = 2 // Pusher will win.
		// First, establish "start" of existing pushee's txn via heartbeat.
		pushee.Epoch = test.startEpoch
		pushee.Timestamp = test.startTS
		hbArgs, hbReply := heartbeatArgs(pushee, 0)
		hbArgs.Timestamp = pushee.Timestamp
		if err := rng.ReadWriteCmd("InternalHeartbeatTxn", hbArgs, hbReply); err != nil {
			t.Fatal(err)
		}
		// Now, attempt to push the transaction using updated values for epoch & timestamp.
		pushee.Epoch = test.epoch
		pushee.Timestamp = test.ts
		args, reply := pushTxnArgs(pusher, pushee, true, 0)
		if err := rng.ReadWriteCmd("InternalPushTxn", args, reply); err != nil {
			t.Fatal(err)
		}
		// The push aborts the pushee; the returned record should carry
		// the upgraded epoch/timestamp and the original heartbeat.
		expTxn := gogoproto.Clone(pushee).(*proto.Transaction)
		expTxn.Epoch = test.expEpoch
		expTxn.Timestamp = test.expTS
		expTxn.Status = proto.ABORTED
		expTxn.LastHeartbeat = &test.startTS
		if !reflect.DeepEqual(expTxn, reply.PusheeTxn) {
			t.Errorf("unexpected push txn in trial %d; expected %+v, got %+v", i, expTxn, reply.PusheeTxn)
		}
	}
}
// BootstrapRangeDescriptor sets meta1 and meta2 values for KeyMax, // using the provided replica. func BootstrapRangeDescriptor(db DB, desc *proto.RangeDescriptor, timestamp proto.Timestamp) error { // Write meta1. if err := PutProto(db, engine.MakeKey(engine.KeyMeta1Prefix, engine.KeyMax), desc, timestamp); err != nil { return err } // Write meta2. if err := PutProto(db, engine.MakeKey(engine.KeyMeta2Prefix, engine.KeyMax), desc, timestamp); err != nil { return err } return nil }
// TestRangeGossipConfigWithMultipleKeyPrefixes verifies that multiple // key prefixes for a config are gossipped. func TestRangeGossipConfigWithMultipleKeyPrefixes(t *testing.T) { e := createTestEngine(t) // Add a permission for a new key prefix. db1Perm := proto.PermConfig{ Read: []string{"spencer", "foo", "bar", "baz"}, Write: []string{"spencer"}, } key := engine.MakeKey(engine.KeyConfigPermissionPrefix, engine.Key("/db1")) if err := engine.PutProto(e, key, &db1Perm); err != nil { t.Fatal(err) } r, g := createTestRange(e, t) defer r.Stop() info, err := g.GetInfo(gossip.KeyConfigPermission) if err != nil { t.Fatal(err) } configMap := info.(PrefixConfigMap) expConfigs := []*PrefixConfig{ &PrefixConfig{engine.KeyMin, nil, &testDefaultPermConfig}, &PrefixConfig{engine.Key("/db1"), nil, &db1Perm}, &PrefixConfig{engine.Key("/db2"), engine.KeyMin, &testDefaultPermConfig}, } if !reflect.DeepEqual([]*PrefixConfig(configMap), expConfigs) { t.Errorf("expected gossiped configs to be equal %s vs %s", configMap, expConfigs) } }
// TestCoordinatorHeartbeat verifies periodic heartbeat of the
// transaction record.
func TestCoordinatorHeartbeat(t *testing.T) {
	db, _, manual := createTestDB(t)
	defer db.Close()
	// Set heartbeat interval to 1ms for testing.
	db.coordinator.heartbeatInterval = 1 * time.Millisecond
	txnID := engine.Key("txn")
	// A transactional put registers the txn with the coordinator,
	// which starts the heartbeat loop.
	<-db.Put(createPutRequest(engine.Key("a"), []byte("value"), txnID))
	// Verify 3 heartbeats.
	var heartbeatTS proto.Timestamp
	for i := 0; i < 3; i++ {
		if err := util.IsTrueWithin(func() bool {
			ok, txn, err := getTxn(db, engine.MakeKey(engine.KeyLocalTransactionPrefix, txnID))
			if !ok || err != nil {
				return false
			}
			// Advance clock by 1ns.
			// Locking the coordinator to prevent a data race.
			db.coordinator.Lock()
			*manual = hlc.ManualClock(*manual + 1)
			db.coordinator.Unlock()
			// Only count a heartbeat once LastHeartbeat has actually
			// advanced past the previously observed value.
			if heartbeatTS.Less(*txn.LastHeartbeat) {
				heartbeatTS = *txn.LastHeartbeat
				return true
			}
			return false
		}, 50*time.Millisecond); err != nil {
			t.Error("expected initial heartbeat within 50ms")
		}
	}
}
// PutSchema inserts s into the kv store for subsequent // usage by clients. func (db *structuredDB) PutSchema(s *Schema) error { if err := s.Validate(); err != nil { return err } k := engine.MakeKey(engine.KeySchemaPrefix, engine.Key(s.Key)) return storage.PutI(db.kvDB, k, s, proto.Timestamp{}) }
// TestRangeGossipConfigUpdates verifies that writes to the // permissions cause the updated configs to be re-gossipped. func TestRangeGossipConfigUpdates(t *testing.T) { r, g := createTestRange(createTestEngine(t), t) defer r.Stop() // Add a permission for a new key prefix. db1Perm := proto.PermConfig{ Read: []string{"spencer"}, Write: []string{"spencer"}, } key := engine.MakeKey(engine.KeyConfigPermissionPrefix, engine.Key("/db1")) reply := &proto.PutResponse{} data, err := gogoproto.Marshal(&db1Perm) if err != nil { t.Fatal(err) } r.Put(&proto.PutRequest{RequestHeader: proto.RequestHeader{Key: key}, Value: proto.Value{Bytes: data}}, reply) if reply.Error != nil { t.Fatal(reply.GoError()) } info, err := g.GetInfo(gossip.KeyConfigPermission) if err != nil { t.Fatal(err) } configMap := info.(PrefixConfigMap) expConfigs := []*PrefixConfig{ &PrefixConfig{engine.KeyMin, nil, &testDefaultPermConfig}, &PrefixConfig{engine.Key("/db1"), nil, &db1Perm}, &PrefixConfig{engine.Key("/db2"), engine.KeyMin, &testDefaultPermConfig}, } if !reflect.DeepEqual([]*PrefixConfig(configMap), expConfigs) { t.Errorf("expected gossiped configs to be equal %s vs %s", configMap, expConfigs) } }
// InternalHeartbeatTxn updates the transaction status and heartbeat
// timestamp after receiving transaction heartbeat messages from
// coordinator. Returns the updated transaction.
func (r *Range) InternalHeartbeatTxn(args *proto.InternalHeartbeatTxnRequest, reply *proto.InternalHeartbeatTxnResponse) {
	// Create the actual key to the system-local transaction table.
	key := engine.MakeKey(engine.KeyLocalTransactionPrefix, args.Key)
	var txn proto.Transaction
	ok, err := engine.GetProto(r.engine, key, &txn)
	if err != nil {
		reply.SetGoError(err)
		return
	}
	// If no existing transaction record was found, initialize
	// to the transaction in the request header.
	if !ok {
		gogoproto.Merge(&txn, args.Txn)
	}
	// Only a PENDING txn's heartbeat is recorded; committed/aborted
	// records are returned unchanged.
	if txn.Status == proto.PENDING {
		if txn.LastHeartbeat == nil {
			txn.LastHeartbeat = &proto.Timestamp{}
		}
		// Ratchet LastHeartbeat forward only — an older request
		// timestamp never regresses the recorded heartbeat.
		if txn.LastHeartbeat.Less(args.Header().Timestamp) {
			*txn.LastHeartbeat = args.Header().Timestamp
		}
		if err := engine.PutProto(r.engine, key, &txn); err != nil {
			reply.SetGoError(err)
			return
		}
	}
	reply.Txn = &txn
}
// DeleteSchema removes s from the kv store. func (db *structuredDB) DeleteSchema(s *Schema) error { return (<-db.kvDB.Delete(&proto.DeleteRequest{ RequestHeader: proto.RequestHeader{ Key: engine.MakeKey(engine.KeySchemaPrefix, engine.Key(s.Key)), }, })).GoError() }
func newTestMetadataDB() *testMetadataDB { db := &testMetadataDB{} db.data.Insert(testMetadataNode{ &proto.RangeDescriptor{ StartKey: engine.MakeKey(engine.KeyMeta2Prefix, engine.KeyMin), EndKey: engine.MakeKey(engine.KeyMeta2Prefix, engine.KeyMax), }, }) db.data.Insert(testMetadataNode{ &proto.RangeDescriptor{ StartKey: engine.KeyMetaMax, EndKey: engine.KeyMax, }, }) return db }
// GetSchema returns the Schema with the given key, or nil if // one does not exist. A nil error is returned when a schema // with the given key cannot be found. func (db *structuredDB) GetSchema(key string) (*Schema, error) { s := &Schema{} k := engine.MakeKey(engine.KeySchemaPrefix, engine.Key(key)) found, _, err := storage.GetI(db.kvDB, k, s) if err != nil || !found { s = nil } return s, err }
// TestInternalPushTxnHeartbeatTimeout verifies that a txn which
// hasn't been heartbeat within 2x the heartbeat interval can be
// aborted.
func TestInternalPushTxnHeartbeatTimeout(t *testing.T) {
	rng, mc, clock, _ := createTestRangeWithClock(t)
	defer rng.Stop()
	ts := proto.Timestamp{WallTime: 1}
	ns := DefaultHeartbeatInterval.Nanoseconds()
	// The push is expected to succeed only once the current time
	// exceeds the last heartbeat by more than 2x the interval.
	testCases := []struct {
		heartbeat   *proto.Timestamp // nil indicates no heartbeat
		currentTime int64            // nanoseconds
		expSuccess  bool
	}{
		{nil, 0, false},
		{nil, ns, false},
		{nil, ns*2 - 1, false},
		{nil, ns * 2, false},
		{&ts, ns*2 + 1, false},
		{&ts, ns*2 + 2, true},
	}
	for i, test := range testCases {
		key := engine.Key(fmt.Sprintf("key-%d", i))
		pusher := NewTransaction(engine.MakeKey(key, []byte{1}), 1, proto.SERIALIZABLE, clock)
		pushee := NewTransaction(engine.MakeKey(key, []byte{2}), 1, proto.SERIALIZABLE, clock)
		pushee.Priority = 2
		pusher.Priority = 1 // Pusher won't win based on priority.
		// First, establish "start" of existing pushee's txn via heartbeat.
		if test.heartbeat != nil {
			hbArgs, hbReply := heartbeatArgs(pushee, 0)
			hbArgs.Timestamp = *test.heartbeat
			if err := rng.ReadWriteCmd("InternalHeartbeatTxn", hbArgs, hbReply); err != nil {
				t.Fatal(err)
			}
		}
		// Now, attempt to push the transaction with clock set to "currentTime".
		*mc = hlc.ManualClock(test.currentTime)
		args, reply := pushTxnArgs(pusher, pushee, true, 0)
		err := rng.ReadWriteCmd("InternalPushTxn", args, reply)
		if test.expSuccess != (err == nil) {
			t.Errorf("expected success on trial %d? %t; got err %v", i, test.expSuccess, err)
		}
	}
}
// Get retrieves the zone configuration for the specified key. If the
// key is empty, all zone configurations are returned. Otherwise, the
// leading "/" path delimiter is stripped and the zone configuration
// matching the remainder is retrieved. Note that this will retrieve
// the default zone config if "key" is equal to "/", and will list all
// configs if "key" is equal to "". The body result contains
// JSON-formatted output for a listing of keys and YAML-formatted
// output for retrieval of a zone config.
func (zh *zoneHandler) Get(path string, r *http.Request) (body []byte, contentType string, err error) {
	// Scan all zones if the key is empty.
	if len(path) == 0 {
		sr := <-zh.kvDB.Scan(&storage.ScanRequest{
			RequestHeader: storage.RequestHeader{
				Key:    engine.KeyConfigZonePrefix,
				EndKey: engine.PrefixEndKey(engine.KeyConfigZonePrefix),
				User:   storage.UserRoot,
			},
			MaxResults: maxGetResults,
		})
		if sr.Error != nil {
			err = sr.Error
			return
		}
		// A full page of results suggests truncation; warn but proceed.
		if len(sr.Rows) == maxGetResults {
			glog.Warningf("retrieved maximum number of results (%d); some may be missing", maxGetResults)
		}
		// Strip the config prefix from each key and URL-escape the
		// remainder for the listing.
		var prefixes []string
		for _, kv := range sr.Rows {
			trimmed := bytes.TrimPrefix(kv.Key, engine.KeyConfigZonePrefix)
			prefixes = append(prefixes, url.QueryEscape(string(trimmed)))
		}
		// JSON-encode the prefixes array.
		contentType = "application/json"
		if body, err = json.Marshal(prefixes); err != nil {
			err = util.Errorf("unable to format zone configurations: %v", err)
		}
	} else {
		// Single-config lookup: path[1:] strips the leading "/".
		zoneKey := engine.MakeKey(engine.KeyConfigZonePrefix, engine.Key(path[1:]))
		var ok bool
		config := &storage.ZoneConfig{}
		if ok, _, err = kv.GetI(zh.kvDB, zoneKey, config); err != nil {
			return
		}
		// On get, if there's no zone config for the requested prefix,
		// return a not found error.
		if !ok {
			err = util.Errorf("no config found for key prefix %q", path)
			return
		}
		var out []byte
		if out, err = yaml.Marshal(config); err != nil {
			err = util.Errorf("unable to marshal zone config %+v to yaml: %v", config, err)
			return
		}
		// Defensive check: marshaled YAML is expected to be valid utf8.
		if !utf8.ValidString(string(out)) {
			err = util.Errorf("config contents not valid utf8: %q", out)
			return
		}
		contentType = "text/yaml"
		body = out
	}
	return
}
// TestInternalPushTxnPriorities verifies that txns with lower
// priority are pushed; if priorities are equal, then the txns
// are ordered by txn timestamp, with the more recent timestamp
// being pushable.
func TestInternalPushTxnPriorities(t *testing.T) {
	rng, _, clock, _ := createTestRangeWithClock(t)
	defer rng.Stop()
	ts1 := proto.Timestamp{WallTime: 1}
	ts2 := proto.Timestamp{WallTime: 2}
	testCases := []struct {
		pusherPriority, pusheePriority int32
		pusherTS, pusheeTS             proto.Timestamp
		expSuccess                     bool
	}{
		// Pusher has higher priority succeeds.
		{2, 1, ts1, ts1, true},
		// Pusher has lower priority fails.
		{1, 2, ts1, ts1, false},
		// Pusher has lower priority fails, even with older txn timestamp.
		{1, 2, ts1, ts2, false},
		// With same priorities, older txn timestamp succeeds.
		{1, 1, ts1, ts2, true},
		// With same priorities, same txn timestamp fails.
		{1, 1, ts1, ts1, false},
		// With same priorities, newer txn timestamp fails.
		{1, 1, ts2, ts1, false},
	}
	for i, test := range testCases {
		key := engine.Key(fmt.Sprintf("key-%d", i))
		pusher := NewTransaction(engine.MakeKey(key, []byte{1}), 1, proto.SERIALIZABLE, clock)
		pushee := NewTransaction(engine.MakeKey(key, []byte{2}), 1, proto.SERIALIZABLE, clock)
		// Apply the per-case priorities and timestamps directly.
		pusher.Priority = test.pusherPriority
		pushee.Priority = test.pusheePriority
		pusher.Timestamp = test.pusherTS
		pushee.Timestamp = test.pusheeTS
		// Now, attempt to push the transaction with intent epoch set appropriately.
		args, reply := pushTxnArgs(pusher, pushee, true, 0)
		err := rng.ReadWriteCmd("InternalPushTxn", args, reply)
		if test.expSuccess != (err == nil) {
			t.Errorf("expected success on trial %d? %t; got err %v", i, test.expSuccess, err)
		}
	}
}
// UpdateRangeDescriptor updates the range locations metadata for the // range specified by the meta parameter. This always involves a write // to "meta2", and may require a write to "meta1", in the event that // meta.EndKey is a "meta2" key (prefixed by KeyMeta2Prefix). func UpdateRangeDescriptor(db DB, meta proto.RangeMetadata, desc *proto.RangeDescriptor, timestamp proto.Timestamp) error { // TODO(spencer): a lot more work here to actually implement this. // Write meta2. key := engine.MakeKey(engine.KeyMeta2Prefix, meta.EndKey) if err := PutProto(db, key, desc, timestamp); err != nil { return err } return nil }
// TestInternalPushTxnOldEpoch verifies that a txn intent from an
// older epoch may be pushed.
func TestInternalPushTxnOldEpoch(t *testing.T) {
	rng, _, clock, _ := createTestRangeWithClock(t)
	defer rng.Stop()
	// curEpoch is recorded via heartbeat; intentEpoch is what the
	// push carries. Only a strictly older intent epoch is pushable.
	testCases := []struct {
		curEpoch, intentEpoch int32
		expSuccess            bool
	}{
		// Same epoch; can't push based on epoch.
		{0, 0, false},
		// The intent is newer; definitely can't push.
		{0, 1, false},
		// The intent is old; can push.
		{1, 0, true},
	}
	for i, test := range testCases {
		key := engine.Key(fmt.Sprintf("key-%d", i))
		pusher := NewTransaction(engine.MakeKey(key, []byte{1}), 1, proto.SERIALIZABLE, clock)
		pushee := NewTransaction(engine.MakeKey(key, []byte{2}), 1, proto.SERIALIZABLE, clock)
		pushee.Priority = 2
		pusher.Priority = 1 // Pusher won't win based on priority.
		// First, establish "start" of existing pushee's txn via heartbeat.
		pushee.Epoch = test.curEpoch
		hbArgs, hbReply := heartbeatArgs(pushee, 0)
		hbArgs.Timestamp = pushee.Timestamp
		if err := rng.ReadWriteCmd("InternalHeartbeatTxn", hbArgs, hbReply); err != nil {
			t.Fatal(err)
		}
		// Now, attempt to push the transaction with intent epoch set appropriately.
		pushee.Epoch = test.intentEpoch
		args, reply := pushTxnArgs(pusher, pushee, true, 0)
		err := rng.ReadWriteCmd("InternalPushTxn", args, reply)
		if test.expSuccess != (err == nil) {
			t.Errorf("expected success on trial %d? %t; got err %v", i, test.expSuccess, err)
		}
	}
}
// allocateStoreIDs increments the store id generator key for the // specified node to allocate "inc" new, unique store ids. The // first ID in a contiguous range is returned on success. func allocateStoreIDs(nodeID proto.NodeID, inc int64, db *client.KV) (proto.StoreID, error) { iReply := &proto.IncrementResponse{} if err := db.Call(proto.Increment, &proto.IncrementRequest{ RequestHeader: proto.RequestHeader{ Key: engine.MakeKey(engine.KeyStoreIDGeneratorPrefix, []byte(strconv.Itoa(int(nodeID)))), User: storage.UserRoot, }, Increment: inc, }, iReply); err != nil { return 0, util.Errorf("unable to allocate %d store IDs for node %d: %v", inc, nodeID, err) } return proto.StoreID(iReply.NewValue - inc + 1), nil }
// Put writes a perm config for the specified key prefix (which is treated as // a key). The perm config is parsed from the input "body". The perm config is // stored gob-encoded. The specified body must validly parse into a // perm config struct. func (ph *permHandler) Put(path string, body []byte, r *http.Request) error { if len(path) == 0 { return util.Errorf("no path specified for permission Put") } config := &proto.PermConfig{} if err := util.UnmarshalRequest(r, body, config, util.AllEncodings); err != nil { return util.Errorf("permission config has invalid format: %s: %s", config, err) } permKey := engine.MakeKey(engine.KeyConfigPermissionPrefix, proto.Key(path[1:])) if err := ph.db.PutProto(permKey, config); err != nil { return err } return nil }
// allocateStoreIDs increments the store id generator key for the // specified node to allocate "inc" new, unique store ids. The // first ID in a contiguous range is returned on success. func allocateStoreIDs(nodeID int32, inc int64, db storage.DB) (int32, error) { ir := <-db.Increment(&proto.IncrementRequest{ // The Key is a concatenation of StoreIDGeneratorPrefix and this node's ID. RequestHeader: proto.RequestHeader{ Key: engine.MakeKey(engine.KeyStoreIDGeneratorPrefix, []byte(strconv.Itoa(int(nodeID)))), User: storage.UserRoot, }, Increment: inc, }) if ir.Error != nil { return 0, util.Errorf("unable to allocate %d store IDs for node %d: %v", inc, nodeID, ir.Error) } return int32(ir.NewValue - inc + 1), nil }
// Delete removes the perm config specified by key. func (ph *permHandler) Delete(path string, r *http.Request) error { if len(path) == 0 { return util.Errorf("no path specified for permission Delete") } if path == "/" { return util.Errorf("the default permission configuration cannot be deleted") } permKey := engine.MakeKey(engine.KeyConfigPermissionPrefix, proto.Key(path[1:])) return ph.db.Call(proto.Delete, &proto.DeleteRequest{ RequestHeader: proto.RequestHeader{ Key: permKey, User: storage.UserRoot, }, }, &proto.DeleteResponse{}) }
func (tc *coordinator) addRequest(header *storage.RequestHeader) { // Ignore non-transactional requests. if len(header.TxID) == 0 { return } if _, ok := tc.TransactionMap[header.TxID]; !ok { tc.TransactionMap[header.TxID] = tc.newTxnMetadata() // TODO(jiajia): Reevaluate this logic of creating a goroutine // for each active transaction. Spencer suggests a heap // containing next heartbeat timeouts which is processed by a // single goroutine. go tc.heartbeat(engine.MakeKey(engine.KeyTransactionPrefix, engine.Key(header.TxID)), tc.TransactionMap[header.TxID].closer) } txnMeta := tc.TransactionMap[header.TxID] txnMeta.lastUpdateTS = tc.clock.Now() }
// TestEndTransactionWithErrors verifies various error conditions
// are checked such as transaction already being committed or
// aborted, or timestamp or epoch regression.
func TestEndTransactionWithErrors(t *testing.T) {
	rng, mc, clock, _ := createTestRangeWithClock(t)
	defer rng.Stop()
	// Capture a timestamp before advancing the clock so case "d" can
	// exercise a timestamp regression.
	regressTS := clock.Now()
	*mc = hlc.ManualClock(1)
	txn := NewTransaction(engine.Key(""), 1, proto.SERIALIZABLE, clock)
	testCases := []struct {
		key          engine.Key
		existStatus  proto.TransactionStatus
		existEpoch   int32
		existTS      proto.Timestamp
		expErrRegexp string
	}{
		{engine.Key("a"), proto.COMMITTED, txn.Epoch, txn.Timestamp, "txn {.*}: already committed"},
		{engine.Key("b"), proto.ABORTED, txn.Epoch, txn.Timestamp, "txn {.*}: already aborted"},
		{engine.Key("c"), proto.PENDING, txn.Epoch + 1, txn.Timestamp, "txn {.*}: epoch regression: 0"},
		{engine.Key("d"), proto.PENDING, txn.Epoch, regressTS, "txn {.*}: timestamp regression: {WallTime:1 Logical:0 .*}"},
	}
	for _, test := range testCases {
		// Establish existing txn state by writing directly to range engine.
		var existTxn proto.Transaction
		gogoproto.Merge(&existTxn, txn)
		existTxn.ID = test.key
		existTxn.Status = test.existStatus
		existTxn.Epoch = test.existEpoch
		existTxn.Timestamp = test.existTS
		txnKey := engine.MakeKey(engine.KeyLocalTransactionPrefix, test.key)
		if err := engine.PutProto(rng.engine, txnKey, &existTxn); err != nil {
			t.Fatal(err)
		}
		// End the transaction, verify expected error.
		txn.ID = test.key
		args, reply := endTxnArgs(txn, true, 0)
		args.Timestamp = txn.Timestamp
		err := rng.ReadWriteCmd("EndTransaction", args, reply)
		if err == nil {
			t.Errorf("expected error matching %q", test.expErrRegexp)
		} else {
			if matched, regexpErr := regexp.MatchString(test.expErrRegexp, err.Error()); !matched || regexpErr != nil {
				t.Errorf("expected error to match %q (%v): %v", test.expErrRegexp, regexpErr, err.Error())
			}
		}
	}
}
// Put writes a zone config for the specified key prefix "key". The // zone config is parsed from the input "body". The zone config is // stored gob-encoded. The specified body must be valid utf8 and must // validly parse into a zone config struct. func (zh *zoneHandler) Put(path string, body []byte, r *http.Request) error { if len(path) == 0 { return util.Errorf("no path specified for zone Put") } configStr := string(body) if !utf8.ValidString(configStr) { return util.Errorf("config contents not valid utf8: %q", body) } config, err := storage.ParseZoneConfig(body) if err != nil { return util.Errorf("zone config has invalid format: %s: %v", configStr, err) } zoneKey := engine.MakeKey(engine.KeyConfigZonePrefix, engine.Key(path[1:])) if err := kv.PutI(zh.kvDB, zoneKey, config, hlc.HLTimestamp{}); err != nil { return err } return nil }
// Delete removes the zone config specified by key. func (zh *zoneHandler) Delete(path string, r *http.Request) error { if len(path) == 0 { return util.Errorf("no path specified for zone Delete") } if path == "/" { return util.Errorf("the default zone configuration cannot be deleted") } zoneKey := engine.MakeKey(engine.KeyConfigZonePrefix, engine.Key(path[1:])) dr := <-zh.kvDB.Delete(&storage.DeleteRequest{ RequestHeader: storage.RequestHeader{ Key: zoneKey, User: storage.UserRoot, }, }) if dr.Error != nil { return dr.Error } return nil }
// InternalHeartbeatTxn updates the transaction status and heartbeat
// timestamp after receiving transaction heartbeat messages from
// coordinator. The range will return the current status for this
// transaction to the coordinator.
func (r *Range) InternalHeartbeatTxn(args *proto.InternalHeartbeatTxnRequest, reply *proto.InternalHeartbeatTxnResponse) {
	// Create the actual key to the system-local transaction table.
	key := engine.MakeKey(engine.KeyLocalTransactionPrefix, args.Key)
	var txn proto.Transaction
	// NOTE(review): the found/not-found result is discarded, so a
	// missing record is treated as a zero-valued txn (status PENDING)
	// — confirm that is intended.
	if _, err := engine.GetProto(r.engine, key, &txn); err != nil {
		reply.SetGoError(err)
		return
	}
	// Only a PENDING txn's heartbeat is recorded.
	if txn.Status == proto.PENDING {
		// Ratchet LastHeartbeat forward only — an older request
		// timestamp leaves the recorded heartbeat untouched.
		if !args.Header().Timestamp.Less(txn.LastHeartbeat) {
			txn.LastHeartbeat = args.Header().Timestamp
		}
		if err := engine.PutProto(r.engine, key, &txn); err != nil {
			reply.SetGoError(err)
			return
		}
	}
	reply.Status = txn.Status
}
// TestNullPrefixedKeys makes sure that the internal system keys are not accessible through the HTTP API.
func TestNullPrefixedKeys(t *testing.T) {
	// TODO(zbrock + matthew) fix this once sqlite key encoding is finished so we can namespace user keys
	t.Skip("Internal Meta1 Keys should not be accessible from the HTTP REST API. But they are right now.")
	metaKey := engine.MakeKey(engine.KeyMeta1Prefix, engine.KeyMax)
	s := startNewServer()
	// Precondition: we want to make sure the meta1 key exists.
	initialVal, err := s.rawGet(metaKey)
	if err != nil {
		t.Fatalf("Precondition Failed! Unable to fetch %+v from local db", metaKey)
	}
	if initialVal == nil {
		t.Fatalf("Precondition Failed! Expected meta1 key to exist in the underlying store, but no value found")
	}
	// Try to manipulate the meta1 key.
	// The desired (currently unenforced) behavior is that the HTTP API
	// rejects reads and writes of system-internal keys.
	encMeta1Key := url.QueryEscape(string(metaKey))
	runHTTPTestFixture(t, []RequestResponse{
		{
			NewRequest("GET", encMeta1Key),
			NewResponse(404),
		},
		{
			NewRequest("POST", encMeta1Key, "cool"),
			NewResponse(200),
		},
		{
			NewRequest("GET", encMeta1Key),
			NewResponse(200, "cool", "application/octet-stream"),
		},
	}, s)
	// Postcondition: the meta1 key is untouched.
	afterVal, err := s.rawGet(metaKey)
	if err != nil {
		t.Errorf("Unable to fetch %+v from local db", metaKey)
	}
	if !bytes.Equal(afterVal, initialVal) {
		t.Errorf("Expected meta1 to be unchanged, but it differed: %+v", afterVal)
	}
}
// Get retrieves the perm configuration for the specified key. If the
// key is empty, all perm configurations are returned. Otherwise, the
// leading "/" path delimiter is stripped and the perm configuration
// matching the remainder is retrieved. Note that this will retrieve
// the default perm config if "key" is equal to "/", and will list all
// configs if "key" is equal to "". The body result contains
// JSON-formatted output for a listing of keys and JSON-formatted
// output for retrieval of a perm config.
func (ph *permHandler) Get(path string, r *http.Request) (body []byte, contentType string, err error) {
	// Scan all perms if the key is empty.
	if len(path) == 0 {
		sr := &proto.ScanResponse{}
		if err = ph.db.Call(proto.Scan, &proto.ScanRequest{
			RequestHeader: proto.RequestHeader{
				Key:    engine.KeyConfigPermissionPrefix,
				EndKey: engine.KeyConfigPermissionPrefix.PrefixEnd(),
				User:   storage.UserRoot,
			},
			MaxResults: maxGetResults,
		}, sr); err != nil {
			return
		}
		// A full page of results suggests truncation; warn but proceed.
		if len(sr.Rows) == maxGetResults {
			log.Warningf("retrieved maximum number of results (%d); some may be missing", maxGetResults)
		}
		// Strip the config prefix from each key and URL-escape the
		// remainder for the listing.
		var prefixes []string
		for _, kv := range sr.Rows {
			trimmed := bytes.TrimPrefix(kv.Key, engine.KeyConfigPermissionPrefix)
			prefixes = append(prefixes, url.QueryEscape(string(trimmed)))
		}
		// Encode the response.
		body, contentType, err = util.MarshalResponse(r, prefixes, util.AllEncodings)
	} else {
		// Single-config lookup: path[1:] strips the leading "/".
		permKey := engine.MakeKey(engine.KeyConfigPermissionPrefix, proto.Key(path[1:]))
		var ok bool
		config := &proto.PermConfig{}
		if ok, _, err = ph.db.GetProto(permKey, config); err != nil {
			return
		}
		// On get, if there's no perm config for the requested prefix,
		// return a not found error.
		if !ok {
			err = util.Errorf("no config found for key prefix %q", path)
			return
		}
		body, contentType, err = util.MarshalResponse(r, config, util.AllEncodings)
	}
	return
}
// TestSystemKeys makes sure that the internal system keys are
// accessible through the HTTP API.
// TODO(spencer): we need to ensure proper permissions through the
// HTTP API.
func TestSystemKeys(t *testing.T) {
	metaKey := engine.MakeKey(engine.KeyMeta1Prefix, engine.KeyMax)
	s := startNewServer()
	// Compute expected system key.
	// This descriptor mirrors what bootstrap writes under meta1 for
	// the single initial range.
	desc := &proto.RangeDescriptor{
		RaftID:   1,
		StartKey: engine.KeyMin,
		EndKey:   engine.KeyMax,
		Replicas: []proto.Replica{
			proto.Replica{
				NodeID:  1,
				StoreID: 1,
				RangeID: 1,
			},
		},
	}
	protoBytes, err := gogoproto.Marshal(desc)
	if err != nil {
		t.Fatal(err)
	}
	// Manipulate the meta1 key.
	// Read it (expecting the bootstrap descriptor), overwrite it via
	// POST, and read the new value back.
	encMeta1Key := url.QueryEscape(string(metaKey))
	runHTTPTestFixture(t, []RequestResponse{
		{
			NewRequest("GET", encMeta1Key),
			NewResponse(200, string(protoBytes), "application/octet-stream"),
		},
		{
			NewRequest("POST", encMeta1Key, "cool"),
			NewResponse(200),
		},
		{
			NewRequest("GET", encMeta1Key),
			NewResponse(200, "cool", "application/octet-stream"),
		},
	}, s)
}