Code example #1
File: db.go Project: knz/cockroach
// StoreData writes the supplied time series data to the cockroach server.
// Stored data will be sampled at the supplied resolution.
func (db *DB) StoreData(ctx context.Context, r Resolution, data []tspb.TimeSeriesData) error {
	var kvs []roachpb.KeyValue

	// Process data collection: data is converted to internal format, and a key
	// is generated for each internal message.
	for _, d := range data {
		idatas, err := d.ToInternal(r.SlabDuration(), r.SampleDuration())
		if err != nil {
			return err
		}
		for _, idata := range idatas {
			var value roachpb.Value
			if err := value.SetProto(&idata); err != nil {
				return err
			}
			kvs = append(kvs, roachpb.KeyValue{
				Key:   MakeDataKey(d.Name, d.Source, r, idata.StartTimestampNanos),
				Value: value,
			})
		}
	}

	// Send the individual internal merge requests.
	b := &client.Batch{}
	for _, kv := range kvs {
		b.AddRawRequest(&roachpb.MergeRequest{
			Span: roachpb.Span{
				Key: kv.Key,
			},
			Value: kv.Value,
		})
	}

	return db.db.Run(ctx, b)
}
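For orientation, here is a minimal sketch of a caller driving StoreData. The Name/Source fields and the ToInternal conversion of tspb.TimeSeriesData appear elsewhere on this page (example #21); the Datapoints slice and its TimestampNanos/Value fields are assumptions made for illustration.

// storeSample is a hypothetical helper: it wraps a single measurement in
// tspb.TimeSeriesData and hands it to DB.StoreData at the given resolution.
// The Datapoints field layout is assumed, not taken from the examples above.
func storeSample(
	ctx context.Context, db *DB, r Resolution, name, source string, nowNanos int64, v float64,
) error {
	data := []tspb.TimeSeriesData{{
		Name:   name,
		Source: source,
		Datapoints: []tspb.TimeSeriesDatapoint{
			{TimestampNanos: nowNanos, Value: v},
		},
	}}
	return db.StoreData(ctx, r, data)
}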
Code example #2
File: replica_state.go Project: veteranlu/cockroach
func setAppliedIndex(
	ctx context.Context,
	eng engine.ReadWriter,
	ms *enginepb.MVCCStats,
	rangeID roachpb.RangeID,
	appliedIndex,
	leaseAppliedIndex uint64,
) error {
	var value roachpb.Value
	value.SetInt(int64(appliedIndex))

	if err := engine.MVCCPut(ctx, eng, ms,
		keys.RaftAppliedIndexKey(rangeID),
		hlc.ZeroTimestamp,
		value,
		nil /* txn */); err != nil {
		return err
	}
	value.SetInt(int64(leaseAppliedIndex))
	return engine.MVCCPut(ctx, eng, ms,
		keys.LeaseAppliedIndexKey(rangeID),
		hlc.ZeroTimestamp,
		value,
		nil /* txn */)
}
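The SetInt calls above have Value.GetInt as their read-side counterpart (see tryRangeIDKey in example #26). A minimal round-trip sketch, using only Value methods that appear on this page:

// roundTripInt shows the symmetry between Value.SetInt and Value.GetInt.
// InitChecksum is optional for the round trip; it is included because the
// raft-log write path in example #13 initializes the checksum before writing.
func roundTripInt(key roachpb.Key, i int64) (int64, error) {
	var v roachpb.Value
	v.SetInt(i)
	v.InitChecksum(key)
	return v.GetInt()
}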
Code example #3
File: rowwriter.go Project: BramGruneir/cockroach
// insertCPutFn is used by insertRow when conflicts should be respected.
// logValue is used for pretty printing.
func insertCPutFn(ctx context.Context, b puter, key *roachpb.Key, value *roachpb.Value) {
	// TODO(dan): We want to do this V(2) log everywhere in sql. Consider making a
	// client.Batch wrapper instead of inlining it everywhere.
	if log.V(2) {
		log.InfofDepth(ctx, 1, "CPut %s -> %s", *key, value.PrettyPrint())
	}
	b.CPut(key, value, nil)
}
Code example #4
File: debug.go Project: veteranlu/cockroach
func maybeUnmarshalInline(v []byte, dest proto.Message) error {
	var meta enginepb.MVCCMetadata
	if err := meta.Unmarshal(v); err != nil {
		return err
	}
	value := roachpb.Value{
		RawBytes: meta.RawBytes,
	}
	return value.GetProto(dest)
}
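maybeUnmarshalInline is the read-side counterpart of the Value.SetProto calls used in example #1. A minimal round-trip sketch, relying on nothing beyond the SetProto/GetProto pair shown in these examples:

// protoRoundTrip marshals a message into a roachpb.Value and immediately
// decodes it again, mirroring the write path (SetProto) and read path
// (GetProto) used throughout these examples.
func protoRoundTrip(in, out proto.Message) error {
	var v roachpb.Value
	if err := v.SetProto(in); err != nil {
		return err
	}
	return v.GetProto(out)
}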
Code example #5
File: replica_state.go Project: veteranlu/cockroach
func setLastIndex(
	ctx context.Context, eng engine.ReadWriter, rangeID roachpb.RangeID, lastIndex uint64,
) error {
	var value roachpb.Value
	value.SetInt(int64(lastIndex))

	return engine.MVCCPut(ctx, eng, nil, keys.RaftLastIndexKey(rangeID),
		hlc.ZeroTimestamp,
		value,
		nil /* txn */)
}
Code example #6
File: info_test.go Project: knz/cockroach
func newInfo(val float64) Info {
	now := timeutil.Now()

	v := roachpb.Value{Timestamp: hlc.Timestamp{WallTime: now.UnixNano()}}
	v.SetFloat(val)

	return Info{
		Value:     v,
		OrigStamp: now.UnixNano(),
		TTLStamp:  now.Add(time.Millisecond).UnixNano(),
	}
}
Code example #7
File: system.go Project: BramGruneir/cockroach
// Create the key/value pairs for the default zone config entry.
func createDefaultZoneConfig() []roachpb.KeyValue {
	var ret []roachpb.KeyValue
	value := roachpb.Value{}
	desc := config.DefaultZoneConfig()
	if err := value.SetProto(&desc); err != nil {
		log.Fatalf(context.TODO(), "could not marshal %v", desc)
	}
	ret = append(ret, roachpb.KeyValue{
		Key:   MakeZoneKey(keys.RootNamespaceID),
		Value: value,
	})
	return ret
}
Code example #8
File: debug.go Project: veteranlu/cockroach
func tryMeta(kv engine.MVCCKeyValue) (string, error) {
	if !bytes.HasPrefix(kv.Key.Key, keys.Meta1Prefix) && !bytes.HasPrefix(kv.Key.Key, keys.Meta2Prefix) {
		return "", errors.New("not a meta key")
	}
	value := roachpb.Value{
		Timestamp: kv.Key.Timestamp,
		RawBytes:  kv.Value,
	}
	var desc roachpb.RangeDescriptor
	if err := value.GetProto(&desc); err != nil {
		return "", err
	}
	return descStr(desc), nil
}
Code example #9
File: bench_test.go Project: knz/cockroach
// BenchmarkMVCCMergeTimeSeries measures the performance of merging time series data.
// Uses an in-memory engine.
func BenchmarkMVCCMergeTimeSeries_RocksDB(b *testing.B) {
	ts := &roachpb.InternalTimeSeriesData{
		StartTimestampNanos: 0,
		SampleDurationNanos: 1000,
		Samples: []roachpb.InternalTimeSeriesSample{
			{Offset: 0, Count: 1, Sum: 5.0},
		},
	}
	var value roachpb.Value
	if err := value.SetProto(ts); err != nil {
		b.Fatal(err)
	}
	runMVCCMerge(setupMVCCInMemRocksDB, &value, 1024, b)
}
Code example #10
File: replica_state.go Project: veteranlu/cockroach
func setFrozenStatus(
	ctx context.Context,
	eng engine.ReadWriter,
	ms *enginepb.MVCCStats,
	rangeID roachpb.RangeID,
	frozen storagebase.ReplicaState_FrozenEnum,
) error {
	if frozen == storagebase.ReplicaState_FROZEN_UNSPECIFIED {
		return errors.New("cannot persist unspecified FrozenStatus")
	}
	var val roachpb.Value
	val.SetBool(frozen == storagebase.ReplicaState_FROZEN)
	return engine.MVCCPut(ctx, eng, ms,
		keys.RangeFrozenStatusKey(rangeID), hlc.ZeroTimestamp, val, nil)
}
Code example #11
File: store_pool.go Project: hvaara/cockroach
// storeGossipUpdate is the gossip callback used to keep the StorePool up to date.
func (sp *StorePool) storeGossipUpdate(_ string, content roachpb.Value) {
	var storeDesc roachpb.StoreDescriptor
	if err := content.GetProto(&storeDesc); err != nil {
		ctx := sp.AnnotateCtx(context.TODO())
		log.Error(ctx, err)
		return
	}

	sp.mu.Lock()
	defer sp.mu.Unlock()
	// Does this storeDetail exist yet?
	detail := sp.getStoreDetailLocked(storeDesc.StoreID)
	detail.markAlive(sp.clock.Now(), &storeDesc)
	sp.mu.queue.enqueue(detail)
}
Code example #12
File: store_pool.go Project: BramGruneir/cockroach
// storeGossipUpdate is the gossip callback used to keep the StorePool up to date.
func (sp *StorePool) storeGossipUpdate(_ string, content roachpb.Value) {
	var storeDesc roachpb.StoreDescriptor
	if err := content.GetProto(&storeDesc); err != nil {
		ctx := sp.AnnotateCtx(context.TODO())
		log.Error(ctx, err)
		return
	}

	sp.mu.Lock()
	defer sp.mu.Unlock()
	detail := sp.getStoreDetailLocked(storeDesc.StoreID)
	detail.desc = &storeDesc
	detail.lastUpdatedTime = sp.clock.PhysicalTime()
	sp.mu.nodeLocalities[storeDesc.Node.NodeID] = storeDesc.Node.Locality
}
Code example #13
// append the given entries to the raft log. Takes the previous values of
// r.mu.lastIndex and r.mu.raftLogSize, and returns new values. We do this
// rather than modifying them directly because these modifications need to be
// atomic with the commit of the batch.
func (r *Replica) append(
	ctx context.Context,
	batch engine.ReadWriter,
	prevLastIndex uint64,
	prevRaftLogSize int64,
	entries []raftpb.Entry,
) (uint64, int64, error) {
	if len(entries) == 0 {
		return prevLastIndex, prevRaftLogSize, nil
	}
	var diff enginepb.MVCCStats
	var value roachpb.Value
	for i := range entries {
		ent := &entries[i]
		key := keys.RaftLogKey(r.RangeID, ent.Index)
		if err := value.SetProto(ent); err != nil {
			return 0, 0, err
		}
		value.InitChecksum(key)
		var err error
		if ent.Index > prevLastIndex {
			err = engine.MVCCBlindPut(ctx, batch, &diff, key, hlc.ZeroTimestamp, value, nil /* txn */)
		} else {
			err = engine.MVCCPut(ctx, batch, &diff, key, hlc.ZeroTimestamp, value, nil /* txn */)
		}
		if err != nil {
			return 0, 0, err
		}
	}

	// Delete any previously appended log entries which never committed.
	lastIndex := entries[len(entries)-1].Index
	for i := lastIndex + 1; i <= prevLastIndex; i++ {
		err := engine.MVCCDelete(ctx, batch, &diff, keys.RaftLogKey(r.RangeID, i),
			hlc.ZeroTimestamp, nil /* txn */)
		if err != nil {
			return 0, 0, err
		}
	}

	if err := setLastIndex(ctx, batch, r.RangeID, lastIndex); err != nil {
		return 0, 0, err
	}

	raftLogSize := prevRaftLogSize + diff.SysBytes

	return lastIndex, raftLogSize, nil
}
Code example #14
// deadReplicasGossipUpdate is the gossip callback used to keep the StorePool up to date.
func (sp *StorePool) deadReplicasGossipUpdate(_ string, content roachpb.Value) {
	var replicas roachpb.StoreDeadReplicas
	if err := content.GetProto(&replicas); err != nil {
		ctx := sp.AnnotateCtx(context.TODO())
		log.Error(ctx, err)
		return
	}

	sp.mu.Lock()
	defer sp.mu.Unlock()
	detail := sp.getStoreDetailLocked(replicas.StoreID)
	deadReplicas := make(map[roachpb.RangeID][]roachpb.ReplicaDescriptor)
	for _, r := range replicas.Replicas {
		deadReplicas[r.RangeID] = append(deadReplicas[r.RangeID], r.Replica)
	}
	detail.deadReplicas = deadReplicas
}
Code example #15
File: bench_test.go Project: knz/cockroach
// Benchmark batch time series merge operations. This benchmark does not
// perform any reads and is only used to measure the cost of the periodic time
// series updates.
func runMVCCBatchTimeSeries(emk engineMaker, batchSize int, b *testing.B) {
	// Precompute keys so we don't waste time formatting them at each iteration.
	numKeys := batchSize
	keys := make([]roachpb.Key, numKeys)
	for i := 0; i < numKeys; i++ {
		keys[i] = roachpb.Key(fmt.Sprintf("key-%d", i))
	}

	// We always write the same time series data (containing a single unchanging
	// sample). This isn't realistic but is fine because we're never reading the
	// data.
	var value roachpb.Value
	if err := value.SetProto(&roachpb.InternalTimeSeriesData{
		StartTimestampNanos: 0,
		SampleDurationNanos: 1000,
		Samples: []roachpb.InternalTimeSeriesSample{
			{Offset: 0, Count: 1, Sum: 5.0},
		},
	}); err != nil {
		b.Fatal(err)
	}

	eng := emk(b, fmt.Sprintf("batch_merge_%d", batchSize))
	defer eng.Close()

	b.ResetTimer()

	ts := hlc.Timestamp{}
	for i := 0; i < b.N; i++ {
		batch := eng.NewBatch()

		for j := 0; j < batchSize; j++ {
			ts.Logical++
			if err := MVCCMerge(context.Background(), batch, nil, keys[j], ts, value); err != nil {
				b.Fatalf("failed put: %s", err)
			}
		}

		if err := batch.Commit(); err != nil {
			b.Fatal(err)
		}
		batch.Close()
	}

	b.StopTimer()
}
Code example #16
File: node_liveness.go Project: knz/cockroach
// livenessGossipUpdate is the gossip callback used to keep the
// in-memory liveness info up to date.
func (nl *NodeLiveness) livenessGossipUpdate(key string, content roachpb.Value) {
	var liveness Liveness
	if err := content.GetProto(&liveness); err != nil {
		log.Error(context.TODO(), err)
		return
	}

	// If there's an existing liveness record, only update the received
	// timestamp if this is our first receipt of this node's liveness
	// or if the expiration or epoch was advanced.
	nl.mu.Lock()
	defer nl.mu.Unlock()
	exLiveness, ok := nl.mu.nodes[liveness.NodeID]
	if !ok || exLiveness.Expiration.Less(liveness.Expiration) || exLiveness.Epoch < liveness.Epoch {
		nl.mu.nodes[liveness.NodeID] = liveness
	}
}
Code example #17
File: merge.go Project: knz/cockroach
// MergeInternalTimeSeriesData exports the engine's C++ merge logic for
// InternalTimeSeriesData to higher level packages. This is intended primarily
// for consumption by high level testing of time series functionality.
func MergeInternalTimeSeriesData(
	sources ...roachpb.InternalTimeSeriesData,
) (roachpb.InternalTimeSeriesData, error) {
	// Wrap each proto in an inlined MVCC value, and marshal each wrapped value
	// to bytes. This is the format required by the engine.
	srcBytes := make([][]byte, 0, len(sources))
	for _, src := range sources {
		var val roachpb.Value
		if err := val.SetProto(&src); err != nil {
			return roachpb.InternalTimeSeriesData{}, err
		}
		bytes, err := protoutil.Marshal(&enginepb.MVCCMetadata{
			RawBytes: val.RawBytes,
		})
		if err != nil {
			return roachpb.InternalTimeSeriesData{}, err
		}
		srcBytes = append(srcBytes, bytes)
	}

	// Merge every element into a nil byte slice, one at a time.
	var (
		mergedBytes []byte
		err         error
	)
	for _, bytes := range srcBytes {
		mergedBytes, err = goMerge(mergedBytes, bytes)
		if err != nil {
			return roachpb.InternalTimeSeriesData{}, err
		}
	}

	// Unmarshal merged bytes and extract the time series value within.
	var meta enginepb.MVCCMetadata
	if err := proto.Unmarshal(mergedBytes, &meta); err != nil {
		return roachpb.InternalTimeSeriesData{}, err
	}
	mergedTS, err := MakeValue(meta).GetTimeseries()
	if err != nil {
		return roachpb.InternalTimeSeriesData{}, err
	}
	return mergedTS, nil
}
Code example #18
File: gossip.go Project: knz/cockroach
// updateNodeAddress is a gossip callback which fires with each
// update to the node address. This allows us to compute the
// total size of the gossip network (for determining max peers
// each gossip node is allowed to have), as well as to create
// new resolvers for each encountered host and to write the
// set of gossip node addresses to persistent storage when it
// changes.
func (g *Gossip) updateNodeAddress(_ string, content roachpb.Value) {
	ctx := g.AnnotateCtx(context.TODO())
	var desc roachpb.NodeDescriptor
	if err := content.GetProto(&desc); err != nil {
		log.Error(ctx, err)
		return
	}

	g.mu.Lock()
	defer g.mu.Unlock()

	// Skip if the node has already been seen.
	if _, ok := g.nodeDescs[desc.NodeID]; ok {
		return
	}

	g.nodeDescs[desc.NodeID] = &desc

	// Recompute max peers based on size of network and set the max
	// sizes for incoming and outgoing node sets.
	maxPeers := g.maxPeers(len(g.nodeDescs))
	g.mu.incoming.setMaxSize(maxPeers)
	g.outgoing.setMaxSize(maxPeers)

	// Skip if it's our own address.
	if desc.Address == g.mu.is.NodeAddr {
		return
	}

	// Add this new node address (if it's not already there) to our list
	// of resolvers so we can keep connecting to gossip if the original
	// resolvers go offline.
	g.maybeAddResolver(desc.Address)

	// Add new address (if it's not already there) to bootstrap info and
	// persist if possible.
	if g.storage != nil && g.maybeAddBootstrapAddress(desc.Address) {
		if err := g.storage.WriteBootstrapInfo(&g.bootstrapInfo); err != nil {
			log.Error(ctx, err)
		}
	}
}
Code example #19
File: migration.go Project: knz/cockroach
// MigrateZoneConfig migrates the legacy ZoneConfig format into the new one.
func MigrateZoneConfig(value *roachpb.Value) (ZoneConfig, error) {
	var zone ZoneConfig
	if err := value.GetProto(&zone); err != nil {
		return ZoneConfig{}, err
	}
	if len(zone.ReplicaAttrs) > 0 {
		if zone.NumReplicas > 0 || len(zone.Constraints.Constraints) > 0 {
			return ZoneConfig{}, errors.New("migration to new ZoneConfig failed due to previous partial upgrade")
		}
		zone.NumReplicas = int32(len(zone.ReplicaAttrs))
		if zone.NumReplicas > 0 {
			attrs := zone.ReplicaAttrs[0].Attrs
			zone.Constraints.Constraints = make([]Constraint, len(attrs))
			for i, attr := range attrs {
				zone.Constraints.Constraints[i].Value = attr
			}
		}
		zone.ReplicaAttrs = nil
	}
	return zone, nil
}
Code example #20
File: gossip.go Project: knz/cockroach
// updateSystemConfig is the raw gossip info callback.
// Unmarshal the system config and, if successful, update our
// copy and run the callbacks.
func (g *Gossip) updateSystemConfig(key string, content roachpb.Value) {
	ctx := g.AnnotateCtx(context.TODO())
	if key != KeySystemConfig {
		log.Fatalf(ctx, "wrong key received on SystemConfig callback: %s", key)
	}
	cfg := config.SystemConfig{}
	if err := content.GetProto(&cfg); err != nil {
		log.Errorf(ctx, "could not unmarshal system config on callback: %s", err)
		return
	}

	g.systemConfigMu.Lock()
	defer g.systemConfigMu.Unlock()
	g.systemConfig = cfg
	g.systemConfigSet = true
	for _, c := range g.systemConfigChannels {
		select {
		case c <- struct{}{}:
		default:
		}
	}
}
Code example #21
File: db_test.go Project: knz/cockroach
func (tm *testModel) storeInModel(r Resolution, data tspb.TimeSeriesData) {
	// Note the source, used to construct keys for model queries.
	tm.seenSources[data.Source] = struct{}{}

	// Process and store data in the model.
	internalData, err := data.ToInternal(r.SlabDuration(), r.SampleDuration())
	if err != nil {
		tm.t.Fatalf("test could not convert time series to internal format: %s", err.Error())
	}

	for _, idata := range internalData {
		key := MakeDataKey(data.Name, data.Source, r, idata.StartTimestampNanos)
		keyStr := string(key)

		existing, ok := tm.modelData[keyStr]
		var newTs roachpb.InternalTimeSeriesData
		if ok {
			existingTs, err := existing.GetTimeseries()
			if err != nil {
				tm.t.Fatalf("test could not extract time series from existing model value: %s", err.Error())
			}
			newTs, err = engine.MergeInternalTimeSeriesData(existingTs, idata)
			if err != nil {
				tm.t.Fatalf("test could not merge time series into model value: %s", err.Error())
			}
		} else {
			newTs, err = engine.MergeInternalTimeSeriesData(idata)
			if err != nil {
				tm.t.Fatalf("test could not merge time series into model value: %s", err.Error())
			}
		}
		var val roachpb.Value
		if err := val.SetProto(&newTs); err != nil {
			tm.t.Fatal(err)
		}
		tm.modelData[keyStr] = val
	}
}
Code example #22
File: merge_test.go Project: knz/cockroach
func timeSeriesAsValue(start int64, duration int64, samples ...tsSample) roachpb.Value {
	ts := &roachpb.InternalTimeSeriesData{
		StartTimestampNanos: start,
		SampleDurationNanos: duration,
	}
	for _, sample := range samples {
		newSample := roachpb.InternalTimeSeriesSample{
			Offset: sample.offset,
			Count:  sample.count,
			Sum:    sample.sum,
		}
		if sample.count > 1 {
			newSample.Max = proto.Float64(sample.max)
			newSample.Min = proto.Float64(sample.min)
		}
		ts.Samples = append(ts.Samples, newSample)
	}
	var v roachpb.Value
	if err := v.SetProto(ts); err != nil {
		panic(err)
	}
	return v
}
Code example #23
File: metadata.go Project: knz/cockroach
// GetInitialValues returns the set of initial K/V values which should be added to
// a bootstrapping CockroachDB cluster in order to create the tables contained
// in the schema.
func (ms MetadataSchema) GetInitialValues() []roachpb.KeyValue {
	var ret []roachpb.KeyValue

	// Save the ID generator value, which will generate descriptor IDs for user
	// objects.
	value := roachpb.Value{}
	value.SetInt(int64(keys.MaxReservedDescID + 1))
	ret = append(ret, roachpb.KeyValue{
		Key:   keys.DescIDGenerator,
		Value: value,
	})

	// addDescriptor generates the needed KeyValue objects to install a
	// descriptor on a new cluster.
	addDescriptor := func(parentID ID, desc DescriptorProto) {
		// Create name metadata key.
		value := roachpb.Value{}
		value.SetInt(int64(desc.GetID()))
		ret = append(ret, roachpb.KeyValue{
			Key:   MakeNameMetadataKey(parentID, desc.GetName()),
			Value: value,
		})

		// Create descriptor metadata key.
		value = roachpb.Value{}
		wrappedDesc := WrapDescriptor(desc)
		if err := value.SetProto(wrappedDesc); err != nil {
			log.Fatalf(context.TODO(), "could not marshal %v", desc)
		}
		ret = append(ret, roachpb.KeyValue{
			Key:   MakeDescMetadataKey(desc.GetID()),
			Value: value,
		})
	}

	// Generate initial values for system databases and tables, which have
	// static descriptors that were generated elsewhere.
	for _, sysObj := range ms.descs {
		addDescriptor(sysObj.parentID, sysObj.desc)
	}

	// Other key/value generation that doesn't fit into databases and
	// tables. This can be used to add initial entries to a table.
	ret = append(ret, ms.otherKV...)

	// Sort returned key values; this is valuable because it matches the way the
	// objects would be sorted if read from the engine.
	sort.Sort(roachpb.KeyValueByKey(ret))
	return ret
}
Code example #24
File: migration_test.go Project: knz/cockroach
func TestMigrateZoneConfig(t *testing.T) {
	defer leaktest.AfterTest(t)()

	testCases := []struct {
		input, want proto.Message
	}{
		{
			&ZoneConfig{
				ReplicaAttrs: []roachpb.Attributes{
					{Attrs: []string{"foo"}},
					{},
					{},
				},
			},
			&ZoneConfig{
				NumReplicas: 3,
				Constraints: Constraints{
					Constraints: []Constraint{
						{
							Type:  Constraint_POSITIVE,
							Value: "foo",
						},
					},
				},
			},
		},
		{
			&ZoneConfig{
				NumReplicas: 3,
				Constraints: Constraints{
					Constraints: []Constraint{
						{
							Type:  Constraint_POSITIVE,
							Value: "foo",
						},
					},
				},
			},
			&ZoneConfig{
				NumReplicas: 3,
				Constraints: Constraints{
					Constraints: []Constraint{
						{
							Type:  Constraint_POSITIVE,
							Value: "foo",
						},
					},
				},
			},
		},
	}

	for i, tc := range testCases {
		var val roachpb.Value
		if err := val.SetProto(tc.input); err != nil {
			t.Fatal(err)
		}
		out, err := MigrateZoneConfig(&val)
		if err != nil {
			t.Fatal(err)
		}
		if !proto.Equal(tc.want, &out) {
			t.Errorf("%d: MigrateZoneConfig(%+v) = %+v; not %+v", i, tc.input, out, tc.want)
		}
	}
}
Code example #25
File: debug.go Project: veteranlu/cockroach
func getProtoValue(data []byte, msg proto.Message) error {
	value := roachpb.Value{
		RawBytes: data,
	}
	return value.GetProto(msg)
}
Code example #26
File: debug.go Project: veteranlu/cockroach
func tryRangeIDKey(kv engine.MVCCKeyValue) (string, error) {
	if kv.Key.Timestamp != hlc.ZeroTimestamp {
		return "", fmt.Errorf("range ID keys shouldn't have timestamps: %s", kv.Key)
	}
	_, _, suffix, _, err := keys.DecodeRangeIDKey(kv.Key.Key)
	if err != nil {
		return "", err
	}

	// All range ID keys are stored inline on the metadata.
	var meta enginepb.MVCCMetadata
	if err := meta.Unmarshal(kv.Value); err != nil {
		return "", err
	}
	value := roachpb.Value{RawBytes: meta.RawBytes}

	// Values encoded as protobufs set msg and continue outside the
	// switch. Other types are handled inside the switch and return.
	var msg proto.Message
	switch {
	case bytes.Equal(suffix, keys.LocalLeaseAppliedIndexSuffix):
		fallthrough
	case bytes.Equal(suffix, keys.LocalRaftAppliedIndexSuffix):
		i, err := value.GetInt()
		if err != nil {
			return "", err
		}
		return strconv.FormatInt(i, 10), nil

	case bytes.Equal(suffix, keys.LocalRangeFrozenStatusSuffix):
		b, err := value.GetBool()
		if err != nil {
			return "", err
		}
		return strconv.FormatBool(b), nil

	case bytes.Equal(suffix, keys.LocalAbortCacheSuffix):
		msg = &roachpb.AbortCacheEntry{}

	case bytes.Equal(suffix, keys.LocalRangeLastGCSuffix):
		msg = &hlc.Timestamp{}

	case bytes.Equal(suffix, keys.LocalRaftTombstoneSuffix):
		msg = &roachpb.RaftTombstone{}

	case bytes.Equal(suffix, keys.LocalRaftTruncatedStateSuffix):
		msg = &roachpb.RaftTruncatedState{}

	case bytes.Equal(suffix, keys.LocalRangeLeaseSuffix):
		msg = &roachpb.Lease{}

	case bytes.Equal(suffix, keys.LocalRangeStatsSuffix):
		msg = &enginepb.MVCCStats{}

	case bytes.Equal(suffix, keys.LocalRaftHardStateSuffix):
		msg = &raftpb.HardState{}

	case bytes.Equal(suffix, keys.LocalRaftLastIndexSuffix):
		i, err := value.GetInt()
		if err != nil {
			return "", err
		}
		return strconv.FormatInt(i, 10), nil

	case bytes.Equal(suffix, keys.LocalRangeLastVerificationTimestampSuffixDeprecated):
		msg = &hlc.Timestamp{}

	case bytes.Equal(suffix, keys.LocalRangeLastReplicaGCTimestampSuffix):
		msg = &hlc.Timestamp{}

	default:
		return "", fmt.Errorf("unknown raft id key %s", suffix)
	}

	if err := value.GetProto(msg); err != nil {
		return "", err
	}
	return msg.String(), nil
}
Code example #27
File: server_test.go Project: BramGruneir/cockroach
func TestSystemConfigGossip(t *testing.T) {
	defer leaktest.AfterTest(t)()
	t.Skip("#12351")

	s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()
	ts := s.(*TestServer)
	ctx := context.TODO()

	key := sqlbase.MakeDescMetadataKey(keys.MaxReservedDescID)
	valAt := func(i int) *sqlbase.DatabaseDescriptor {
		return &sqlbase.DatabaseDescriptor{Name: "foo", ID: sqlbase.ID(i)}
	}

	// Register a callback for gossip updates.
	resultChan := ts.Gossip().RegisterSystemConfigChannel()

	// The span gets gossiped when it first shows up.
	select {
	case <-resultChan:

	case <-time.After(500 * time.Millisecond):
		t.Fatal("did not receive gossip message")
	}

	// Try a plain KV write first.
	if err := kvDB.Put(ctx, key, valAt(0)); err != nil {
		t.Fatal(err)
	}

	// Now do it as part of a transaction, but without the trigger set.
	if err := kvDB.Txn(ctx, func(txn *client.Txn) error {
		return txn.Put(key, valAt(1))
	}); err != nil {
		t.Fatal(err)
	}

	// Gossip channel should be dormant.
	// TODO(tschottdorf): This test is likely flaky. Why can't some other
	// process trigger gossip? It seems that a new range lease being
	// acquired will gossip a new system config since the hash changed and fail
	// the test (seen in practice during some buggy WIP).
	var systemConfig config.SystemConfig
	select {
	case <-resultChan:
		systemConfig, _ = ts.gossip.GetSystemConfig()
		t.Fatalf("unexpected message received on gossip channel: %v", systemConfig)

	case <-time.After(50 * time.Millisecond):
	}

	// This time mark the transaction as having a Gossip trigger.
	if err := kvDB.Txn(ctx, func(txn *client.Txn) error {
		txn.SetSystemConfigTrigger()
		return txn.Put(key, valAt(2))
	}); err != nil {
		t.Fatal(err)
	}

	// New system config received.
	select {
	case <-resultChan:
		systemConfig, _ = ts.gossip.GetSystemConfig()

	case <-time.After(500 * time.Millisecond):
		t.Fatal("did not receive gossip message")
	}

	// Now check the new config.
	var val *roachpb.Value
	for _, kv := range systemConfig.Values {
		if bytes.Equal(key, kv.Key) {
			val = &kv.Value
			break
		}
	}
	if val == nil {
		t.Fatal("key not found in gossiped info")
	}

	// Make sure the returned value is valAt(2).
	got := new(sqlbase.DatabaseDescriptor)
	if err := val.GetProto(got); err != nil {
		t.Fatal(err)
	}
	if expected := valAt(2); !reflect.DeepEqual(got, expected) {
		t.Fatalf("mismatch: expected %+v, got %+v", *expected, *got)
	}
}
Code example #28
File: backup.go Project: BramGruneir/cockroach
// Ingest loads some data in an sstable into an empty range. Only the keys
// between startKey and endKey are loaded. If newTableID is non-zero, every
// row's key is rewritten to be for that table.
func Ingest(
	ctx context.Context,
	txn *client.Txn,
	path string,
	checksum uint32,
	startKey, endKey roachpb.Key,
	newTableID sqlbase.ID,
) error {
	// TODO(mjibson): An appropriate value for this should be determined. The
	// current value was guessed at but appears to work well.
	const batchSize = 10000

	// TODO(dan): Check if the range being ingested into is empty. If newTableID
	// is non-zero, it'll have to be derived from startKey and endKey.

	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
	if _, err := io.Copy(crc, f); err != nil {
		return err
	}
	if c := crc.Sum32(); c != checksum {
		return errors.Errorf("%s: checksum mismatch got %d expected %d", path, c, checksum)
	}

	sst, err := engine.MakeRocksDBSstFileReader()
	if err != nil {
		return err
	}
	defer sst.Close()
	if err := sst.AddFile(path); err != nil {
		return err
	}

	b := txn.NewBatch()
	var v roachpb.Value
	count := 0
	ingestFunc := func(kv engine.MVCCKeyValue) (bool, error) {
		v = roachpb.Value{RawBytes: kv.Value}
		v.ClearChecksum()
		if log.V(3) {
			log.Infof(ctx, "Put %s %s\n", kv.Key.Key, v.PrettyPrint())
		}
		b.Put(kv.Key.Key, &v)
		count++
		if count > batchSize {
			if err := txn.Run(b); err != nil {
				return true, err
			}
			b = txn.NewBatch()
			count = 0
		}
		return false, nil
	}
	if newTableID != 0 {
		// MakeRekeyMVCCKeyValFunc modifies the keys, but this is safe because
		// the one we get back from rocksDBIterator.Key is a copy (not a
		// reference to the mmaped file.)
		ingestFunc = MakeRekeyMVCCKeyValFunc(newTableID, ingestFunc)
	}
	startKeyMVCC, endKeyMVCC := engine.MVCCKey{Key: startKey}, engine.MVCCKey{Key: endKey}
	if err := sst.Iterate(startKeyMVCC, endKeyMVCC, ingestFunc); err != nil {
		return err
	}
	return txn.Run(b)
}
Code example #29
File: table.go Project: jmptrader/cockroach
// MarshalColumnValue returns a roachpb.Value encoding of val, using the
// type expected by col. If val's type is incompatible with col, or if
// col's type is not yet implemented, an error is returned.
func MarshalColumnValue(col ColumnDescriptor, val parser.Datum) (roachpb.Value, error) {
	var r roachpb.Value

	if val == parser.DNull {
		return r, nil
	}

	switch col.Type.Kind {
	case ColumnType_BOOL:
		if v, ok := val.(*parser.DBool); ok {
			r.SetBool(bool(*v))
			return r, nil
		}
	case ColumnType_INT:
		if v, ok := val.(*parser.DInt); ok {
			r.SetInt(int64(*v))
			return r, nil
		}
	case ColumnType_FLOAT:
		if v, ok := val.(*parser.DFloat); ok {
			r.SetFloat(float64(*v))
			return r, nil
		}
	case ColumnType_DECIMAL:
		if v, ok := val.(*parser.DDecimal); ok {
			err := r.SetDecimal(&v.Dec)
			return r, err
		}
	case ColumnType_STRING:
		if v, ok := val.(*parser.DString); ok {
			r.SetString(string(*v))
			return r, nil
		}
	case ColumnType_BYTES:
		if v, ok := val.(*parser.DBytes); ok {
			r.SetString(string(*v))
			return r, nil
		}
		if v, ok := val.(*parser.DString); ok {
			r.SetString(string(*v))
			return r, nil
		}
	case ColumnType_DATE:
		if v, ok := val.(*parser.DDate); ok {
			r.SetInt(int64(*v))
			return r, nil
		}
	case ColumnType_TIMESTAMP:
		if v, ok := val.(*parser.DTimestamp); ok {
			r.SetTime(v.Time)
			return r, nil
		}
	case ColumnType_TIMESTAMPTZ:
		if v, ok := val.(*parser.DTimestampTZ); ok {
			r.SetTime(v.Time)
			return r, nil
		}
	case ColumnType_INTERVAL:
		if v, ok := val.(*parser.DInterval); ok {
			err := r.SetDuration(v.Duration)
			return r, err
		}
	default:
		return r, errors.Errorf("unsupported column type: %s", col.Type.Kind)
	}
	return r, fmt.Errorf("value type %s doesn't match type %s of column %q",
		val.ResolvedType(), col.Type.Kind, col.Name)
}
Code example #30
File: table.go Project: jmptrader/cockroach
// UnmarshalColumnValue decodes the value from a key-value pair using the type
// expected by the column. An error is returned if the value's type does not
// match the column's type.
func UnmarshalColumnValue(
	a *DatumAlloc, kind ColumnType_Kind, value *roachpb.Value,
) (parser.Datum, error) {
	if value == nil {
		return parser.DNull, nil
	}

	switch kind {
	case ColumnType_BOOL:
		v, err := value.GetBool()
		if err != nil {
			return nil, err
		}
		return parser.MakeDBool(parser.DBool(v)), nil
	case ColumnType_INT:
		v, err := value.GetInt()
		if err != nil {
			return nil, err
		}
		return a.NewDInt(parser.DInt(v)), nil
	case ColumnType_FLOAT:
		v, err := value.GetFloat()
		if err != nil {
			return nil, err
		}
		return a.NewDFloat(parser.DFloat(v)), nil
	case ColumnType_DECIMAL:
		v, err := value.GetDecimal()
		if err != nil {
			return nil, err
		}
		dd := a.NewDDecimal(parser.DDecimal{})
		dd.Set(v)
		return dd, nil
	case ColumnType_STRING:
		v, err := value.GetBytes()
		if err != nil {
			return nil, err
		}
		return a.NewDString(parser.DString(v)), nil
	case ColumnType_BYTES:
		v, err := value.GetBytes()
		if err != nil {
			return nil, err
		}
		return a.NewDBytes(parser.DBytes(v)), nil
	case ColumnType_DATE:
		v, err := value.GetInt()
		if err != nil {
			return nil, err
		}
		return a.NewDDate(parser.DDate(v)), nil
	case ColumnType_TIMESTAMP:
		v, err := value.GetTime()
		if err != nil {
			return nil, err
		}
		return a.NewDTimestamp(parser.DTimestamp{Time: v}), nil
	case ColumnType_TIMESTAMPTZ:
		v, err := value.GetTime()
		if err != nil {
			return nil, err
		}
		return a.NewDTimestampTZ(parser.DTimestampTZ{Time: v}), nil
	case ColumnType_INTERVAL:
		d, err := value.GetDuration()
		if err != nil {
			return nil, err
		}
		return a.NewDInterval(parser.DInterval{Duration: d}), nil
	default:
		return nil, errors.Errorf("unsupported column type: %s", kind)
	}
}
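MarshalColumnValue and UnmarshalColumnValue (examples #29 and #30) act as inverses for the supported column types. A minimal sketch of the round trip for an INT column; the ColumnDescriptor literal and the zero-value DatumAlloc are assumptions made for illustration:

// intColumnRoundTrip encodes a DInt through MarshalColumnValue and decodes it
// back with UnmarshalColumnValue. Only the column's type kind is consulted by
// these two functions here; the descriptor literal is assumed for the sketch.
func intColumnRoundTrip(i int64) (parser.Datum, error) {
	col := ColumnDescriptor{Type: ColumnType{Kind: ColumnType_INT}}
	d := parser.DInt(i)
	v, err := MarshalColumnValue(col, &d)
	if err != nil {
		return nil, err
	}
	var a DatumAlloc
	return UnmarshalColumnValue(&a, col.Type.Kind, &v)
}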