func (suite *DatabaseSuite) TestDatabaseDelete() {
	datasetID1, datasetID2 := "ds1", "ds2"
	datasets := suite.ds.Datasets()
	suite.Zero(datasets.Len())

	// |a|
	var err error
	a := types.String("a")
	suite.ds, err = suite.ds.Commit(datasetID1, NewCommit(a, types.NewSet(), types.EmptyStruct))
	suite.NoError(err)
	suite.True(suite.ds.Head(datasetID1).Get(ValueField).Equals(a))

	// ds1: |a|, ds2: |b|
	b := types.String("b")
	suite.ds, err = suite.ds.Commit(datasetID2, NewCommit(b, types.NewSet(), types.EmptyStruct))
	suite.NoError(err)
	suite.True(suite.ds.Head(datasetID2).Get(ValueField).Equals(b))

	suite.ds, err = suite.ds.Delete(datasetID1)
	suite.NoError(err)
	suite.True(suite.ds.Head(datasetID2).Get(ValueField).Equals(b))
	_, present := suite.ds.MaybeHead(datasetID1)
	suite.False(present, "Dataset %s should not be present", datasetID1)

	// Get a fresh database, and verify that only ds2 is present.
	newDs := suite.makeDs(suite.cs)
	datasets = newDs.Datasets()
	suite.Equal(uint64(1), datasets.Len())
	_, present = suite.ds.MaybeHead(datasetID2)
	suite.True(present, "Dataset %s should be present", datasetID2)
	newDs.Close()
}
// Source: -6-> C2(L4) -1-> N
//          .    \ -4-> L3 -1-> N
//          .         \ -3-> L2 -1-> N
//          .              \ - "oy!"
//          5              \ -2-> L1 -1-> N
//          .                   \ -1-> L0
//         C1(L4) -1-> N
//           \ -4-> L3 -1-> N
//                \ -3-> L2 -1-> N
//                     \ -2-> L1 -1-> N
//                          \ -1-> L0
// Sink: -5-> C1(L4) -1-> N
//         \ -4-> L3 -1-> N
//              \ -3-> L2 -1-> N
//                   \ -2-> L1 -1-> N
//                        \ -1-> L0
func (suite *PullSuite) TestPullUpdates() {
	sinkL := buildListOfHeight(4, suite.sink)
	sinkRef := suite.commitToSink(sinkL, types.NewSet())
	expectedReads := suite.sinkCS.Reads

	srcL := buildListOfHeight(4, suite.source)
	sourceRef := suite.commitToSource(srcL, types.NewSet())
	L3 := srcL.Get(1).(types.Ref).TargetValue(suite.source).(types.List)
	L2 := L3.Get(1).(types.Ref).TargetValue(suite.source).(types.List)
	L2 = L2.Append(suite.source.WriteValue(types.String("oy!")))
	L3 = L3.Set(1, suite.source.WriteValue(L2))
	srcL = srcL.Set(1, suite.source.WriteValue(L3))
	sourceRef = suite.commitToSource(srcL, types.NewSet(sourceRef))

	pt := startProgressTracker()

	Pull(suite.source, suite.sink, sourceRef, sinkRef, 2, pt.Ch)
	if suite.sinkIsLocal() {
		// 3 objects read from sink: L3, L2 and C1 (when considering the shared commit).
		expectedReads += 3
	}
	suite.Equal(expectedReads, suite.sinkCS.Reads)
	pt.Validate(suite)

	suite.sink.validatingBatchStore().Flush()
	v := suite.sink.ReadValue(sourceRef.TargetHash()).(types.Struct)
	suite.NotNil(v)
	suite.True(srcL.Equals(v.Get(ValueField)))
}
// Source: -6-> C3(L5) -1-> N
//          .    \ -5-> L4 -1-> N
//          .         \ -4-> L3 -1-> N
//          .              \ -3-> L2 -1-> N
//          5                   \ -2-> L1 -1-> N
//          .                        \ -1-> L0
//         C2(L4) -1-> N
//          .    \ -4-> L3 -1-> N
//          .         \ -3-> L2 -1-> N
//          .              \ -2-> L1 -1-> N
//          3                   \ -1-> L0
//          .
//         C1(L2) -1-> N
//               \ -2-> L1 -1-> N
//                    \ -1-> L0
//
// Sink: -3-> C1(L2) -1-> N
//               \ -2-> L1 -1-> N
//                    \ -1-> L0
func (suite *PullSuite) TestPullMultiGeneration() {
	sinkL := buildListOfHeight(2, suite.sink)
	sinkRef := suite.commitToSink(sinkL, types.NewSet())
	expectedReads := suite.sinkCS.Reads

	srcL := buildListOfHeight(2, suite.source)
	sourceRef := suite.commitToSource(srcL, types.NewSet())
	srcL = buildListOfHeight(4, suite.source)
	sourceRef = suite.commitToSource(srcL, types.NewSet(sourceRef))
	srcL = buildListOfHeight(5, suite.source)
	sourceRef = suite.commitToSource(srcL, types.NewSet(sourceRef))

	pt := startProgressTracker()

	Pull(suite.source, suite.sink, sourceRef, sinkRef, 2, pt.Ch)
	if suite.sinkIsLocal() {
		// C1 gets read from most-local DB
		expectedReads++
	}
	suite.Equal(expectedReads, suite.sinkCS.Reads)
	pt.Validate(suite)

	suite.sink.validatingBatchStore().Flush()
	v := suite.sink.ReadValue(sourceRef.TargetHash()).(types.Struct)
	suite.NotNil(v)
	suite.True(srcL.Equals(v.Get(ValueField)))
}
// Source: -6-> C2(L5) -1-> N
//          .    \ -5-> L4 -1-> N
//          .         \ -4-> L3 -1-> N
//          .              \ -3-> L2 -1-> N
//          4                   \ -2-> L1 -1-> N
//          .                        \ -1-> L0
//         C1(L3) -1-> N
//               \ -3-> L2 -1-> N
//                    \ -2-> L1 -1-> N
//                         \ -1-> L0
//
// Sink: -5-> C3(L3') -1-> N
//        .      \ -3-> L2 -1-> N
//        .      \      \ -2-> L1 -1-> N
//        .      \           \ -1-> L0
//        .      \ - "oy!"
//        4
//        .
//       C1(L3) -1-> N
//             \ -3-> L2 -1-> N
//                  \ -2-> L1 -1-> N
//                       \ -1-> L0
func (suite *PullSuite) TestPullDivergentHistory() {
	sinkL := buildListOfHeight(3, suite.sink)
	sinkRef := suite.commitToSink(sinkL, types.NewSet())
	srcL := buildListOfHeight(3, suite.source)
	sourceRef := suite.commitToSource(srcL, types.NewSet())

	sinkL = sinkL.Append(types.String("oy!"))
	sinkRef = suite.commitToSink(sinkL, types.NewSet(sinkRef))
	srcL = srcL.Set(1, buildListOfHeight(5, suite.source))
	sourceRef = suite.commitToSource(srcL, types.NewSet(sourceRef))
	preReads := suite.sinkCS.Reads

	pt := startProgressTracker()

	Pull(suite.source, suite.sink, sourceRef, sinkRef, 2, pt.Ch)
	// No objects read from sink, since sink Head is not an ancestor of source Head.
	suite.Equal(preReads, suite.sinkCS.Reads)
	pt.Validate(suite)

	suite.sink.validatingBatchStore().Flush()
	v := suite.sink.ReadValue(sourceRef.TargetHash()).(types.Struct)
	suite.NotNil(v)
	suite.True(srcL.Equals(v.Get(ValueField)))
}
func (suite *DatabaseSuite) TestDatabaseConcurrency() {
	datasetID := "ds1"
	var err error

	// Setup:
	// |a| <- |b|
	a := types.String("a")
	aCommit := NewCommit(a, types.NewSet(), types.EmptyStruct)
	suite.ds, err = suite.ds.Commit(datasetID, aCommit)
	b := types.String("b")
	bCommit := NewCommit(b, types.NewSet(types.NewRef(aCommit)), types.EmptyStruct)
	suite.ds, err = suite.ds.Commit(datasetID, bCommit)
	suite.NoError(err)
	suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(b))

	// Important to create this here.
	ds2 := suite.makeDs(suite.cs)

	// Change 1:
	// |a| <- |b| <- |c|
	c := types.String("c")
	cCommit := NewCommit(c, types.NewSet(types.NewRef(bCommit)), types.EmptyStruct)
	suite.ds, err = suite.ds.Commit(datasetID, cCommit)
	suite.NoError(err)
	suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(c))

	// Change 2:
	// |a| <- |b| <- |e|
	// Should be disallowed; the Database returned by Commit() should have |c| as Head.
	e := types.String("e")
	eCommit := NewCommit(e, types.NewSet(types.NewRef(bCommit)), types.EmptyStruct)
	ds2, err = ds2.Commit(datasetID, eCommit)
	suite.Error(err)
	suite.True(ds2.Head(datasetID).Get(ValueField).Equals(c))
}
func (suite *DatabaseSuite) TestDatabaseDeleteConcurrent() {
	datasetID := "ds1"
	suite.Zero(suite.ds.Datasets().Len())
	var err error

	// |a|
	a := types.String("a")
	aCommit := NewCommit(a, types.NewSet(), types.EmptyStruct)
	suite.ds, err = suite.ds.Commit(datasetID, aCommit)
	suite.NoError(err)

	// |a| <- |b|
	b := types.String("b")
	bCommit := NewCommit(b, types.NewSet(types.NewRef(aCommit)), types.EmptyStruct)
	ds2, err := suite.ds.Commit(datasetID, bCommit)
	suite.NoError(err)
	suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(a))
	suite.True(ds2.Head(datasetID).Get(ValueField).Equals(b))

	suite.ds, err = suite.ds.Delete(datasetID)
	suite.NoError(err)
	_, present := suite.ds.MaybeHead(datasetID)
	suite.False(present, "Dataset %s should not be present", datasetID)
	_, present = ds2.MaybeHead(datasetID)
	suite.True(present, "Dataset %s should be present", datasetID)

	// Get a fresh database, and verify that no datasets are present.
	newDs := suite.makeDs(suite.cs)
	suite.Equal(uint64(0), newDs.Datasets().Len())
	newDs.Close()
}
func (suite *WalkAllTestSuite) TestWalkComposites() {
	suite.walkWorker(suite.storeAndRef(types.NewList()), 2)
	suite.walkWorker(suite.storeAndRef(types.NewList(types.Bool(false), types.Number(8))), 4)
	suite.walkWorker(suite.storeAndRef(types.NewSet()), 2)
	suite.walkWorker(suite.storeAndRef(types.NewSet(types.Bool(false), types.Number(8))), 4)
	suite.walkWorker(suite.storeAndRef(types.NewMap()), 2)
	suite.walkWorker(suite.storeAndRef(types.NewMap(types.Number(8), types.Bool(true), types.Number(0), types.Bool(false))), 6)
}
func TestNewCommit(t *testing.T) {
	assert := assert.New(t)

	commitFieldNames := []string{MetaField, ParentsField, ValueField}
	assertTypeEquals := func(e, a *types.Type) {
		assert.True(a.Equals(e), "Actual: %s\nExpected %s", a.Describe(), e.Describe())
	}

	commit := NewCommit(types.Number(1), types.NewSet(), types.EmptyStruct)
	at := commit.Type()
	et := types.MakeStructType("Commit", commitFieldNames, []*types.Type{
		types.EmptyStructType,
		types.MakeSetType(types.MakeRefType(types.MakeCycleType(0))),
		types.NumberType,
	})
	assertTypeEquals(et, at)

	// Committing another Number
	commit2 := NewCommit(types.Number(2), types.NewSet(types.NewRef(commit)), types.EmptyStruct)
	at2 := commit2.Type()
	et2 := et
	assertTypeEquals(et2, at2)

	// Now commit a String
	commit3 := NewCommit(types.String("Hi"), types.NewSet(types.NewRef(commit2)), types.EmptyStruct)
	at3 := commit3.Type()
	et3 := types.MakeStructType("Commit", commitFieldNames, []*types.Type{
		types.EmptyStructType,
		types.MakeSetType(types.MakeRefType(types.MakeStructType("Commit", commitFieldNames, []*types.Type{
			types.EmptyStructType,
			types.MakeSetType(types.MakeRefType(types.MakeCycleType(0))),
			types.MakeUnionType(types.NumberType, types.StringType),
		}))),
		types.StringType,
	})
	assertTypeEquals(et3, at3)

	// Now commit a String with MetaInfo
	meta := types.NewStruct("Meta", types.StructData{"date": types.String("some date"), "number": types.Number(9)})
	metaType := types.MakeStructType("Meta", []string{"date", "number"}, []*types.Type{types.StringType, types.NumberType})
	assertTypeEquals(metaType, meta.Type())
	commit4 := NewCommit(types.String("Hi"), types.NewSet(types.NewRef(commit2)), meta)
	at4 := commit4.Type()
	et4 := types.MakeStructType("Commit", commitFieldNames, []*types.Type{
		metaType,
		types.MakeSetType(types.MakeRefType(types.MakeStructType("Commit", commitFieldNames, []*types.Type{
			types.MakeUnionType(types.EmptyStructType, metaType),
			types.MakeSetType(types.MakeRefType(types.MakeCycleType(0))),
			types.MakeUnionType(types.NumberType, types.StringType),
		}))),
		types.StringType,
	})
	assertTypeEquals(et4, at4)
}
func (suite *DatabaseSuite) TestReadWriteCachePersists() {
	var err error
	var v types.Value = types.Bool(true)
	suite.NotEqual(hash.Hash{}, suite.ds.WriteValue(v))
	r := suite.ds.WriteValue(v)
	commit := NewCommit(v, types.NewSet(), types.EmptyStruct)
	suite.ds, err = suite.ds.Commit("foo", commit)
	suite.NoError(err)
	suite.Equal(1, suite.cs.Writes-writesOnCommit)

	newCommit := NewCommit(r, types.NewSet(types.NewRef(commit)), types.EmptyStruct)
	suite.ds, err = suite.ds.Commit("foo", newCommit)
	suite.NoError(err)
}
func TestCommitWithoutMetaField(t *testing.T) {
	assert := assert.New(t)

	metaCommit := types.NewStruct("Commit", types.StructData{
		"value":   types.Number(9),
		"parents": types.NewSet(),
		"meta":    types.EmptyStruct,
	})
	assert.True(IsCommitType(metaCommit.Type()))

	noMetaCommit := types.NewStruct("Commit", types.StructData{
		"value":   types.Number(9),
		"parents": types.NewSet(),
	})
	assert.False(IsCommitType(noMetaCommit.Type()))
}
func TestAbsolutePaths(t *testing.T) {
	assert := assert.New(t)

	s0, s1 := types.String("foo"), types.String("bar")
	list := types.NewList(s0, s1)
	emptySet := types.NewSet()

	db := datas.NewDatabase(chunks.NewMemoryStore())
	db.WriteValue(s0)
	db.WriteValue(s1)
	db.WriteValue(list)
	db.WriteValue(emptySet)

	var err error
	db, err = db.Commit("ds", datas.NewCommit(list, types.NewSet(), types.EmptyStruct))
	assert.NoError(err)
	head := db.Head("ds")

	resolvesTo := func(exp types.Value, str string) {
		p, err := NewAbsolutePath(str)
		assert.NoError(err)
		act := p.Resolve(db)
		if exp == nil {
			assert.Nil(act)
		} else {
			assert.True(exp.Equals(act), "%s Expected %s Actual %s", str, types.EncodedValue(exp), types.EncodedValue(act))
		}
	}

	resolvesTo(head, "ds")
	resolvesTo(emptySet, "ds.parents")
	resolvesTo(list, "ds.value")
	resolvesTo(s0, "ds.value[0]")
	resolvesTo(s1, "ds.value[1]")
	resolvesTo(head, "#"+head.Hash().String())
	resolvesTo(list, "#"+list.Hash().String())
	resolvesTo(s0, "#"+s0.Hash().String())
	resolvesTo(s1, "#"+s1.Hash().String())
	resolvesTo(s0, "#"+list.Hash().String()+"[0]")
	resolvesTo(s1, "#"+list.Hash().String()+"[1]")

	resolvesTo(nil, "foo")
	resolvesTo(nil, "foo.parents")
	resolvesTo(nil, "foo.value")
	resolvesTo(nil, "foo.value[0]")
	resolvesTo(nil, "#"+types.String("baz").Hash().String())
	resolvesTo(nil, "#"+types.String("baz").Hash().String()+"[0]")
}
func TestHandlePostRoot(t *testing.T) {
	assert := assert.New(t)
	cs := chunks.NewTestStore()
	vs := types.NewValueStore(types.NewBatchStoreAdaptor(cs))

	commit := NewCommit(types.String("head"), types.NewSet(), types.NewStruct("Meta", types.StructData{}))
	newHead := types.NewMap(types.String("dataset1"), vs.WriteValue(commit))
	chnx := []chunks.Chunk{
		chunks.NewChunk([]byte("abc")),
		types.EncodeValue(newHead, nil),
	}
	err := cs.PutMany(chnx)
	assert.NoError(err)

	// First attempt should fail, as 'last' won't match.
	u := &url.URL{}
	queryParams := url.Values{}
	queryParams.Add("last", chnx[0].Hash().String())
	queryParams.Add("current", chnx[1].Hash().String())
	u.RawQuery = queryParams.Encode()
	url := u.String()

	w := httptest.NewRecorder()
	HandleRootPost(w, newRequest("POST", "", url, nil, nil), params{}, cs)
	assert.Equal(http.StatusConflict, w.Code, "Handler error:\n%s", string(w.Body.Bytes()))

	// Now, update the root manually to 'last' and try again.
	assert.True(cs.UpdateRoot(chnx[0].Hash(), hash.Hash{}))
	w = httptest.NewRecorder()
	HandleRootPost(w, newRequest("POST", "", url, nil, nil), params{}, cs)
	assert.Equal(http.StatusOK, w.Code, "Handler error:\n%s", string(w.Body.Bytes()))
}
func TestLDBDatabase(t *testing.T) {
	assert := assert.New(t)

	d1 := os.TempDir()
	dir, err := ioutil.TempDir(d1, "flags")
	assert.NoError(err)
	ldbDir := path.Join(dir, "store")
	spec := fmt.Sprintf("ldb:%s", path.Join(dir, "store"))

	cs := chunks.NewLevelDBStoreUseFlags(ldbDir, "")
	ds := datas.NewDatabase(cs)

	s1 := types.String("A String")
	s1Hash := ds.WriteValue(s1)
	ds.Commit("testDs", datas.NewCommit(s1Hash, types.NewSet(), types.EmptyStruct))
	ds.Close()

	sp, errRead := parseDatabaseSpec(spec)
	assert.NoError(errRead)
	store, err := sp.Database()
	assert.NoError(err)
	assert.Equal(s1, store.ReadValue(s1.Hash()))
	store.Close()

	os.Remove(dir)
}
func (suite *WalkTestSuite) SkipTestSkipSetElement() {
	wholeSet := types.NewSet(suite.mustSkip, suite.shouldSee).Insert(suite.shouldSee)
	reached := suite.skipWorker(wholeSet)
	for _, v := range []types.Value{wholeSet, suite.mustSkip, suite.shouldSee, suite.shouldSeeItem} {
		suite.Contains(reached, v, "Doesn't contain %+v", v)
	}
	suite.Len(reached, 4)
}
func (suite *DatabaseSuite) TestDatabaseHeightOfCollections() {
	setOfStringType := types.MakeSetType(types.StringType)
	setOfRefOfStringType := types.MakeSetType(types.MakeRefType(types.StringType))

	// Set<String>
	v1 := types.String("hello")
	v2 := types.String("world")
	s1 := types.NewSet(v1, v2)
	suite.Equal(uint64(1), suite.ds.WriteValue(s1).Height())

	// Set<Ref<String>>
	s2 := types.NewSet(suite.ds.WriteValue(v1), suite.ds.WriteValue(v2))
	suite.Equal(uint64(2), suite.ds.WriteValue(s2).Height())

	// List<Set<String>>
	v3 := types.String("foo")
	v4 := types.String("bar")
	s3 := types.NewSet(v3, v4)
	l1 := types.NewList(s1, s3)
	suite.Equal(uint64(1), suite.ds.WriteValue(l1).Height())

	// List<Ref<Set<String>>>
	l2 := types.NewList(suite.ds.WriteValue(s1), suite.ds.WriteValue(s3))
	suite.Equal(uint64(2), suite.ds.WriteValue(l2).Height())

	// List<Ref<Set<Ref<String>>>>
	s4 := types.NewSet(suite.ds.WriteValue(v3), suite.ds.WriteValue(v4))
	l3 := types.NewList(suite.ds.WriteValue(s4))
	suite.Equal(uint64(3), suite.ds.WriteValue(l3).Height())

	// List<Set<String> | RefValue<Set<String>>>
	l4 := types.NewList(s1, suite.ds.WriteValue(s3))
	suite.Equal(uint64(2), suite.ds.WriteValue(l4).Height())
	l5 := types.NewList(suite.ds.WriteValue(s1), s3)
	suite.Equal(uint64(2), suite.ds.WriteValue(l5).Height())

	// Familiar with the "New Jersey Turnpike" drink? Here's the noms version of that...
	everything := []types.Value{v1, v2, s1, s2, v3, v4, s3, l1, l2, s4, l3, l4, l5}
	andMore := make([]types.Value, 0, len(everything)*3+2)
	for _, v := range everything {
		andMore = append(andMore, v, v.Type(), suite.ds.WriteValue(v))
	}
	andMore = append(andMore, setOfStringType, setOfRefOfStringType)

	suite.ds.WriteValue(types.NewList(andMore...))
}
func (suite *WalkTestSuite) TestStopWalkImmediately() {
	actual := 0
	SomeP(types.NewList(types.NewSet(), types.NewList()), suite.vs, func(v types.Value, r *types.Ref) bool {
		actual++
		return true
	}, 1)
	suite.Equal(1, actual)
}
// Commit updates the commit that a dataset points at. The new Commit is constructed using v and the current Head.
// If the update cannot be performed, e.g., because of a conflict, Commit returns an 'ErrMergeNeeded' error and
// the current snapshot of the dataset so that the client can merge the changes and try again.
func (ds *Dataset) Commit(v types.Value) (Dataset, error) {
	p := types.NewSet()
	if headRef, ok := ds.MaybeHeadRef(); ok {
		headRef.TargetValue(ds.Database()) // TODO: This is a hack to deconfuse the validation code, which doesn't hold onto validation state between commits.
		p = p.Insert(headRef)
	}
	return ds.CommitWithParents(v, p)
}
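// Illustrative sketch (not part of the package): a client-side retry loop around Commit, as the
// doc comment above suggests. It assumes ErrMergeNeeded and ValueField are exported by the datas
// package; mergeValues is a hypothetical, application-supplied merge policy.
func commitWithRetry(ds Dataset, v types.Value, mergeValues func(head, proposed types.Value) types.Value) (Dataset, error) {
	for {
		newDs, err := ds.Commit(v)
		if err == nil {
			return newDs, nil
		}
		if err != datas.ErrMergeNeeded {
			return ds, err
		}
		// On conflict, Commit hands back the current snapshot of the dataset;
		// merge against its Head value and try again.
		ds = newDs
		v = mergeValues(ds.Head().Get(datas.ValueField), v)
	}
}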
func buildSetIncrementally(count uint64, createFn createValueFn) types.Collection {
	s := types.NewSet()
	for i := uint64(0); i < count; i++ {
		s = s.Insert(createFn(i))
	}
	return s
}
func NewCommit() types.Struct {
	initialFields := map[string]types.Value{
		ValueField:   types.String(""),
		ParentsField: types.NewSet(),
	}
	return types.NewStructWithType(commitType, initialFields)
}
func buildSet(count uint64, createFn createValueFn) types.Collection {
	values := make([]types.Value, count)
	for i := uint64(0); i < count; i++ {
		values[i] = createFn(i)
	}
	return types.NewSet(values...)
}
func (suite *WalkTestSuite) SkipTestSkipMapValue() {
	shouldAlsoSeeItem := types.String("Also good")
	shouldAlsoSee := types.NewSet(shouldAlsoSeeItem)
	wholeMap := types.NewMap(suite.shouldSee, suite.mustSkip, shouldAlsoSee, suite.shouldSee)
	reached := suite.skipWorker(wholeMap)
	for _, v := range []types.Value{wholeMap, suite.shouldSee, suite.shouldSeeItem, suite.mustSkip, shouldAlsoSee, shouldAlsoSeeItem} {
		suite.Contains(reached, v, "Doesn't contain %+v", v)
	}
	suite.Len(reached, 8)
}
func (suite *DatabaseSuite) TestReadWriteCache() {
	var v types.Value = types.Bool(true)
	suite.NotEqual(hash.Hash{}, suite.ds.WriteValue(v))
	r := suite.ds.WriteValue(v).TargetHash()
	commit := NewCommit(v, types.NewSet(), types.EmptyStruct)
	newDs, err := suite.ds.Commit("foo", commit)
	suite.NoError(err)
	suite.Equal(1, suite.cs.Writes-writesOnCommit)

	v = newDs.ReadValue(r)
	suite.True(v.Equals(types.Bool(true)))
}
// Source: -3-> C(L2) -1-> N
//               \ -2-> L1 -1-> N
//                    \ -1-> L0
//
// Sink: Nada
func (suite *PullSuite) TestPullEverything() {
	l := buildListOfHeight(2, suite.source)
	sourceRef := suite.commitToSource(l, types.NewSet())

	Pull(suite.source, suite.sink, sourceRef, types.Ref{}, 2)
	suite.Equal(0, suite.sinkCS.Reads)

	suite.sink.batchStore().Flush()
	v := suite.sink.ReadValue(sourceRef.TargetHash()).(types.Struct)
	suite.NotNil(v)
	suite.True(l.Equals(v.Get(ValueField)))
}
func getAncestors(commits types.Set, vr types.ValueReader) types.Set {
	ancestors := types.NewSet()
	commits.IterAll(func(v types.Value) {
		r := v.(types.Ref)
		c := r.TargetValue(vr).(types.Struct)
		next := []types.Value{}
		c.Get(ParentsField).(types.Set).IterAll(func(v types.Value) {
			next = append(next, v)
		})
		ancestors = ancestors.Insert(next...)
	})
	return ancestors
}
// Source: -3-> C(L2) -1-> N
//               \ -2-> L1 -1-> N
//                    \ -1-> L0
//
// Sink: Nada
func (suite *PullSuite) TestPullEverything() {
	l := buildListOfHeight(2, suite.source)
	sourceRef := suite.commitToSource(l, types.NewSet())
	pt := startProgressTracker()

	Pull(suite.source, suite.sink, sourceRef, types.Ref{}, 2, pt.Ch)
	suite.Equal(0, suite.sinkCS.Reads)
	pt.Validate(suite)

	suite.sink.validatingBatchStore().Flush()
	v := suite.sink.ReadValue(sourceRef.TargetHash()).(types.Struct)
	suite.NotNil(v)
	suite.True(l.Equals(v.Get(ValueField)))
}
func TestExplicitBranchUsingDatasets(t *testing.T) {
	assert := assert.New(t)
	id1 := "testdataset"
	id2 := "othertestdataset"
	cs := chunks.NewMemoryStore()

	ds1 := newDS(id1, cs)

	// ds1: |a|
	a := types.String("a")
	ds1, err := ds1.CommitValue(a)
	assert.NoError(err)
	assert.True(ds1.Head().Get(datas.ValueField).Equals(a))

	// ds1: |a|
	//        \ds2
	ds2 := newDS(id2, cs)
	ds2, err = ds2.CommitValue(ds1.Head().Get(datas.ValueField))
	assert.NoError(err)
	assert.True(ds2.Head().Get(datas.ValueField).Equals(a))

	// ds1: |a| <- |b|
	b := types.String("b")
	ds1, err = ds1.CommitValue(b)
	assert.NoError(err)
	assert.True(ds1.Head().Get(datas.ValueField).Equals(b))

	// ds1: |a| <- |b|
	//        \ds2 <- |c|
	c := types.String("c")
	ds2, err = ds2.CommitValue(c)
	assert.NoError(err)
	assert.True(ds2.Head().Get(datas.ValueField).Equals(c))

	// ds1: |a| <- |b| <--|d|
	//        \ds2 <- |c| <--/
	mergeParents := types.NewSet(types.NewRef(ds1.Head()), types.NewRef(ds2.Head()))
	d := types.String("d")
	ds2, err = ds2.Commit(d, CommitOptions{Parents: mergeParents})
	assert.NoError(err)
	assert.True(ds2.Head().Get(datas.ValueField).Equals(d))

	ds1, err = ds1.Commit(d, CommitOptions{Parents: mergeParents})
	assert.NoError(err)
	assert.True(ds1.Head().Get(datas.ValueField).Equals(d))
}
func TestPullDeepRefTopDown(t *testing.T) {
	assert := assert.New(t)

	sink := createTestDataset("sink")
	source := createTestDataset("source")

	sourceInitialValue := types.NewList(
		types.NewList(NewList(source)),
		types.NewSet(NewSet(source)),
		types.NewMap(NewMap(source), NewMap(source)))
	source, err := source.Commit(sourceInitialValue)
	assert.NoError(err)

	sink, err = sink.pull(source.Database(), types.NewRef(source.Head()), 1)
	assert.NoError(err)
	assert.True(source.Head().Equals(sink.Head()))
}
// Commit updates the commit that a dataset points at. The new Commit struct is constructed using `v`, `opts.Parents`, and `opts.Meta`.
// If `opts.Parents` is the zero value (`types.Set{}`), then the current head is used.
// If `opts.Meta` is the zero value (`types.Struct{}`), then a fully initialized empty Struct is passed to NewCommit.
// If the update cannot be performed, e.g., because of a conflict, Commit returns an 'ErrMergeNeeded' error and the
// current snapshot of the dataset so that the client can merge the changes and try again.
func (ds *Dataset) Commit(v types.Value, opts CommitOptions) (Dataset, error) {
	parents := opts.Parents
	if (parents == types.Set{}) {
		parents = types.NewSet()
		if headRef, ok := ds.MaybeHeadRef(); ok {
			headRef.TargetValue(ds.Database()) // TODO: This is a hack to deconfuse the validation code, which doesn't hold onto validation state between commits.
			parents = parents.Insert(headRef)
		}
	}

	meta := opts.Meta
	// Ideally, we would like to do 'if meta == (types.Struct{})', but types.Struct is not comparable in Go
	// since it contains a slice.
	if meta.Type() == nil && len(meta.ChildValues()) == 0 {
		meta = types.EmptyStruct
	}

	newCommit := datas.NewCommit(v, parents, meta)
	store, err := ds.Database().Commit(ds.id, newCommit)
	return Dataset{store, ds.id}, err
}
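// Illustrative sketch (not part of the package): committing a merge by supplying explicit parents
// through CommitOptions, mirroring the merge step in TestExplicitBranchUsingDatasets above.
// dsA and dsB are assumed to be Datasets in the same Database whose heads should both become
// parents of the new commit; merged is the already-computed merge value.
func commitMerge(dsA, dsB Dataset, merged types.Value) (Dataset, error) {
	// A non-zero opts.Parents overrides the default behavior of using the current head as the sole parent.
	parents := types.NewSet(types.NewRef(dsA.Head()), types.NewRef(dsB.Head()))
	return dsA.Commit(merged, CommitOptions{Parents: parents})
}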
func (suite *WalkAllTestSuite) TestWalkNestedComposites() {
	cs := chunks.NewMemoryStore()
	suite.walkWorker(suite.storeAndRef(types.NewList(suite.NewSet(cs), types.Number(8))), 5)
	suite.walkWorker(suite.storeAndRef(types.NewSet(suite.NewList(cs), suite.NewSet(cs))), 6)
	// {"string": "string",
	//  "list": [false true],
	//  "map": {"nested": "string"}
	//  "mtlist": []
	//  "set": [5 7 8]
	//  []: "wow"
	// }
	nested := types.NewMap(
		types.String("string"), types.String("string"),
		types.String("list"), suite.NewList(cs, types.Bool(false), types.Bool(true)),
		types.String("map"), suite.NewMap(cs, types.String("nested"), types.String("string")),
		types.String("mtlist"), suite.NewList(cs),
		types.String("set"), suite.NewSet(cs, types.Number(5), types.Number(7), types.Number(8)),
		suite.NewList(cs), types.String("wow"), // note that the dupe list chunk is skipped
	)
	suite.walkWorker(suite.storeAndRef(nested), 25)
}
func traverseCommon(comRef, sinkHead types.Ref, db Database) traverseResult {
	if comRef.Height() > 1 && isRefOfCommitType(comRef.Type()) {
		commit := comRef.TargetValue(db).(types.Struct)
		// We don't want to traverse the parents of sinkHead, but we still want to traverse its Value on
		// the sinkDB side. We also still want to traverse all children, in both the srcDB and sinkDB, of
		// any common Commit that is not at the Head of sinkDB.
		exclusionSet := types.NewSet()
		if comRef.Equals(sinkHead) {
			exclusionSet = commit.Get(ParentsField).(types.Set)
		}
		chunks := types.RefSlice(commit.Chunks())
		for i := 0; i < len(chunks); {
			if exclusionSet.Has(chunks[i]) {
				end := len(chunks) - 1
				chunks.Swap(i, end)
				chunks = chunks[:end]
				continue
			}
			i++
		}
		return traverseResult{comRef.TargetHash(), chunks, 0}
	}
	return traverseResult{}
}