func (suite *DatabaseSuite) TestDatabaseConcurrency() { datasetID := "ds1" var err error // Setup: // |a| <- |b| a := types.String("a") aCommit := NewCommit(a, types.NewSet(), types.EmptyStruct) suite.ds, err = suite.ds.Commit(datasetID, aCommit) b := types.String("b") bCommit := NewCommit(b, types.NewSet(types.NewRef(aCommit)), types.EmptyStruct) suite.ds, err = suite.ds.Commit(datasetID, bCommit) suite.NoError(err) suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(b)) // Important to create this here. ds2 := suite.makeDs(suite.cs) // Change 1: // |a| <- |b| <- |c| c := types.String("c") cCommit := NewCommit(c, types.NewSet(types.NewRef(bCommit)), types.EmptyStruct) suite.ds, err = suite.ds.Commit(datasetID, cCommit) suite.NoError(err) suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(c)) // Change 2: // |a| <- |b| <- |e| // Should be disallowed, Database returned by Commit() should have |c| as Head. e := types.String("e") eCommit := NewCommit(e, types.NewSet(types.NewRef(bCommit)), types.EmptyStruct) ds2, err = ds2.Commit(datasetID, eCommit) suite.Error(err) suite.True(ds2.Head(datasetID).Get(ValueField).Equals(c)) }
// Note: This test is asserting that findCommon correctly separates refs which are exclusive to |taller| from those which are |common|. func TestFindCommon(t *testing.T) { taller := &types.RefByHeight{} shorter := &types.RefByHeight{} for i := 0; i < 50; i++ { shorter.PushBack(types.NewRef(types.Number(i))) } for i := 50; i < 250; i++ { shorter.PushBack(types.NewRef(types.Number(i))) taller.PushBack(types.NewRef(types.Number(i))) } for i := 250; i < 275; i++ { taller.PushBack(types.NewRef(types.Number(i))) } sort.Sort(shorter) sort.Sort(taller) tallRefs, comRefs := findCommon(taller, shorter, 1) assert.Equal(t, 25, len(tallRefs)) assert.Equal(t, 200, len(comRefs)) assert.Equal(t, 0, len(*taller)) assert.Equal(t, 50, len(*shorter)) }
func TestWriteValue(t *testing.T) { assert := assert.New(t) factory := chunks.NewMemoryStoreFactory() defer factory.Shutter() router = setupWebServer(factory) defer func() { router = nil }() testString := "Now, what?" authKey = "anauthkeyvalue" w := httptest.NewRecorder() r, err := newRequest("GET", dbName+constants.RootPath, nil) assert.NoError(err) router.ServeHTTP(w, r) lastRoot := w.Body assert.Equal(http.StatusOK, w.Code) tval := types.Bool(true) wval := types.String(testString) chunk1 := types.EncodeValue(tval, nil) chunk2 := types.EncodeValue(wval, nil) refList := types.NewList(types.NewRef(tval), types.NewRef(wval)) chunk3 := types.EncodeValue(refList, nil) body := &bytes.Buffer{} // we would use this func, but it's private so use next line instead: serializeHints(body, map[ref.Ref]struct{}{hint: struct{}{}}) err = binary.Write(body, binary.BigEndian, uint32(0)) assert.NoError(err) chunks.Serialize(chunk1, body) chunks.Serialize(chunk2, body) chunks.Serialize(chunk3, body) w = httptest.NewRecorder() r, err = newRequest("POST", dbName+constants.WriteValuePath+"?access_token="+authKey, ioutil.NopCloser(body)) assert.NoError(err) router.ServeHTTP(w, r) assert.Equal(http.StatusCreated, w.Code) w = httptest.NewRecorder() args := fmt.Sprintf("&last=%s¤t=%s", lastRoot, types.NewRef(refList).TargetHash()) r, _ = newRequest("POST", dbName+constants.RootPath+"?access_token="+authKey+args, ioutil.NopCloser(body)) router.ServeHTTP(w, r) assert.Equal(http.StatusOK, w.Code) whash := wval.Hash() hints := map[hash.Hash]struct{}{whash: struct{}{}} rdr := buildGetRefsRequestBody(hints) r, _ = newRequest("POST", dbName+constants.GetRefsPath, rdr) r.Header.Add("Content-Type", "application/x-www-form-urlencoded") router.ServeHTTP(w, r) assert.Equal(http.StatusOK, w.Code) ms := chunks.NewMemoryStore() chunks.Deserialize(w.Body, ms, nil) v := types.DecodeValue(ms.Get(whash), datas.NewDatabase(ms)) assert.Equal(testString, string(v.(types.String))) }
// TestNewCommit verifies the structural type NewCommit produces and how that
// type evolves as commits with new value types and meta structs are chained
// onto earlier ones.
func TestNewCommit(t *testing.T) {
	assert := assert.New(t)
	commitFieldNames := []string{MetaField, ParentsField, ValueField}
	// Helper: assert two types are equal, printing both descriptions on mismatch.
	assertTypeEquals := func(e, a *types.Type) {
		assert.True(a.Equals(e), "Actual: %s\nExpected %s", a.Describe(), e.Describe())
	}

	// First commit of a Number: no parents, empty meta.
	commit := NewCommit(types.Number(1), types.NewSet(), types.EmptyStruct)
	at := commit.Type()
	et := types.MakeStructType("Commit", commitFieldNames, []*types.Type{
		types.EmptyStructType,
		types.MakeSetType(types.MakeRefType(types.MakeCycleType(0))),
		types.NumberType,
	})
	assertTypeEquals(et, at)

	// Committing another Number: same value type, so the commit type is unchanged.
	commit2 := NewCommit(types.Number(2), types.NewSet(types.NewRef(commit)), types.EmptyStruct)
	at2 := commit2.Type()
	et2 := et
	assertTypeEquals(et2, at2)

	// Now commit a String: the parent commit type's value field becomes the
	// union Number|String, while this commit's own value field is String.
	commit3 := NewCommit(types.String("Hi"), types.NewSet(types.NewRef(commit2)), types.EmptyStruct)
	at3 := commit3.Type()
	et3 := types.MakeStructType("Commit", commitFieldNames, []*types.Type{
		types.EmptyStructType,
		types.MakeSetType(types.MakeRefType(types.MakeStructType("Commit", commitFieldNames, []*types.Type{
			types.EmptyStructType,
			types.MakeSetType(types.MakeRefType(types.MakeCycleType(0))),
			types.MakeUnionType(types.NumberType, types.StringType),
		}))),
		types.StringType,
	})
	assertTypeEquals(et3, at3)

	// Now commit a String with MetaInfo: the parent side's meta field becomes
	// the union EmptyStruct|Meta.
	meta := types.NewStruct("Meta", types.StructData{"date": types.String("some date"), "number": types.Number(9)})
	metaType := types.MakeStructType("Meta", []string{"date", "number"}, []*types.Type{types.StringType, types.NumberType})
	assertTypeEquals(metaType, meta.Type())
	commit4 := NewCommit(types.String("Hi"), types.NewSet(types.NewRef(commit2)), meta)
	at4 := commit4.Type()
	et4 := types.MakeStructType("Commit", commitFieldNames, []*types.Type{
		metaType,
		types.MakeSetType(types.MakeRefType(types.MakeStructType("Commit", commitFieldNames, []*types.Type{
			types.MakeUnionType(types.EmptyStructType, metaType),
			types.MakeSetType(types.MakeRefType(types.MakeCycleType(0))),
			types.MakeUnionType(types.NumberType, types.StringType),
		}))),
		types.StringType,
	})
	assertTypeEquals(et4, at4)
}
func TestNomsRefDiff(t *testing.T) { expected := "- fckcbt7nk5jl4arco2dk7r9nj7abb6ci\n+ i7d3u5gekm48ot419t2cot6cnl7ltcah\n" l1 := createList(1) l2 := createList(2) r1 := types.NewRef(l1) r2 := types.NewRef(l2) buf := util.NewBuffer(nil) Diff(buf, r1, r2) test.EqualsIgnoreHashes(t, expected, buf.String()) }
func (suite *DatabaseSuite) TestDatabaseDeleteConcurrent() { datasetID := "ds1" suite.Zero(suite.ds.Datasets().Len()) var err error // |a| a := types.String("a") aCommit := NewCommit(a, types.NewSet(), types.EmptyStruct) suite.ds, err = suite.ds.Commit(datasetID, aCommit) suite.NoError(err) // |a| <- |b| b := types.String("b") bCommit := NewCommit(b, types.NewSet(types.NewRef(aCommit)), types.EmptyStruct) ds2, err := suite.ds.Commit(datasetID, bCommit) suite.NoError(err) suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(a)) suite.True(ds2.Head(datasetID).Get(ValueField).Equals(b)) suite.ds, err = suite.ds.Delete(datasetID) suite.NoError(err) _, present := suite.ds.MaybeHead(datasetID) suite.False(present, "Dataset %s should not be present", datasetID) _, present = ds2.MaybeHead(datasetID) suite.True(present, "Dataset %s should be present", datasetID) // Get a fresh database, and verify that no databases are present newDs := suite.makeDs(suite.cs) suite.Equal(uint64(0), newDs.Datasets().Len()) newDs.Close() }
func TestHandleWriteValue(t *testing.T) { assert := assert.New(t) cs := chunks.NewTestStore() ds := NewDatabase(cs) l := types.NewList( ds.WriteValue(types.Bool(true)), ds.WriteValue(types.Bool(false)), ) ds.WriteValue(l) hint := l.Hash() newItem := types.NewEmptyBlob() itemChunk := types.EncodeValue(newItem, nil) l2 := l.Insert(1, types.NewRef(newItem)) listChunk := types.EncodeValue(l2, nil) body := &bytes.Buffer{} serializeHints(body, map[hash.Hash]struct{}{hint: struct{}{}}) sz := chunks.NewSerializer(body) sz.Put(itemChunk) sz.Put(listChunk) sz.Close() w := httptest.NewRecorder() HandleWriteValue(w, &http.Request{Body: ioutil.NopCloser(body), Method: "POST"}, params{}, cs) if assert.Equal(http.StatusCreated, w.Code, "Handler error:\n%s", string(w.Body.Bytes())) { ds2 := NewDatabase(cs) v := ds2.ReadValue(l2.Hash()) if assert.NotNil(v) { assert.True(v.Equals(l2), "%+v != %+v", v, l2) } } }
func TestHandleWriteValueBackpressure(t *testing.T) { assert := assert.New(t) cs := &backpressureCS{ChunkStore: chunks.NewMemoryStore()} ds := NewDatabase(cs) l := types.NewList( ds.WriteValue(types.Bool(true)), ds.WriteValue(types.Bool(false)), ) ds.WriteValue(l) hint := l.Hash() newItem := types.NewEmptyBlob() itemChunk := types.EncodeValue(newItem, nil) l2 := l.Insert(1, types.NewRef(newItem)) listChunk := types.EncodeValue(l2, nil) body := &bytes.Buffer{} serializeHints(body, map[hash.Hash]struct{}{hint: struct{}{}}) sz := chunks.NewSerializer(body) sz.Put(itemChunk) sz.Put(listChunk) sz.Close() w := httptest.NewRecorder() HandleWriteValue(w, &http.Request{Body: ioutil.NopCloser(body), Method: "POST"}, params{}, cs) if assert.Equal(httpStatusTooManyRequests, w.Code, "Handler error:\n%s", string(w.Body.Bytes())) { hashes := deserializeHashes(w.Body) assert.Len(hashes, 1) assert.Equal(l2.Hash(), hashes[0]) } }
func TestExplicitBranchUsingDatasets(t *testing.T) { assert := assert.New(t) id1 := "testdataset" id2 := "othertestdataset" cs := chunks.NewMemoryStore() ds1 := newDS(id1, cs) // ds1: |a| a := types.String("a") ds1, err := ds1.CommitValue(a) assert.NoError(err) assert.True(ds1.Head().Get(datas.ValueField).Equals(a)) // ds1: |a| // \ds2 ds2 := newDS(id2, cs) ds2, err = ds2.CommitValue(ds1.Head().Get(datas.ValueField)) assert.NoError(err) assert.True(ds2.Head().Get(datas.ValueField).Equals(a)) // ds1: |a| <- |b| b := types.String("b") ds1, err = ds1.CommitValue(b) assert.NoError(err) assert.True(ds1.Head().Get(datas.ValueField).Equals(b)) // ds1: |a| <- |b| // \ds2 <- |c| c := types.String("c") ds2, err = ds2.CommitValue(c) assert.NoError(err) assert.True(ds2.Head().Get(datas.ValueField).Equals(c)) // ds1: |a| <- |b| <--|d| // \ds2 <- |c| <--/ mergeParents := types.NewSet(types.NewRef(ds1.Head()), types.NewRef(ds2.Head())) d := types.String("d") ds2, err = ds2.Commit(d, CommitOptions{Parents: mergeParents}) assert.NoError(err) assert.True(ds2.Head().Get(datas.ValueField).Equals(d)) ds1, err = ds1.Commit(d, CommitOptions{Parents: mergeParents}) assert.NoError(err) assert.True(ds1.Head().Get(datas.ValueField).Equals(d)) }
func TestValidateRef(t *testing.T) { ds := createTestDataset("test") b := types.Bool(true) r := ds.Database().WriteValue(b) assert.Panics(t, func() { ds.validateRefAsCommit(r) }) assert.Panics(t, func() { ds.validateRefAsCommit(types.NewRef(b)) }) }
func TestPullWithMeta(t *testing.T) { assert := assert.New(t) sink := createTestDataset("sink") source := createTestDataset("source") v1 := types.Number(1) m1 := types.NewStruct("Meta", types.StructData{ "name": types.String("one"), }) source, err := source.Commit(v1, CommitOptions{Meta: m1}) assert.NoError(err) v2 := types.Number(2) m2 := types.NewStruct("Meta", types.StructData{ "name": types.String("two"), }) source, err = source.Commit(v2, CommitOptions{Meta: m2}) assert.NoError(err) h2 := source.Head() v3 := types.Number(3) m3 := types.NewStruct("Meta", types.StructData{ "name": types.String("three"), }) source, err = source.Commit(v3, CommitOptions{Meta: m3}) assert.NoError(err) v4 := types.Number(4) m4 := types.NewStruct("Meta", types.StructData{ "name": types.String("three"), }) source, err = source.Commit(v4, CommitOptions{Meta: m4}) assert.NoError(err) h4 := source.Head() sink, err = sink.Pull(source.Database(), types.NewRef(h2), 1, nil) assert.NoError(err) sink, err = sink.Pull(source.Database(), types.NewRef(h4), 1, nil) assert.NoError(err) assert.True(source.Head().Equals(sink.Head())) }
func (suite *HTTPBatchStoreSuite) TestPutChunkWithHints() { vals := []types.Value{ types.String("abc"), types.String("def"), } chnx := []chunks.Chunk{ types.EncodeValue(vals[0], nil), types.EncodeValue(vals[1], nil), } suite.NoError(suite.cs.PutMany(chnx)) l := types.NewList(types.NewRef(vals[0]), types.NewRef(vals[1])) suite.store.SchedulePut(types.EncodeValue(l, nil), 2, types.Hints{ chnx[0].Hash(): struct{}{}, chnx[1].Hash(): struct{}{}, }) suite.store.Flush() suite.Equal(3, suite.cs.Writes) }
func (suite *DatabaseSuite) TestReadWriteCachePersists() { var err error var v types.Value = types.Bool(true) suite.NotEqual(hash.Hash{}, suite.ds.WriteValue(v)) r := suite.ds.WriteValue(v) commit := NewCommit(v, types.NewSet(), types.EmptyStruct) suite.ds, err = suite.ds.Commit("foo", commit) suite.NoError(err) suite.Equal(1, suite.cs.Writes-writesOnCommit) newCommit := NewCommit(r, types.NewSet(types.NewRef(commit)), types.EmptyStruct) suite.ds, err = suite.ds.Commit("foo", newCommit) suite.NoError(err) }
func (suite *HTTPBatchStoreSuite) TestPutChunksInOrder() { vals := []types.Value{ types.String("abc"), types.String("def"), } l := types.NewList() for _, val := range vals { suite.store.SchedulePut(types.EncodeValue(val, nil), 1, types.Hints{}) l = l.Append(types.NewRef(val)) } suite.store.SchedulePut(types.EncodeValue(l, nil), 2, types.Hints{}) suite.store.Flush() suite.Equal(3, suite.cs.Writes) }
func TestPullDeepRefTopDown(t *testing.T) { assert := assert.New(t) sink := createTestDataset("sink") source := createTestDataset("source") sourceInitialValue := types.NewList( types.NewList(NewList(source)), types.NewSet(NewSet(source)), types.NewMap(NewMap(source), NewMap(source))) source, err := source.Commit(sourceInitialValue) assert.NoError(err) sink, err = sink.pull(source.Database(), types.NewRef(source.Head()), 1) assert.NoError(err) assert.True(source.Head().Equals(sink.Head())) }
func TestPullFirstCommitTopDown(t *testing.T) { assert := assert.New(t) sink := createTestDataset("sink") source := createTestDataset("source") sourceInitialValue := types.NewMap( types.String("first"), NewList(source), types.String("second"), NewList(source, types.Number(2))) NewList(sink) NewList(sink, types.Number(2)) source, err := source.Commit(sourceInitialValue) assert.NoError(err) sink, err = sink.pull(source.Database(), types.NewRef(source.Head()), 1) assert.NoError(err) assert.True(source.Head().Equals(sink.Head())) }
func (suite *HTTPBatchStoreSuite) TestPutChunksBackpressure() { bpcs := &backpressureCS{ChunkStore: suite.cs} bs := newHTTPBatchStoreForTest(bpcs) defer bs.Close() defer bpcs.Close() vals := []types.Value{ types.String("abc"), types.String("def"), } l := types.NewList() for _, v := range vals { bs.SchedulePut(types.EncodeValue(v, nil), 1, types.Hints{}) l = l.Append(types.NewRef(v)) } bs.SchedulePut(types.EncodeValue(l, nil), 2, types.Hints{}) bs.Flush() suite.Equal(6, suite.cs.Writes) }
func main() { cpuCount := runtime.NumCPU() runtime.GOMAXPROCS(cpuCount) flag.Usage = func() { fmt.Fprintf(os.Stderr, "Moves datasets between or within databases\n\n") fmt.Fprintf(os.Stderr, "noms sync [options] <source-object> <dest-dataset>\n\n") flag.PrintDefaults() fmt.Fprintf(os.Stderr, "\nFor detailed information on spelling objects and datasets, see: at https://github.com/attic-labs/noms/blob/master/doc/spelling.md.\n\n") } spec.RegisterDatabaseFlags() flag.Parse() if flag.NArg() != 2 { util.CheckError(errors.New("expected a source object and destination dataset")) } sourceStore, sourceObj, err := spec.GetPath(flag.Arg(0)) util.CheckError(err) defer sourceStore.Close() sinkDataset, err := spec.GetDataset(flag.Arg(1)) util.CheckError(err) defer sinkDataset.Database().Close() err = d.Try(func() { defer profile.MaybeStartProfile().Stop() var err error sinkDataset, err = sinkDataset.Pull(sourceStore, types.NewRef(sourceObj), int(*p)) d.Exp.NoError(err) }) if err != nil { log.Fatal(err) } }
func TestPullTopDown(t *testing.T) { assert := assert.New(t) sink := createTestDataset("sink") source := createTestDataset("source") // Give sink and source some initial shared context. sourceInitialValue := types.NewMap( types.String("first"), NewList(source), types.String("second"), NewList(source, types.Number(2))) sinkInitialValue := types.NewMap( types.String("first"), NewList(sink), types.String("second"), NewList(sink, types.Number(2))) var err error source, err = source.Commit(sourceInitialValue) assert.NoError(err) sink, err = sink.Commit(sinkInitialValue) assert.NoError(err) // Add some new stuff to source. updatedValue := sourceInitialValue.Set( types.String("third"), NewList(source, types.Number(3))) source, err = source.Commit(updatedValue) assert.NoError(err) // Add some more stuff, so that source isn't directly ahead of sink. updatedValue = updatedValue.Set( types.String("fourth"), NewList(source, types.Number(4))) source, err = source.Commit(updatedValue) assert.NoError(err) sink, err = sink.pull(source.Database(), types.NewRef(source.Head()), 1) assert.NoError(err) assert.True(source.Head().Equals(sink.Head())) }
// TestWriteRefToNonexistentValue verifies that writing a ref whose target
// value was never stored panics.
func (suite *DatabaseSuite) TestWriteRefToNonexistentValue() {
	writeDangling := func() {
		suite.ds.WriteValue(types.NewRef(types.Bool(true)))
	}
	suite.Panics(writeDangling)
}
// TestDatabaseCommit walks a dataset through a chain of commits: commits that
// descend from the current head succeed, while commits whose parent is not
// the current head return an error and leave the head unchanged.
func (suite *DatabaseSuite) TestDatabaseCommit() {
	datasetID := "ds1"
	datasets := suite.ds.Datasets()
	suite.Zero(datasets.Len())

	// |a|
	a := types.String("a")
	aCommit := NewCommit(a, types.NewSet(), types.EmptyStruct)
	ds2, err := suite.ds.Commit(datasetID, aCommit)
	suite.NoError(err)

	// The old database still has no head.
	_, ok := suite.ds.MaybeHead(datasetID)
	suite.False(ok)
	_, ok = suite.ds.MaybeHeadRef(datasetID)
	suite.False(ok)

	// The new database has |a|.
	aCommit1 := ds2.Head(datasetID)
	suite.True(aCommit1.Get(ValueField).Equals(a))
	aRef1 := ds2.HeadRef(datasetID)
	suite.Equal(aCommit1.Hash(), aRef1.TargetHash())
	suite.Equal(uint64(1), aRef1.Height())
	suite.ds = ds2

	// |a| <- |b|
	b := types.String("b")
	bCommit := NewCommit(b, types.NewSet(types.NewRef(aCommit)), types.EmptyStruct)
	suite.ds, err = suite.ds.Commit(datasetID, bCommit)
	suite.NoError(err)
	suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(b))
	suite.Equal(uint64(2), suite.ds.HeadRef(datasetID).Height())

	// |a| <- |b|
	//   \----|c|
	// Should be disallowed.
	c := types.String("c")
	cCommit := NewCommit(c, types.NewSet(types.NewRef(aCommit)), types.EmptyStruct)
	suite.ds, err = suite.ds.Commit(datasetID, cCommit)
	suite.Error(err)
	suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(b))

	// |a| <- |b| <- |d|
	d := types.String("d")
	dCommit := NewCommit(d, types.NewSet(types.NewRef(bCommit)), types.EmptyStruct)
	suite.ds, err = suite.ds.Commit(datasetID, dCommit)
	suite.NoError(err)
	suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(d))
	suite.Equal(uint64(3), suite.ds.HeadRef(datasetID).Height())

	// Attempt to recommit |b| with |a| as parent.
	// Should be disallowed.
	suite.ds, err = suite.ds.Commit(datasetID, bCommit)
	suite.Error(err)
	suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(d))

	// Add a commit to a different datasetId
	_, err = suite.ds.Commit("otherDs", aCommit)
	suite.NoError(err)

	// Get a fresh database, and verify that both datasets are present
	newDs := suite.makeDs(suite.cs)
	datasets2 := newDs.Datasets()
	suite.Equal(uint64(2), datasets2.Len())
	newDs.Close()
}
// Initialize a new CommitIterator with the first commit to be printed. func NewCommitIterator(db datas.Database, commit types.Struct) *CommitIterator { cr := types.NewRef(commit) return &CommitIterator{db: db, branches: branchList{branch{cr: cr, commit: commit}}} }