func (s *nomsShowTestSuite) TestNomsShow() { datasetName := "dsTest" str := spec.CreateValueSpecString("ldb", s.LdbDir, datasetName) s1 := types.String("test string") r := writeTestData(str, s1) res, _ := s.Run(main, []string{"show", str}) s.Equal(res1, res) str1 := spec.CreateValueSpecString("ldb", s.LdbDir, "#"+r.TargetHash().String()) res, _ = s.Run(main, []string{"show", str1}) s.Equal(res2, res) list := types.NewList(types.String("elem1"), types.Number(2), types.String("elem3")) r = writeTestData(str, list) res, _ = s.Run(main, []string{"show", str}) test.EqualsIgnoreHashes(s.T(), res3, res) str1 = spec.CreateValueSpecString("ldb", s.LdbDir, "#"+r.TargetHash().String()) res, _ = s.Run(main, []string{"show", str1}) s.Equal(res4, res) _ = writeTestData(str, s1) res, _ = s.Run(main, []string{"show", str}) test.EqualsIgnoreHashes(s.T(), res5, res) }
// TestTwoClientsWithEmptyDataset exercises optimistic concurrency between two
// dataset handles sharing one chunk store: a commit made through a handle with
// a stale (empty) view must fail once, then succeed on retry.
func TestTwoClientsWithEmptyDataset(t *testing.T) {
	assert := assert.New(t)
	id1 := "testdataset"
	cs := chunks.NewMemoryStore()
	dsx := newDS(id1, cs)
	dsy := newDS(id1, cs)

	// dsx: || -> |a|
	a := types.String("a")
	dsx, err := dsx.CommitValue(a)
	assert.NoError(err)
	assert.True(dsx.Head().Get(datas.ValueField).Equals(a))

	// dsy: || -> |b|
	_, ok := dsy.MaybeHead()
	assert.False(ok)
	b := types.String("b")
	dsy, err = dsy.CommitValue(b)
	assert.Error(err)
	// Commit failed, but dsy now has the latest head, so we should be able to
	// just try again.
	// dsy: |a| -> |b|
	dsy, err = dsy.CommitValue(b)
	assert.NoError(err)
	assert.True(dsy.Head().Get(datas.ValueField).Equals(b))
}
func (s *testSuite) TestCSVImportSkipRecordsCustomHeader() { input, err := ioutil.TempFile(s.TempDir, "") d.Chk.NoError(err) defer input.Close() defer os.Remove(input.Name()) _, err = input.WriteString("a,b\n") d.Chk.NoError(err) _, err = input.WriteString("7,8\n") d.Chk.NoError(err) setName := "csv" dataspec := spec.CreateValueSpecString("ldb", s.LdbDir, setName) stdout, stderr := s.Run(main, []string{"--no-progress", "--skip-records", "1", "--header", "x,y", input.Name(), dataspec}) s.Equal("", stdout) s.Equal("", stderr) cs := chunks.NewLevelDBStore(s.LdbDir, "", 1, false) ds := dataset.NewDataset(datas.NewDatabase(cs), setName) defer ds.Database().Close() defer os.RemoveAll(s.LdbDir) l := ds.HeadValue().(types.List) s.Equal(uint64(1), l.Len()) v := l.Get(0) st := v.(types.Struct) s.Equal(types.String("7"), st.Get("x")) s.Equal(types.String("8"), st.Get("y")) }
// doCommit manages concurrent access to the single logical piece of mutable
// state: the current Root. doCommit is optimistic in that it is attempting to
// update head making the assumption that currentRootRef is the hash of the
// current head. The call to UpdateRoot below will return an
// 'ErrOptimisticLockFailed' error if that assumption fails (e.g. because of a
// race with another writer) and the entire algorithm must be tried again.
// This method will also fail and return an 'ErrMergeNeeded' error if the
// |commit| is not a descendant of the current dataset head.
func (ds *databaseCommon) doCommit(datasetID string, commit types.Struct) error {
	currentRootRef, currentDatasets := ds.getRootAndDatasets()

	// TODO: This Commit will be orphaned if the tryUpdateRoot() below fails
	commitRef := ds.WriteValue(commit)

	// First commit in store is always fast-forward.
	if !currentRootRef.IsEmpty() {
		r, hasHead := currentDatasets.MaybeGet(types.String(datasetID))

		// First commit in dataset is always fast-forward.
		if hasHead {
			currentHeadRef := r.(types.Ref)
			// Re-committing the current head is a no-op, not an error.
			// Allow only fast-forward commits.
			if commitRef.Equals(currentHeadRef) {
				return nil
			}
			if !descendsFrom(commit, currentHeadRef, ds) {
				return ErrMergeNeeded
			}
		}
	}
	currentDatasets = currentDatasets.Set(types.String(datasetID), commitRef)
	return ds.tryUpdateRoot(currentDatasets, currentRootRef)
}
func (s *nomsShowTestSuite) TestNomsShow() { datasetName := "dsTest" str := test_util.CreateValueSpecString("ldb", s.LdbDir, datasetName) sp, err := spec.ParseDatasetSpec(str) d.Chk.NoError(err) ds, err := sp.Dataset() d.Chk.NoError(err) s1 := types.String("test string") r := writeTestData(ds, s1) s.Equal(res1, s.Run(main, []string{str})) spec1 := test_util.CreateValueSpecString("ldb", s.LdbDir, r.TargetHash().String()) s.Equal(res2, s.Run(main, []string{spec1})) ds, err = sp.Dataset() list := types.NewList(types.String("elem1"), types.Number(2), types.String("elem3")) r = writeTestData(ds, list) s.Equal(res3, s.Run(main, []string{str})) spec1 = test_util.CreateValueSpecString("ldb", s.LdbDir, r.TargetHash().String()) s.Equal(res4, s.Run(main, []string{spec1})) ds, err = sp.Dataset() _ = writeTestData(ds, s1) s.Equal(res5, s.Run(main, []string{str})) }
func TestReadToMap(t *testing.T) { assert := assert.New(t) ds := datas.NewDatabase(chunks.NewMemoryStore()) dataString := `a,1,true b,2,false ` r := NewCSVReader(bytes.NewBufferString(dataString), ',') headers := []string{"A", "B", "C"} kinds := KindSlice{types.StringKind, types.NumberKind, types.BoolKind} m := ReadToMap(r, headers, 0, kinds, ds) assert.Equal(uint64(2), m.Len()) assert.True(m.Type().Equals( types.MakeMapType(types.StringType, types.MakeStructType("", map[string]*types.Type{ "B": types.NumberType, "C": types.BoolType, })))) assert.True(m.Get(types.String("a")).Equals(types.NewStruct("", map[string]types.Value{ "B": types.Number(1), "C": types.Bool(true), }))) assert.True(m.Get(types.String("b")).Equals(types.NewStruct("", map[string]types.Value{ "B": types.Number(2), "C": types.Bool(false), }))) }
func TestReadToList(t *testing.T) { assert := assert.New(t) ds := datas.NewDatabase(chunks.NewMemoryStore()) dataString := `a,1,true b,2,false ` r := NewCSVReader(bytes.NewBufferString(dataString), ',') headers := []string{"A", "B", "C"} kinds := KindSlice{types.StringKind, types.NumberKind, types.BoolKind} l, typ := ReadToList(r, "test", headers, kinds, ds) assert.Equal(uint64(2), l.Len()) assert.Equal(types.StructKind, typ.Kind()) desc, ok := typ.Desc.(types.StructDesc) assert.True(ok) assert.Equal(desc.Len(), 3) assert.Equal(types.StringKind, desc.Field("A").Kind()) assert.Equal(types.NumberKind, desc.Field("B").Kind()) assert.Equal(types.BoolKind, desc.Field("C").Kind()) assert.True(l.Get(0).(types.Struct).Get("A").Equals(types.String("a"))) assert.True(l.Get(1).(types.Struct).Get("A").Equals(types.String("b"))) assert.True(l.Get(0).(types.Struct).Get("B").Equals(types.Number(1))) assert.True(l.Get(1).(types.Struct).Get("B").Equals(types.Number(2))) assert.True(l.Get(0).(types.Struct).Get("C").Equals(types.Bool(true))) assert.True(l.Get(1).(types.Struct).Get("C").Equals(types.Bool(false))) }
func TestHandlePostRoot(t *testing.T) { assert := assert.New(t) cs := chunks.NewTestStore() vs := types.NewValueStore(types.NewBatchStoreAdaptor(cs)) commit := NewCommit(types.String("head"), types.NewSet(), types.NewStruct("Meta", types.StructData{})) newHead := types.NewMap(types.String("dataset1"), vs.WriteValue(commit)) chnx := []chunks.Chunk{ chunks.NewChunk([]byte("abc")), types.EncodeValue(newHead, nil), } err := cs.PutMany(chnx) assert.NoError(err) // First attempt should fail, as 'last' won't match. u := &url.URL{} queryParams := url.Values{} queryParams.Add("last", chnx[0].Hash().String()) queryParams.Add("current", chnx[1].Hash().String()) u.RawQuery = queryParams.Encode() url := u.String() w := httptest.NewRecorder() HandleRootPost(w, newRequest("POST", "", url, nil, nil), params{}, cs) assert.Equal(http.StatusConflict, w.Code, "Handler error:\n%s", string(w.Body.Bytes())) // Now, update the root manually to 'last' and try again. assert.True(cs.UpdateRoot(chnx[0].Hash(), hash.Hash{})) w = httptest.NewRecorder() HandleRootPost(w, newRequest("POST", "", url, nil, nil), params{}, cs) assert.Equal(http.StatusOK, w.Code, "Handler error:\n%s", string(w.Body.Bytes())) }
func (suite *DatabaseSuite) TestDatabaseConcurrency() { datasetID := "ds1" var err error // Setup: // |a| <- |b| a := types.String("a") aCommit := NewCommit(a, types.NewSet(), types.EmptyStruct) suite.ds, err = suite.ds.Commit(datasetID, aCommit) b := types.String("b") bCommit := NewCommit(b, types.NewSet(types.NewRef(aCommit)), types.EmptyStruct) suite.ds, err = suite.ds.Commit(datasetID, bCommit) suite.NoError(err) suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(b)) // Important to create this here. ds2 := suite.makeDs(suite.cs) // Change 1: // |a| <- |b| <- |c| c := types.String("c") cCommit := NewCommit(c, types.NewSet(types.NewRef(bCommit)), types.EmptyStruct) suite.ds, err = suite.ds.Commit(datasetID, cCommit) suite.NoError(err) suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(c)) // Change 2: // |a| <- |b| <- |e| // Should be disallowed, Database returned by Commit() should have |c| as Head. e := types.String("e") eCommit := NewCommit(e, types.NewSet(types.NewRef(bCommit)), types.EmptyStruct) ds2, err = ds2.Commit(datasetID, eCommit) suite.Error(err) suite.True(ds2.Head(datasetID).Get(ValueField).Equals(c)) }
func (suite *DatabaseSuite) TestDatabaseDeleteConcurrent() { datasetID := "ds1" suite.Zero(suite.ds.Datasets().Len()) var err error // |a| a := types.String("a") aCommit := NewCommit(a, types.NewSet(), types.EmptyStruct) suite.ds, err = suite.ds.Commit(datasetID, aCommit) suite.NoError(err) // |a| <- |b| b := types.String("b") bCommit := NewCommit(b, types.NewSet(types.NewRef(aCommit)), types.EmptyStruct) ds2, err := suite.ds.Commit(datasetID, bCommit) suite.NoError(err) suite.True(suite.ds.Head(datasetID).Get(ValueField).Equals(a)) suite.True(ds2.Head(datasetID).Get(ValueField).Equals(b)) suite.ds, err = suite.ds.Delete(datasetID) suite.NoError(err) _, present := suite.ds.MaybeHead(datasetID) suite.False(present, "Dataset %s should not be present", datasetID) _, present = ds2.MaybeHead(datasetID) suite.True(present, "Dataset %s should be present", datasetID) // Get a fresh database, and verify that no databases are present newDs := suite.makeDs(suite.cs) suite.Equal(uint64(0), newDs.Datasets().Len()) newDs.Close() }
func (suite *DatabaseSuite) TestDatabaseDelete() { datasetID1, datasetID2 := "ds1", "ds2" datasets := suite.ds.Datasets() suite.Zero(datasets.Len()) // |a| var err error a := types.String("a") suite.ds, err = suite.ds.Commit(datasetID1, NewCommit(a, types.NewSet(), types.EmptyStruct)) suite.NoError(err) suite.True(suite.ds.Head(datasetID1).Get(ValueField).Equals(a)) // ds1; |a|, ds2: |b| b := types.String("b") suite.ds, err = suite.ds.Commit(datasetID2, NewCommit(b, types.NewSet(), types.EmptyStruct)) suite.NoError(err) suite.True(suite.ds.Head(datasetID2).Get(ValueField).Equals(b)) suite.ds, err = suite.ds.Delete(datasetID1) suite.NoError(err) suite.True(suite.ds.Head(datasetID2).Get(ValueField).Equals(b)) _, present := suite.ds.MaybeHead(datasetID1) suite.False(present, "Dataset %s should not be present", datasetID1) // Get a fresh database, and verify that only ds1 is present newDs := suite.makeDs(suite.cs) datasets = newDs.Datasets() suite.Equal(uint64(1), datasets.Len()) _, present = suite.ds.MaybeHead(datasetID2) suite.True(present, "Dataset %s should be present", datasetID2) newDs.Close() }
// TestNewCommit checks how the computed Type of a Commit struct evolves as
// commits of different value types and meta structs are chained together:
// parent refs force the Commit type to widen (unioning value and meta types).
func TestNewCommit(t *testing.T) {
	assert := assert.New(t)

	commitFieldNames := []string{MetaField, ParentsField, ValueField}
	assertTypeEquals := func(e, a *types.Type) {
		assert.True(a.Equals(e), "Actual: %s\nExpected %s", a.Describe(), e.Describe())
	}

	commit := NewCommit(types.Number(1), types.NewSet(), types.EmptyStruct)
	at := commit.Type()
	et := types.MakeStructType("Commit", commitFieldNames, []*types.Type{
		types.EmptyStructType,
		types.MakeSetType(types.MakeRefType(types.MakeCycleType(0))),
		types.NumberType,
	})
	assertTypeEquals(et, at)

	// Committing another Number: same type as before.
	commit2 := NewCommit(types.Number(2), types.NewSet(types.NewRef(commit)), types.EmptyStruct)
	at2 := commit2.Type()
	et2 := et
	assertTypeEquals(et2, at2)

	// Now commit a String: the parents' value type becomes Number|String.
	commit3 := NewCommit(types.String("Hi"), types.NewSet(types.NewRef(commit2)), types.EmptyStruct)
	at3 := commit3.Type()
	et3 := types.MakeStructType("Commit", commitFieldNames, []*types.Type{
		types.EmptyStructType,
		types.MakeSetType(types.MakeRefType(types.MakeStructType("Commit", commitFieldNames, []*types.Type{
			types.EmptyStructType,
			types.MakeSetType(types.MakeRefType(types.MakeCycleType(0))),
			types.MakeUnionType(types.NumberType, types.StringType),
		}))),
		types.StringType,
	})
	assertTypeEquals(et3, at3)

	// Now commit a String with MetaInfo: the parents' meta type becomes a
	// union of the empty struct and the new Meta struct.
	meta := types.NewStruct("Meta", types.StructData{"date": types.String("some date"), "number": types.Number(9)})
	metaType := types.MakeStructType("Meta", []string{"date", "number"}, []*types.Type{types.StringType, types.NumberType})
	assertTypeEquals(metaType, meta.Type())
	commit4 := NewCommit(types.String("Hi"), types.NewSet(types.NewRef(commit2)), meta)
	at4 := commit4.Type()
	et4 := types.MakeStructType("Commit", commitFieldNames, []*types.Type{
		metaType,
		types.MakeSetType(types.MakeRefType(types.MakeStructType("Commit", commitFieldNames, []*types.Type{
			types.MakeUnionType(types.EmptyStructType, metaType),
			types.MakeSetType(types.MakeRefType(types.MakeCycleType(0))),
			types.MakeUnionType(types.NumberType, types.StringType),
		}))),
		types.StringType,
	})
	assertTypeEquals(et4, at4)
}
func metaInfoForCommit(fileOrUrl, source, comment string) types.Struct { date := time.Now().UTC().Format("2006-01-02T15:04:05-0700") metaValues := types.StructData{ "date": types.String(date), fileOrUrl: types.String(source), } if comment != "" { metaValues["comment"] = types.String(comment) } return types.NewStruct("Meta", metaValues) }
func metaInfoForCommit(sourceType, sourceVal, comment string) types.Struct { date := time.Now().UTC().Format("2006-01-02T15:04:05-0700") metaValues := map[string]types.Value{ "date": types.String(date), } if sourceType != "" { metaValues[sourceType] = types.String(sourceVal) } if comment != "" { metaValues["comment"] = types.String(comment) } return types.NewStruct("Meta", metaValues) }
func (suite *LevelDBPutCacheSuite) SetupTest() { suite.cache = newOrderedChunkCache() suite.values = []types.Value{ types.String("abc"), types.String("def"), types.String("ghi"), types.String("jkl"), types.String("mno"), } suite.chnx = map[hash.Hash]chunks.Chunk{} for _, v := range suite.values { suite.chnx[v.Hash()] = types.EncodeValue(v, nil) } }
func (suite *HTTPBatchStoreSuite) TestPutChunksInOrder() { vals := []types.Value{ types.String("abc"), types.String("def"), } l := types.NewList() for _, val := range vals { suite.store.SchedulePut(types.EncodeValue(val, nil), 1, types.Hints{}) l = l.Append(types.NewRef(val)) } suite.store.SchedulePut(types.EncodeValue(l, nil), 2, types.Hints{}) suite.store.Flush() suite.Equal(3, suite.cs.Writes) }
func TestAbsolutePaths(t *testing.T) { assert := assert.New(t) s0, s1 := types.String("foo"), types.String("bar") list := types.NewList(s0, s1) emptySet := types.NewSet() db := datas.NewDatabase(chunks.NewMemoryStore()) db.WriteValue(s0) db.WriteValue(s1) db.WriteValue(list) db.WriteValue(emptySet) var err error db, err = db.Commit("ds", datas.NewCommit(list, types.NewSet(), types.EmptyStruct)) assert.NoError(err) head := db.Head("ds") resolvesTo := func(exp types.Value, str string) { p, err := NewAbsolutePath(str) assert.NoError(err) act := p.Resolve(db) if exp == nil { assert.Nil(act) } else { assert.True(exp.Equals(act), "%s Expected %s Actual %s", str, types.EncodedValue(exp), types.EncodedValue(act)) } } resolvesTo(head, "ds") resolvesTo(emptySet, "ds.parents") resolvesTo(list, "ds.value") resolvesTo(s0, "ds.value[0]") resolvesTo(s1, "ds.value[1]") resolvesTo(head, "#"+head.Hash().String()) resolvesTo(list, "#"+list.Hash().String()) resolvesTo(s0, "#"+s0.Hash().String()) resolvesTo(s1, "#"+s1.Hash().String()) resolvesTo(s0, "#"+list.Hash().String()+"[0]") resolvesTo(s1, "#"+list.Hash().String()+"[1]") resolvesTo(nil, "foo") resolvesTo(nil, "foo.parents") resolvesTo(nil, "foo.value") resolvesTo(nil, "foo.value[0]") resolvesTo(nil, "#"+types.String("baz").Hash().String()) resolvesTo(nil, "#"+types.String("baz").Hash().String()+"[0]") }
// NomsValueFromDecodedJSON takes a generic Go interface{} and recursively // tries to resolve the types within so that it can build up and return // a Noms Value with the same structure. // // Currently, the only types supported are the Go versions of legal JSON types: // Primitives: // - float64 // - bool // - string // - nil // // Composites: // - []interface{} // - map[string]interface{} func NomsValueFromDecodedJSON(o interface{}, useStruct bool) types.Value { switch o := o.(type) { case string: return types.String(o) case bool: return types.Bool(o) case float64: return types.Number(o) case nil: return nil case []interface{}: items := make([]types.Value, 0, len(o)) for _, v := range o { nv := NomsValueFromDecodedJSON(v, useStruct) if nv != nil { items = append(items, nv) } } return types.NewList(items...) case map[string]interface{}: var v types.Value if useStruct { fields := make(map[string]types.Value, len(o)) for k, v := range o { nv := NomsValueFromDecodedJSON(v, useStruct) if nv != nil { k := types.EscapeStructField(k) fields[k] = nv } } v = types.NewStruct("", fields) } else { kv := make([]types.Value, 0, len(o)*2) for k, v := range o { nv := NomsValueFromDecodedJSON(v, useStruct) if nv != nil { kv = append(kv, types.String(k), nv) } } v = types.NewMap(kv...) } return v default: d.Chk.Fail("Nomsification failed.", "I don't understand %+v, which is of type %s!\n", o, reflect.TypeOf(o).String()) } return nil }
func TestLDBDatabase(t *testing.T) { assert := assert.New(t) d1 := os.TempDir() dir, err := ioutil.TempDir(d1, "flags") assert.NoError(err) ldbDir := path.Join(dir, "store") spec := fmt.Sprintf("ldb:%s", path.Join(dir, "store")) cs := chunks.NewLevelDBStoreUseFlags(ldbDir, "") ds := datas.NewDatabase(cs) s1 := types.String("A String") s1Ref := ds.WriteValue(s1) ds.Commit("testDs", datas.NewCommit().Set(datas.ValueField, s1Ref)) ds.Close() sp, errRead := ParseDatabaseSpec(spec) assert.NoError(errRead) store, err := sp.Database() assert.NoError(err) assert.Equal(s1, store.ReadValue(s1.Hash())) store.Close() os.Remove(dir) }
func (fs *nomsFS) removeCommon(path string, typeCheck func(inode types.Value)) fuse.Status { fs.mdLock.Lock() defer fs.mdLock.Unlock() np, code := fs.getPath(path) if code != fuse.OK { return code } typeCheck(np.inode) parent := np.parent dir := parent.inode.Get("contents").(types.Struct) entries := dir.Get("entries").(types.Map) entries = entries.Remove(types.String(np.name)) dir = dir.Set("entries", entries) fs.deleteNode(np) fs.updateNode(parent, parent.inode.Set("contents", dir)) fs.splice(parent) fs.commit() return fuse.OK }
func (fs *nomsFS) Rename(oldPath string, newPath string, context *fuse.Context) fuse.Status { fs.mdLock.Lock() defer fs.mdLock.Unlock() // We find the node, new parent, and node representing the shared point in the hierarchy in order to then minimize repeated work when splicing the hierarchy back together below. np, nparent, nshared, fname, code := fs.getPaths(oldPath, newPath) if code != fuse.OK { return code } // Remove the node from the old spot in the hierarchy. oparent := np.parent dir := oparent.inode.Get("contents").(types.Struct) entries := dir.Get("entries").(types.Map) entries = entries.Remove(types.String(np.name)) dir = dir.Set("entries", entries) fs.updateNode(oparent, oparent.inode.Set("contents", dir)) // Insert it into the new spot in the hierarchy np.parent = nparent np.name = fname fs.splices(oparent, np, nshared) fs.commit() return fuse.OK }
func TestLDBObject(t *testing.T) { assert := assert.New(t) dir, err := ioutil.TempDir(os.TempDir(), "") assert.NoError(err) ldbpath := path.Join(dir, "xx-yy") dsId := "dsId" cs1 := chunks.NewLevelDBStoreUseFlags(ldbpath, "") store1 := datas.NewDatabase(cs1) dataset1 := dataset.NewDataset(store1, dsId) s1 := types.String("Commit Value") r1 := store1.WriteValue(s1) _, err = dataset1.Commit(r1) assert.NoError(err) store1.Close() spec2 := fmt.Sprintf("ldb:%s::%s", ldbpath, dsId) assert.NoError(err) sp1, err := ParseDatasetSpec(spec2) assert.NoError(err) dataset2, err := sp1.Dataset() assert.NoError(err) r2 := dataset2.HeadValue() s2 := r2.(types.Ref).TargetValue(dataset2.Database()) assert.Equal(s1, s2) dataset2.Database().Close() spec3 := fmt.Sprintf("ldb:%s::%s", ldbpath, s1.Hash().String()) sp3, err := ParsePathSpec(spec3) database, v3, err := sp3.Value() assert.Equal(s1, v3) database.Close() }
func (s *testSuite) TestCSVImporterToMap() { input, err := ioutil.TempFile(s.TempDir, "") d.Chk.NoError(err) defer input.Close() defer os.Remove(input.Name()) _, err = input.WriteString("a,b,c\n") d.Chk.NoError(err) for i := 0; i < 20; i++ { _, err = input.WriteString(fmt.Sprintf("a%d,%d,%d\n", i, i, i*2)) d.Chk.NoError(err) } _, err = input.Seek(0, 0) d.Chk.NoError(err) setName := "csv" dataspec := test_util.CreateValueSpecString("ldb", s.LdbDir, setName) out := s.Run(main, []string{"-no-progress", "-column-types", "String,Number,Number", "-dest-type", "map:1", dataspec, input.Name()}) s.Equal("", out) cs := chunks.NewLevelDBStore(s.LdbDir, "", 1, false) ds := dataset.NewDataset(datas.NewDatabase(cs), setName) defer ds.Database().Close() defer os.RemoveAll(s.LdbDir) m := ds.HeadValue().(types.Map) s.Equal(uint64(20), m.Len()) for i := 0; i < 20; i++ { m.Get(types.Number(i)).(types.Struct).Equals(types.NewStruct("", map[string]types.Value{ "a": types.String(fmt.Sprintf("a%d", i)), "c": types.Number(i * 2), })) } }
func TestReadRef(t *testing.T) { assert := assert.New(t) dir, err := ioutil.TempDir(os.TempDir(), "") assert.NoError(err) datasetId := "dsName" ldbPath := path.Join(dir, "/name") cs1 := chunks.NewLevelDBStoreUseFlags(ldbPath, "") database1 := datas.NewDatabase(cs1) dataset1 := dataset.NewDataset(database1, datasetId) commit := types.String("Commit Value") dataset1, err = dataset1.Commit(commit) assert.NoError(err) r1 := dataset1.Head().Hash() dataset1.Database().Close() spec2 := fmt.Sprintf("ldb:%s::%s", ldbPath, r1.String()) sp2, err := ParsePathSpec(spec2) assert.NoError(err) database, v2, err := sp2.Value() assert.NoError(err) assert.EqualValues(r1.String(), v2.Hash().String()) database.Close() }
func (s *nomsShowTestSuite) TestTruncation() { toNomsList := func(l []string) types.List { nv := []types.Value{} for _, v := range l { nv = append(nv, types.String(v)) } return types.NewList(nv...) } str := test_util.CreateDatabaseSpecString("ldb", s.LdbDir) dbSpec, err := spec.ParseDatabaseSpec(str) s.NoError(err) db, err := dbSpec.Database() s.NoError(err) t := dataset.NewDataset(db, "truncate") t, err = addCommit(t, "the first line") s.NoError(err) l := []string{"one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven"} _, err = addCommitWithValue(t, toNomsList(l)) s.NoError(err) db.Close() dsSpec := test_util.CreateValueSpecString("ldb", s.LdbDir, "truncate") s.Equal(truncRes1, s.Run(main, []string{"-graph", "-show-value=true", dsSpec})) s.Equal(diffTrunc1, s.Run(main, []string{"-graph", "-show-value=false", dsSpec})) s.Equal(truncRes2, s.Run(main, []string{"-graph", "-show-value=true", "-max-lines=-1", dsSpec})) s.Equal(diffTrunc2, s.Run(main, []string{"-graph", "-show-value=false", "-max-lines=-1", dsSpec})) s.Equal(truncRes3, s.Run(main, []string{"-graph", "-show-value=true", "-max-lines=0", dsSpec})) s.Equal(diffTrunc3, s.Run(main, []string{"-graph", "-show-value=false", "-max-lines=0", dsSpec})) }
func TestNomsStructDiff(t *testing.T) { assert := assert.New(t) expected := `(root) { - "four": "four" + "four": "four-diff" } ["three"] { - field3: "field3-data" + field3: "field3-data-diff" } ` fieldData := []interface{}{ "field1", "field1-data", "field2", "field2-data", "field3", "field3-data", "field4", "field4-data", } s1 := createStruct("TestData", fieldData...) s2 := s1.Set("field3", types.String("field3-data-diff")) m1 := createMap("one", 1, "two", 2, "three", s1, "four", "four") m2 := createMap("one", 1, "two", 2, "three", s2, "four", "four-diff") buf := util.NewBuffer(nil) Diff(buf, m1, m2) assert.Equal(expected, buf.String()) }
// createStruct builds a fixture struct whose fields derive deterministically
// from i: a parity flag, the number itself, and a fixed-width padded string.
func createStruct(i uint64) types.Value {
	fields := types.ValueSlice{
		types.Bool(i%2 == 0),
		types.Number(i),
		types.String(fmt.Sprintf("i am a 55 bytes............................%12d", i)),
	}
	return types.NewStructWithType(structType, fields)
}
func diffStructs(dq *diffQueue, w io.Writer, p types.Path, v1, v2 types.Struct) { changed := types.StructDiff(v1, v2) wroteHeader := false for _, field := range changed { f1 := v1.Get(field) f2 := v2.Get(field) if canCompare(f1, f2) { p1 := p.AddField(field) dq.PushBack(diffInfo{path: p1, key: types.String(field), v1: f1, v2: f2}) } else { wroteHeader = writeHeader(w, wroteHeader, p) line(w, subPrefix, types.String(field), f1) line(w, addPrefix, types.String(field), f2) } } }
func (suite *WalkTestSuite) SetupTest() { suite.vs = types.NewTestValueStore() suite.shouldSeeItem = types.String("zzz") suite.shouldSee = types.NewList(suite.shouldSeeItem) suite.deadValue = types.Number(0xDEADBEEF) suite.mustSkip = types.NewList(suite.deadValue) }
func TestLDBDataset(t *testing.T) { assert := assert.New(t) dir, err := ioutil.TempDir(os.TempDir(), "") assert.NoError(err) ldbPath := path.Join(dir, "name") cs := chunks.NewLevelDBStoreUseFlags(ldbPath, "") ds := datas.NewDatabase(cs) id := "dsName" set := dataset.NewDataset(ds, id) commit := types.String("Commit Value") set, err = set.Commit(commit) assert.NoError(err) ds.Close() spec := fmt.Sprintf("ldb:%s::%s", ldbPath, id) sp, err := ParseDatasetSpec(spec) assert.NoError(err) dataset, err := sp.Dataset() assert.NoError(err) assert.EqualValues(commit, dataset.HeadValue()) os.Remove(dir) }