// validateRefAsCommit reads the value targeted by r from the dataset's
// store and panics unless it exists and is a Commit struct.
func (ds *Dataset) validateRefAsCommit(r types.Ref) types.Struct {
	v := ds.store.ReadValue(r.TargetHash())

	if v == nil {
		panic(r.TargetHash().String() + " not found")
	}
	if !datas.IsCommitType(v.Type()) {
		panic("Not a commit: " + types.EncodedValue(v))
	}
	return v.(types.Struct)
}
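// Usage sketch (hypothetical, not part of the original source): one way a
// caller might lean on validateRefAsCommit, walking a commit's parent refs
// and validating each. The "parents" field name follows the Commit struct
// layout used elsewhere in Noms; ds.store and the types/datas packages are
// assumed as above.
func (ds *Dataset) validateParents(commit types.Struct) []types.Struct {
	validated := []types.Struct{}
	// A Commit's "parents" field is a Set of Refs to earlier commits.
	commit.Get("parents").(types.Set).IterAll(func(v types.Value) {
		// Panics (via validateRefAsCommit) if any parent ref is
		// dangling or does not point at a Commit.
		validated = append(validated, ds.validateRefAsCommit(v.(types.Ref)))
	})
	return validated
}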
func TestAbsolutePaths(t *testing.T) {
	assert := assert.New(t)

	s0, s1 := types.String("foo"), types.String("bar")
	list := types.NewList(s0, s1)
	emptySet := types.NewSet()

	db := datas.NewDatabase(chunks.NewMemoryStore())
	db.WriteValue(s0)
	db.WriteValue(s1)
	db.WriteValue(list)
	db.WriteValue(emptySet)

	var err error
	db, err = db.Commit("ds", datas.NewCommit(list, types.NewSet(), types.EmptyStruct))
	assert.NoError(err)
	head := db.Head("ds")

	resolvesTo := func(exp types.Value, str string) {
		p, err := NewAbsolutePath(str)
		assert.NoError(err)
		act := p.Resolve(db)
		if exp == nil {
			assert.Nil(act)
		} else {
			assert.True(exp.Equals(act), "%s Expected %s Actual %s", str, types.EncodedValue(exp), types.EncodedValue(act))
		}
	}

	resolvesTo(head, "ds")
	resolvesTo(emptySet, "ds.parents")
	resolvesTo(list, "ds.value")
	resolvesTo(s0, "ds.value[0]")
	resolvesTo(s1, "ds.value[1]")
	resolvesTo(head, "#"+head.Hash().String())
	resolvesTo(list, "#"+list.Hash().String())
	resolvesTo(s0, "#"+s0.Hash().String())
	resolvesTo(s1, "#"+s1.Hash().String())
	resolvesTo(s0, "#"+list.Hash().String()+"[0]")
	resolvesTo(s1, "#"+list.Hash().String()+"[1]")

	resolvesTo(nil, "foo")
	resolvesTo(nil, "foo.parents")
	resolvesTo(nil, "foo.value")
	resolvesTo(nil, "foo.value[0]")
	resolvesTo(nil, "#"+types.String("baz").Hash().String())
	resolvesTo(nil, "#"+types.String("baz").Hash().String()+"[0]")
}
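// A minimal sketch (hypothetical, not in the original source) of the same
// API used outside a test: parse an absolute path string and resolve it
// against a database, returning an error instead of asserting. Assumes a
// fmt import and the NewAbsolutePath/Resolve signatures exercised above.
func resolvePath(db datas.Database, str string) (types.Value, error) {
	p, err := NewAbsolutePath(str)
	if err != nil {
		return nil, err
	}
	if v := p.Resolve(db); v != nil {
		return v, nil
	}
	return nil, fmt.Errorf("%s does not resolve to a value", str)
}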
func main() {
	// The delimiter is parsed as a rune, which may be multiple bytes long.
	// https://blog.golang.org/strings
	delimiter := flag.String("delimiter", ",", "field delimiter for csv file, must be exactly one character long.")
	comment := flag.String("comment", "", "comment to add to commit's meta data")
	header := flag.String("header", "", "header row. If empty, we'll use the first row of the file")
	name := flag.String("name", "Row", "struct name. The user-visible name to give to the struct type that will hold each row of data.")
	columnTypes := flag.String("column-types", "", "a comma-separated list of types representing the desired type of each column. If absent, all types default to String")
	pathDescription := "noms path to blob to import"
	path := flag.String("path", "", pathDescription)
	flag.StringVar(path, "p", "", pathDescription)
	dateFlag := flag.String("date", "", fmt.Sprintf(`date of commit in ISO 8601 format ("%s"). By default, the current date is used.`, dateFormat))
	noProgress := flag.Bool("no-progress", false, "prevents progress from being output if true")
	destType := flag.String("dest-type", "list", "the destination type to import to. Can be 'list' or 'map:<pk>', where <pk> is the index position (0-based) of the column that is the unique identifier for the row")
	skipRecords := flag.Uint("skip-records", 0, "number of records to skip at beginning of file")
	destTypePattern := regexp.MustCompile(`^(list|map):(\d+)$`)

	spec.RegisterDatabaseFlags(flag.CommandLine)
	profile.RegisterProfileFlags(flag.CommandLine)
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: csv-import [options] <csvfile> <dataset>\n\n")
		flag.PrintDefaults()
	}
	flag.Parse(true)

	// Exactly one input source must be given: either a <csvfile> argument
	// or a noms path via -p, but not both.
	var err error
	switch {
	case flag.NArg() == 0:
		err = errors.New("Maybe you put options after the dataset?")
	case flag.NArg() == 1 && *path == "":
		err = errors.New("If <csvfile> isn't specified, you must specify a noms path with -p")
	case flag.NArg() == 2 && *path != "":
		err = errors.New("Cannot specify both <csvfile> and a noms path with -p")
	case flag.NArg() > 2:
		err = errors.New("Too many arguments")
	}
	d.CheckError(err)

	var date = *dateFlag
	if date == "" {
		date = time.Now().UTC().Format(dateFormat)
	} else {
		_, err := time.Parse(dateFormat, date)
		d.CheckErrorNoUsage(err)
	}

	defer profile.MaybeStartProfile().Stop()

	var r io.Reader
	var size uint64
	var filePath string
	var dataSetArgN int

	if *path != "" {
		db, val, err := spec.GetPath(*path)
		d.CheckError(err)
		if val == nil {
			d.CheckError(fmt.Errorf("Path %s not found\n", *path))
		}
		blob, ok := val.(types.Blob)
		if !ok {
			d.CheckError(fmt.Errorf("Path %s not a Blob: %s\n", *path, types.EncodedValue(val.Type())))
		}
		defer db.Close()
		r = blob.Reader()
		size = blob.Len()
		dataSetArgN = 0
	} else {
		filePath = flag.Arg(0)
		res, err := os.Open(filePath)
		d.CheckError(err)
		defer res.Close()
		fi, err := res.Stat()
		d.CheckError(err)
		r = res
		size = uint64(fi.Size())
		dataSetArgN = 1
	}

	if !*noProgress {
		r = progressreader.New(r, getStatusPrinter(size))
	}

	comma, err := csv.StringToRune(*delimiter)
	d.CheckErrorNoUsage(err)

	var dest int
	var pk int
	if *destType == "list" {
		dest = destList
	} else if match := destTypePattern.FindStringSubmatch(*destType); match != nil {
		dest = destMap
		pk, err = strconv.Atoi(match[2])
		d.CheckErrorNoUsage(err)
	} else {
		fmt.Println("Invalid dest-type: ", *destType)
		return
	}

	cr := csv.NewCSVReader(r, comma)
	for i := uint(0); i < *skipRecords; i++ {
		cr.Read()
	}

	var headers []string
	if *header == "" {
		headers, err = cr.Read()
		d.PanicIfError(err)
	} else {
		headers = strings.Split(*header, string(comma))
	}

	ds, err := spec.GetDataset(flag.Arg(dataSetArgN))
	d.CheckError(err)
	defer ds.Database().Close()

	kinds := []types.NomsKind{}
	if *columnTypes != "" {
		kinds = csv.StringsToKinds(strings.Split(*columnTypes, ","))
	}

	var value types.Value
	if dest == destList {
		value, _ = csv.ReadToList(cr, *name, headers, kinds, ds.Database())
	} else {
		value = csv.ReadToMap(cr, headers, pk, kinds, ds.Database())
	}

	mi := metaInfoForCommit(date, filePath, *path, *comment)
	_, err = ds.Commit(value, dataset.CommitOptions{Meta: mi})
	if !*noProgress {
		status.Clear()
	}
	d.PanicIfError(err)
}
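// The declarations below are referenced by main but not shown above. They
// are hedged sketches of plausible implementations, not the originals: the
// dest* values, the dateFormat layout, the metadata field names ("date",
// "inputFile", "inputPath", "comment"), and the status message format are
// all assumptions.

const (
	destList = iota
	destMap
)

// An ISO 8601-style layout; the exact layout string is an assumption.
const dateFormat = "2006-01-02T15:04:05-0700"

// metaInfoForCommit builds the commit metadata struct. Whether the data came
// from a local file or a noms path determines which field records the source.
func metaInfoForCommit(date, filePath, nomsPath, comment string) types.Struct {
	fileOrNomsPath := "inputPath"
	path := nomsPath
	if path == "" {
		path = filePath
		fileOrNomsPath = "inputFile"
	}
	metaValues := map[string]types.Value{
		"date":         types.String(date),
		fileOrNomsPath: types.String(path),
	}
	if comment != "" {
		metaValues["comment"] = types.String(comment)
	}
	return types.NewStruct("Meta", metaValues)
}

// getStatusPrinter returns a callback for progressreader.New that reports
// bytes read against the expected total on a single status line.
func getStatusPrinter(expected uint64) progressreader.Callback {
	return func(seen uint64) {
		status.Printf("%d of %d bytes read...", seen, expected)
	}
}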