Example #1
0
// main wires up the dataset command-line flags and dispatches the
// requested actions: add an mp3 (-add) and/or list the stored songs
// (-list). With no usable dataset it prints usage and exits.
func main() {
	dsFlags := dataset.NewFlags()
	flag.Parse()

	ds := dsFlags.CreateDataset()
	if ds == nil {
		flag.Usage()
		return
	}
	defer ds.Store().Close()

	if path := *addFlag; path != "" {
		addMp3(ds, path)
	}
	if *listFlag {
		listSongs(ds)
	}
}
Example #2
0
// main imports a directory tree of XML documents into a noms dataset.
// It walks the directory given as the first positional argument,
// decodes every .xml file into a noms value using a pool of worker
// goroutines, and commits a list of refs to the decoded values.
func main() {
	// d.Try turns d.Exp/d.Chk failures raised inside the closure into
	// an error return instead of an escaping panic.
	err := d.Try(func() {
		dsFlags := dataset.NewFlags()
		flag.Usage = customUsage
		flag.Parse()
		ds := dsFlags.CreateDataset()
		dir := flag.Arg(0)
		if ds == nil || dir == "" {
			flag.Usage()
			return
		}
		defer ds.Store().Close()

		// Optional CPU profiling, enabled via util's own flags.
		if util.MaybeStartCPUProfile() {
			defer util.StopCPUProfile()
		}

		cpuCount := runtime.NumCPU()
		runtime.GOMAXPROCS(cpuCount)

		// Pipeline: getFilePaths -> filesChan -> importXml workers -> refsChan.
		// The buffers let the walker and the workers run ahead of each other.
		filesChan := make(chan fileIndex, 1024)
		refsChan := make(chan refIndex, 1024)

		// getFilePaths walks dir and sends each .xml file path tagged with
		// a monotonically increasing index, so the original walk order can
		// be reconstructed after the workers finish out of order.
		getFilePaths := func() {
			index := 0
			err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
				d.Exp.NoError(err, "Cannot traverse directories")
				if !info.IsDir() && filepath.Ext(path) == ".xml" {
					filesChan <- fileIndex{path, index}
					index++
				}

				return nil
			})
			d.Exp.NoError(err)
			// Closing filesChan tells every worker there is no more input.
			close(filesChan)
		}

		wg := sync.WaitGroup{}
		// importXml is one worker: it decodes XML files from filesChan into
		// noms values, optionally writes them to the store, and forwards
		// (ref, original index) pairs on refsChan.
		importXml := func() {
			expectedType := util.NewMapOfStringToValue()
			for f := range filesChan {
				file, err := os.Open(f.path)
				d.Exp.NoError(err, "Error getting XML")

				xmlObject, err := mxj.NewMapXmlReader(file)
				d.Exp.NoError(err, "Error decoding XML")
				object := xmlObject.Old()
				// Close eagerly: a defer here would accumulate open files
				// until the worker goroutine exits.
				file.Close()

				nomsObj := util.NomsValueFromDecodedJSON(object)
				// Every document is expected to decode to a map of string to value.
				d.Chk.IsType(expectedType, nomsObj)
				r := ref.Ref{}

				// NOTE(review): -noIO presumably skips store writes for
				// benchmarking; r then stays the zero ref — confirm intent.
				if !*noIO {
					r = ds.Store().WriteValue(nomsObj).TargetRef()
				}

				refsChan <- refIndex{r, f.index}
			}

			wg.Done()
		}

		go getFilePaths()
		// 8 workers per CPU: decoding alternates with file/store I/O, so
		// oversubscribing keeps the CPUs busy while workers block on I/O.
		for i := 0; i < cpuCount*8; i++ {
			wg.Add(1)
			go importXml()
		}
		go func() {
			wg.Wait()
			close(refsChan) // done converting xml to noms
		}()

		// Drain all results, then restore the original walk order before
		// building the final list of refs.
		refList := refIndexList{}
		for r := range refsChan {
			refList = append(refList, r)
		}
		sort.Sort(refList)

		refs := make(util.ListOfRefOfMapOfStringToValueDef, len(refList))
		for idx, r := range refList {
			refs[idx] = r.ref
		}

		if !*noIO {
			_, err := ds.Commit(refs.New())
			d.Exp.NoError(err)
		}

		// Optional heap profile, enabled via util's own flags.
		util.MaybeWriteMemProfile()
	})

	if err != nil {
		log.Fatal(err)
	}
}
Example #3
0
package main

import (
	"flag"
	"fmt"
	"os"
	"runtime"

	"github.com/attic-labs/noms/clients/csv"
	"github.com/attic-labs/noms/d"
	"github.com/attic-labs/noms/dataset"
)

var (
	// dsFlags registers the shared dataset selection flags (e.g. the
	// datastore/dataset options provided by the dataset package).
	dsFlags = dataset.NewFlags()
	// delimiter is the CSV field separator. The flag help text demands a
	// single character, but the value is a string: the delimiter is
	// actually a rune, which can be multiple bytes long.
	// https://blog.golang.org/strings
	delimiter = flag.String("delimiter", ",", "field delimiter for csv file, must be exactly one character long.")
)

func main() {
	cpuCount := runtime.NumCPU()
	runtime.GOMAXPROCS(cpuCount)

	flag.Usage = func() {
		fmt.Println("Usage: csv_exporter [options] > filename")
		flag.PrintDefaults()
	}

	flag.Parse()
	ds := dsFlags.CreateDataset()
Example #4
0
// main imports SF crime incident records from a CSV file (-input-file)
// into a noms dataset. Rows are parsed into IncidentDefs, filtered by a
// rough San Francisco-area bounding box, written concurrently via
// getNomsWriter, and finally committed as a single list of refs.
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	flag.Usage = func() {
		fmt.Printf("Usage: %s -ldb=/path/to/db -ds=sfcrime -input-file=/path/to/data.csv\n\n", os.Args[0])
		flag.PrintDefaults()
	}

	dsFlags := dataset.NewFlags()
	flag.Parse()
	ds := dsFlags.CreateDataset()
	if ds == nil || *inputFlag == "" {
		flag.Usage()
		return
	}
	defer ds.Store().Close()

	csvfile, err := os.Open(*inputFlag)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer csvfile.Close()

	// Optional CPU profiling, enabled via util's own flags.
	if util.MaybeStartCPUProfile() {
		defer util.StopCPUProfile()
	}

	reader := csv.NewReader(csvfile)

	// Running bounds of the accepted coordinates, initialized to the
	// opposite extremes so the first accepted row sets all four.
	minLon := float32(180)
	minLat := float32(90)
	maxLat := float32(-90)
	maxLon := float32(-180)

	// Read the header row and discard it. A read error here (e.g. empty
	// file) also surfaces on the first body read, which ends the loop.
	_, err = reader.Read()
	outLiers := 0
	limitExceeded := false
	iChan, rChan := getNomsWriter(ds.Store())
	refList := refIndexList{}
	refs := []types.Value{}

	// Start a go routine to add incident refs to the list as they are ready.
	// It is the only goroutine touching refList/refs until refWg.Wait below.
	var refWg sync.WaitGroup
	refWg.Add(1)
	go func() {
		for ref := range rChan {
			refList = append(refList, ref)
		}
		// Writers finish out of order; restore the original row order.
		sort.Sort(refList)
		for _, r := range refList {
			refs = append(refs, r.ref)
		}
		refWg.Done()
	}()

	index := 0

	for r, err := reader.Read(); !limitExceeded && err == nil; r, err = reader.Read() {
		rowsRead++
		// Parse errors are deliberately ignored: bad coordinates come out
		// as 0 and are rejected by the bounding box below.
		id, _ := strconv.ParseInt(r[0], 10, 64)
		lon64, _ := strconv.ParseFloat(r[9], 32)
		lat64, _ := strconv.ParseFloat(r[10], 32)
		geopos := common.GeopositionDef{Latitude: float32(lat64), Longitude: float32(lon64)}
		incident := common.IncidentDef{
			ID:          id,
			Category:    r[1],
			Description: r[2],
			DayOfWeek:   r[3],
			Date:        r[4],
			Time:        r[5],
			PdDistrict:  r[6],
			Resolution:  r[7],
			Address:     r[8],
			Geoposition: geopos,
			PdID:        r[12],
		}

		// Rough SF-area bounding box: lat 35..40, lon -125..-120.
		// (Fixed: the upper longitude bound was `< 120`, which accepted
		// nearly any point in the western hemisphere, e.g. lon 0 rows.)
		if geopos.Latitude > 35 && geopos.Latitude < 40 && geopos.Longitude > -125 && geopos.Longitude < -120 {
			minLat = min(minLat, geopos.Latitude)
			maxLat = max(maxLat, geopos.Latitude)
			minLon = min(minLon, geopos.Longitude)
			maxLon = max(maxLon, geopos.Longitude)
			iChan <- incidentWithIndex{&incident, index}
			index++
		} else {
			outLiers++
		}
		if !*quietFlag && rowsRead%maxListSize == 0 {
			fmt.Printf("Processed %d rows, %d incidents, elapsed time: %.2f secs\n", rowsRead, numIncidents, time.Since(start).Seconds())
		}
		if rowsRead >= *limitFlag {
			limitExceeded = true
		}
	}

	// Closing iChan lets the writers drain; wait for the ref collector
	// before reading refs.
	close(iChan)
	refWg.Wait()

	incidentRefs := types.NewList(refs...)
	if !*quietFlag {
		fmt.Printf("Converting refs list to noms list: %.2f secs\n", time.Since(start).Seconds())
	}

	_, err = ds.Commit(ds.Store().WriteValue(incidentRefs))
	d.Exp.NoError(err)

	if !*quietFlag {
		fmt.Printf("Commit completed, elaspsed time: %.2f secs\n", time.Since(start).Seconds())
		printDataStats(rowsRead, numIncidents, outLiers, minLat, minLon, maxLat, maxLon)
	}

	fmt.Printf("Ref of list containing Incidents: %s, , elaspsed time: %.2f secs\n", incidentRefs.Ref(), time.Since(start).Seconds())
}