// prepare creates two temporary directories, initializes the posting store and
// commit logger inside them, wires up the posting, worker, uid and loader
// packages, and loads testdata.nq into the store. On error it returns whatever
// has been created so far so the caller can still clean up.
func prepare() (dir1, dir2 string, ps *store.Store, clog *commit.Logger, rerr error) {
	var err error
	dir1, err = ioutil.TempDir("", "storetest_")
	if err != nil {
		return "", "", nil, nil, err
	}
	ps = new(store.Store)
	ps.Init(dir1)

	dir2, err = ioutil.TempDir("", "storemuts_")
	if err != nil {
		return dir1, "", nil, nil, err
	}
	clog = commit.NewLogger(dir2, "mutations", 50<<20)
	clog.Init()

	posting.Init(clog)
	worker.Init(ps)
	uid.Init(ps)
	loader.Init(ps, ps)

	f, err := os.Open("testdata.nq")
	if err != nil {
		return dir1, dir2, nil, clog, err
	}
	defer f.Close()

	_, err = loader.HandleRdfReader(f, 0, 1)
	if err != nil {
		return dir1, dir2, nil, clog, err
	}
	return dir1, dir2, ps, clog, nil
}
// main parses flags, optionally starts CPU profiling, initializes the posting
// store, streams each gzipped RDF file through loader.HandleRdfReader, and
// finally merges the in-memory posting lists back to the store.
func main() {
	flag.Parse()
	if !flag.Parsed() {
		glog.Fatal("Unable to parse flags")
	}
	if len(*cpuprofile) > 0 {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			glog.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	logrus.SetLevel(logrus.InfoLevel)
	numCpus := runtime.NumCPU()
	prevProcs := runtime.GOMAXPROCS(numCpus)
	glog.WithField("num_cpu", numCpus).
		WithField("prev_maxprocs", prevProcs).
		Info("Set max procs to num cpus")

	if len(*rdfGzips) == 0 {
		glog.Fatal("No RDF GZIP files specified")
	}
	ps := new(store.Store)
	ps.Init(*postingDir)
	defer ps.Close()
	posting.Init(ps, nil)

	files := strings.Split(*rdfGzips, ",")
	for _, path := range files {
		if len(path) == 0 {
			continue
		}
		glog.WithField("path", path).Info("Handling...")
		f, err := os.Open(path)
		if err != nil {
			glog.WithError(err).Fatal("Unable to open rdf file.")
		}
		r, err := gzip.NewReader(f)
		if err != nil {
			glog.WithError(err).Fatal("Unable to create gzip reader.")
		}

		count, err := loader.HandleRdfReader(r, *mod)
		if err != nil {
			glog.WithError(err).Fatal("While handling rdf reader.")
		}
		glog.WithField("count", count).Info("RDFs parsed")
		r.Close()
		f.Close()
	}
	glog.Info("Calling merge lists")
	posting.MergeLists(100 * numCpus) // 100 per core.
}