// prepare sets up a temporary posting store and commit log, initializes the
// posting, worker, uid and loader packages against them, and loads
// testdata.nq. Callers are responsible for removing dir1 and dir2 and for
// closing clog.
func prepare() (dir1, dir2 string, ps *store.Store, clog *commit.Logger, rerr error) {
	var err error
	dir1, err = ioutil.TempDir("", "storetest_")
	if err != nil {
		return "", "", nil, nil, err
	}
	ps = new(store.Store)
	ps.Init(dir1)

	dir2, err = ioutil.TempDir("", "storemuts_")
	if err != nil {
		return dir1, "", nil, nil, err
	}
	clog = commit.NewLogger(dir2, "mutations", 50<<20)
	clog.Init()

	posting.Init(clog)
	worker.Init(ps)
	uid.Init(ps)
	loader.Init(ps, ps)

	f, err := os.Open("testdata.nq")
	if err != nil {
		return dir1, dir2, nil, clog, err
	}
	defer f.Close()

	_, err = loader.HandleRdfReader(f, 0, 1)
	if err != nil {
		return dir1, dir2, nil, clog, err
	}
	return dir1, dir2, ps, clog, nil
}
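// A minimal sketch of how prepare might be driven from a test, assuming the
// same store and commit packages as above. closeAll and TestPrepareSketch are
// hypothetical names introduced here for illustration; the real tests may
// clean up differently.
func closeAll(dir1, dir2 string, clog *commit.Logger) {
	clog.Close()
	os.RemoveAll(dir2)
	os.RemoveAll(dir1)
}

func TestPrepareSketch(t *testing.T) {
	dir1, dir2, _, clog, err := prepare()
	if err != nil {
		t.Fatal(err)
	}
	defer closeAll(dir1, dir2, clog)
	// ... run queries against the loaded test data here.
}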
// main starts the Dgraph server: it initializes the posting store and commit
// log, wires up the /query HTTP handler, and blocks on ListenAndServe.
func main() {
	flag.Parse()
	if !flag.Parsed() {
		glog.Fatal("Unable to parse flags")
	}
	logrus.SetLevel(logrus.InfoLevel)

	numCpus := *numcpu
	prev := runtime.GOMAXPROCS(numCpus)
	glog.WithField("num_cpu", numCpus).
		WithField("prev_maxprocs", prev).
		Info("Set max procs to num cpus")

	ps := new(store.Store)
	ps.Init(*postingDir)
	defer ps.Close()

	clog := commit.NewLogger(*mutationDir, "dgraph", 50<<20)
	clog.SyncEvery = 1
	clog.Init()
	defer clog.Close()

	posting.Init(clog)
	worker.Init(ps)
	uid.Init(ps)

	http.HandleFunc("/query", queryHandler(ps))
	glog.WithField("port", *port).Info("Listening for requests...")
	if err := http.ListenAndServe(":"+*port, nil); err != nil {
		x.Err(glog, err).Fatal("ListenAndServe")
	}
}
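// A sketch of the same /query wiring on an explicit http.Server so the
// process can drain in-flight requests on SIGINT instead of exiting from
// inside ListenAndServe. Only standard-library calls are used (net/http,
// os/signal, context, time); runServer is a hypothetical name, and
// queryHandler(ps) is assumed to return a func(http.ResponseWriter,
// *http.Request) as implied by the http.HandleFunc call above.
func runServer(port string, handler func(http.ResponseWriter, *http.Request)) error {
	mux := http.NewServeMux()
	mux.HandleFunc("/query", handler)
	srv := &http.Server{Addr: ":" + port, Handler: mux}

	// Shut the server down when an interrupt arrives.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt)
	go func() {
		<-stop
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		_ = srv.Shutdown(ctx)
	}()

	if err := srv.ListenAndServe(); err != http.ErrServerClosed {
		return err
	}
	return nil
}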
// TestQuery checks that XIDs are routed to the correct instance by their
// fingerprint, and that the UIDs assigned by each instance fall inside that
// instance's slice of the uint64 space.
func TestQuery(t *testing.T) {
	var numInstances uint64 = 2
	mod := math.MaxUint64 / numInstances
	minIdx0 := 0 * mod
	minIdx1 := 1 * mod

	logrus.SetLevel(logrus.DebugLevel)
	dir, err := ioutil.TempDir("", "storetest_")
	if err != nil {
		t.Error(err)
		return
	}
	defer os.RemoveAll(dir)

	ps := new(store.Store)
	ps.Init(dir)
	clog := commit.NewLogger(dir, "mutations", 50<<20)
	clog.Init()
	defer clog.Close()
	posting.Init(clog)
	uid.Init(ps)

	list := []string{"alice", "bob", "mallory", "ash", "man", "dgraph"}
	for _, str := range list {
		if farm.Fingerprint64([]byte(str))%numInstances == 0 {
			uid, err := rdf.GetUid(str, 0, numInstances)
			if err != nil {
				t.Error(err)
			}
			if uid < minIdx0 || uid > minIdx0+mod-1 {
				t.Error("Not the correct UID", uid)
			}
			t.Logf("Instance-0 Correct UID: %v %v", str, uid)
		} else {
			uid, err := rdf.GetUid(str, 1, numInstances)
			if err != nil {
				t.Error(err)
			}
			if uid < minIdx1 || uid > minIdx1+mod-1 {
				t.Error("Not the correct UID", uid)
			}
			t.Logf("Instance-1 Correct UID: %v %v", str, uid)
		}
	}
}
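// A sketch of the range arithmetic the test relies on, assuming the same
// partitioning scheme: the uint64 space is cut into numInstances contiguous
// slices of size math.MaxUint64/numInstances, and instance i owns
// [i*mod, i*mod+mod-1]. bounds and instanceFor are hypothetical helpers
// written for illustration only.
func bounds(instanceIdx, numInstances uint64) (lo, hi uint64) {
	mod := math.MaxUint64 / numInstances
	lo = instanceIdx * mod
	hi = lo + mod - 1
	return lo, hi
}

// instanceFor mirrors the fingerprint routing in the test above: the XID's
// fingerprint modulo numInstances picks which instance assigns its UID.
func instanceFor(xid string, numInstances uint64) uint64 {
	return farm.Fingerprint64([]byte(xid)) % numInstances
}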
// main assigns UIDs for the XIDs found in the given gzipped RDF files. Only
// XIDs whose fingerprint maps to this instance are assigned here; the posting
// lists are merged to disk at the end.
func main() {
	flag.Parse()
	if !flag.Parsed() {
		glog.Fatal("Unable to parse flags")
	}
	if len(*cpuprofile) > 0 {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			glog.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}
	logrus.SetLevel(logrus.InfoLevel)

	numCpus := *numcpu
	prevProcs := runtime.GOMAXPROCS(numCpus)
	glog.WithField("num_cpus", numCpus).
		WithField("prev_maxprocs", prevProcs).
		Info("Set max procs to num cpus")
	glog.WithField("instanceIdx", *instanceIdx).
		WithField("numInstances", *numInstances).
		Info("Only XIDs with FP(xid)%numInstance == instanceIdx will be given UID")

	if len(*rdfGzips) == 0 {
		glog.Fatal("No RDF GZIP files specified")
	}

	ps := new(store.Store)
	ps.Init(*uidDir)
	defer ps.Close()

	posting.Init(nil)
	uid.Init(ps)
	loader.Init(nil, ps)

	files := strings.Split(*rdfGzips, ",")
	for _, path := range files {
		if len(path) == 0 {
			continue
		}
		glog.WithField("path", path).Info("Handling...")
		f, err := os.Open(path)
		if err != nil {
			glog.WithError(err).Fatal("Unable to open rdf file.")
		}
		r, err := gzip.NewReader(f)
		if err != nil {
			glog.WithError(err).Fatal("Unable to create gzip reader.")
		}

		count, err := loader.HandleRdfReaderWhileAssign(r, *instanceIdx, *numInstances)
		if err != nil {
			glog.WithError(err).Fatal("While handling rdf reader.")
		}
		glog.WithField("count", count).Info("RDFs parsed")
		r.Close()
		f.Close()
	}
	glog.Info("Calling merge lists")
	posting.MergeLists(100 * numCpus) // 100 per core.
}
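// A sketch of the per-file work in the loop above pulled into a helper, so
// the file and gzip reader are always closed via defer, even on early error
// returns. processGzippedRdf is a hypothetical name; uint64 is assumed for
// the instance index, instance count, and returned RDF count, matching how
// the flags and the loader call are used above. A caller would typically
// glog.Fatal on a returned error, as main does.
func processGzippedRdf(path string, instanceIdx, numInstances uint64) (uint64, error) {
	f, err := os.Open(path)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	r, err := gzip.NewReader(f)
	if err != nil {
		return 0, err
	}
	defer r.Close()

	return loader.HandleRdfReaderWhileAssign(r, instanceIdx, numInstances)
}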