func queryHandler(w http.ResponseWriter, r *http.Request) { addCorsHeaders(w) if r.Method == "OPTIONS" { return } if r.Method != "POST" { x.SetStatus(w, x.E_INVALID_METHOD, "Invalid method") return } var l query.Latency l.Start = time.Now() defer r.Body.Close() q, err := ioutil.ReadAll(r.Body) if err != nil || len(q) == 0 { x.Err(glog, err).Error("While reading query") x.SetStatus(w, x.E_INVALID_REQUEST, "Invalid request encountered.") return } glog.WithField("q", string(q)).Debug("Query received.") sg, err := gql.Parse(string(q)) if err != nil { x.Err(glog, err).Error("While parsing query") x.SetStatus(w, x.E_INVALID_REQUEST, err.Error()) return } l.Parsing = time.Since(l.Start) glog.WithField("q", string(q)).Debug("Query parsed.") rch := make(chan error) go query.ProcessGraph(sg, rch) err = <-rch if err != nil { x.Err(glog, err).Error("While executing query") x.SetStatus(w, x.E_ERROR, err.Error()) return } l.Processing = time.Since(l.Start) - l.Parsing glog.WithField("q", string(q)).Debug("Graph processed.") js, err := sg.ToJson(&l) if err != nil { x.Err(glog, err).Error("While converting to Json.") x.SetStatus(w, x.E_ERROR, err.Error()) return } glog.WithFields(logrus.Fields{ "total": time.Since(l.Start), "parsing": l.Parsing, "process": l.Processing, "json": l.Json, }).Info("Query Latencies") w.Header().Set("Content-Type", "application/json") fmt.Fprint(w, string(js)) }
// NewGraph returns the root SubGraph for a query, identified either by an
// internal uid (euid) or an external id (exid). A non-empty exid takes
// precedence: it is resolved (or newly assigned) to a uid first. The root's
// result flatbuffer is pre-populated with that single uid so downstream
// processing and ToJson() can treat the root like any other node.
func NewGraph(euid uint64, exid string) (*SubGraph, error) { // This would set the Result field in SubGraph,
	// and populate the children for attributes.
	if len(exid) > 0 {
		u, err := uid.GetOrAssign(exid)
		if err != nil {
			x.Err(glog, err).WithField("xid", exid).Error(
				"While GetOrAssign uid from external id")
			return nil, err
		}
		glog.WithField("xid", exid).WithField("_uid_", u).Debug("GetOrAssign")
		euid = u
	}
	// A zero uid is invalid: neither an exid resolution nor a caller-supplied
	// internal id produced a usable identifier.
	if euid == 0 {
		err := fmt.Errorf("Query internal id is zero")
		x.Err(glog, err).Error("Invalid query")
		return nil, err
	}
	// Encode uid into result flatbuffer.
	b := flatbuffers.NewBuilder(0)
	omatrix := x.UidlistOffset(b, []uint64{euid})
	// Also need to add nil value to keep this consistent.
	// (Uidmatrix and Values vectors are read in lockstep later, so both
	// must have exactly one entry.)
	var voffset flatbuffers.UOffsetT
	{
		bvo := b.CreateByteVector(x.Nilbyte)
		task.ValueStart(b)
		task.ValueAddVal(b, bvo)
		voffset = task.ValueEnd(b)
	}
	// Single-entry uid matrix holding just the root uid.
	task.ResultStartUidmatrixVector(b, 1)
	b.PrependUOffsetT(omatrix)
	mend := b.EndVector(1)
	// Parallel values vector: one (nil) value for the one uid.
	task.ResultStartValuesVector(b, 1)
	b.PrependUOffsetT(voffset)
	vend := b.EndVector(1)
	task.ResultStart(b)
	task.ResultAddUidmatrix(b, mend)
	task.ResultAddValues(b, vend)
	rend := task.ResultEnd(b)
	b.Finish(rend)
	sg := new(SubGraph)
	sg.Attr = "_root_"
	sg.result = b.Bytes[b.Head():]
	// Also add query for consistency and to allow for ToJson() later.
	sg.query = createTaskQuery(sg.Attr, []uint64{euid})
	return sg, nil
}
func main() { flag.Parse() if !flag.Parsed() { glog.Fatal("Unable to parse flags") } logrus.SetLevel(logrus.InfoLevel) numCpus := runtime.NumCPU() prev := runtime.GOMAXPROCS(numCpus) glog.WithField("num_cpu", numCpus). WithField("prev_maxprocs", prev). Info("Set max procs to num cpus") ps := new(store.Store) ps.Init(*postingDir) defer ps.Close() clog := commit.NewLogger(*mutationDir, "dgraph", 50<<20) clog.SyncEvery = 1 clog.Init() defer clog.Close() posting.Init(ps, clog) http.HandleFunc("/query", queryHandler) glog.WithField("port", *port).Info("Listening for requests...") if err := http.ListenAndServe(":"+*port, nil); err != nil { x.Err(glog, err).Fatal("ListenAndServe") } }
func Parse(input string) (sg *query.SubGraph, rerr error) { l := lex.NewLexer(input) go run(l) sg = nil for item := range l.Items { if item.Typ == itemText { continue } if item.Typ == itemOpType { if item.Val == "mutation" { return nil, errors.New("Mutations not supported") } } if item.Typ == itemLeftCurl { if sg == nil { sg, rerr = getRoot(l) if rerr != nil { x.Err(glog, rerr).Error("While retrieving subgraph root") return nil, rerr } } else { if err := godeep(l, sg); err != nil { return sg, err } } } } return sg, nil }
func Query(entity []string, q1 string, q2 string) map[int]eRatio { rmap := make(map[int]eRatio) conn, err := grpc.Dial(":8081", grpc.WithInsecure()) if err != nil { x.Err(glog, err).Fatal("DialTCPConnection") } defer conn.Close() // Client for getting protocol buffer response c := graph.NewDGraphClient(conn) // Http client for getting JSON response. hc := &http.Client{Transport: &http.Transport{ MaxIdleConnsPerHost: 100, }} tInitial := time.Now() counter := 0 for time.Now().Sub(tInitial).Seconds() < *numsecs && counter < len(entity) { fmt.Println("counter", counter) d := entity[counter] q := q1 + d + q2 ne, r, jl := getRatio(q, hc, c) er := rmap[ne] er.count += 1 er.ratio = er.ratio + r er.jsonL = er.jsonL + jl rmap[ne] = er counter++ } return rmap }
func (s *Store) Init(filepath string) { s.opt = rocksdb.NewOptions() s.opt.SetCreateIfMissing(true) fp := rocksdb.NewBloomFilter(16) s.opt.SetFilterPolicy(fp) s.ropt = rocksdb.NewReadOptions() s.wopt = rocksdb.NewWriteOptions() s.wopt.SetSync(true) var err error s.db, err = rocksdb.Open(filepath, s.opt) if err != nil { x.Err(log, err).WithField("filepath", filepath). Fatal("While opening store") return } }
func (g *SubGraph) ToJson(l *Latency) (js []byte, rerr error) { r, err := postTraverse(g) if err != nil { x.Err(glog, err).Error("While doing traversal") return js, err } l.Json = time.Since(l.Start) - l.Parsing - l.Processing if len(r) == 1 { for _, ival := range r { m := ival.(map[string]interface{}) m["server_latency"] = l.ToMap() return json.Marshal(m) } } else { glog.Fatal("We don't currently support more than 1 uid at root.") } glog.Fatal("Shouldn't reach here.") return json.Marshal(r) }
func Parse(input string) (gq *GraphQuery, mu *Mutation, rerr error) { l := lex.NewLexer(input) go run(l) mu = nil gq = nil for item := range l.Items { if item.Typ == itemText { continue } if item.Typ == itemOpType { if item.Val == "mutation" { if mu != nil { return nil, nil, errors.New("Only one mutation block allowed.") } mu, rerr = getMutation(l) if rerr != nil { return nil, nil, rerr } } } if item.Typ == itemLeftCurl { if gq == nil { gq, rerr = getRoot(l) if rerr != nil { x.Err(glog, rerr).Error("While retrieving subgraph root") return nil, nil, rerr } } else { if err := godeep(l, gq); err != nil { return nil, nil, err } } } } return gq, mu, nil }
func ProcessGraph(sg *SubGraph, rch chan error) { var err error if len(sg.query) > 0 && sg.Attr != "_root_" { // This task execution would go over the wire in later versions. sg.result, err = posting.ProcessTask(sg.query) if err != nil { x.Err(glog, err).Error("While processing task.") rch <- err return } } uo := flatbuffers.GetUOffsetT(sg.result) r := new(task.Result) r.Init(sg.result, uo) sorted, err := sortedUniqueUids(r) if err != nil { x.Err(glog, err).Error("While processing task.") rch <- err return } if len(sorted) == 0 { // Looks like we're done here. if len(sg.Children) > 0 { glog.Debugf("Have some children but no results. Life got cut short early."+ "Current attribute: %q", sg.Attr) } else { glog.Debugf("No more things to process for Attr: %v", sg.Attr) } rch <- nil return } // Let's execute it in a tree fashion. Each SubGraph would break off // as many goroutines as it's children; which would then recursively // do the same thing. // Buffered channel to ensure no-blockage. childchan := make(chan error, len(sg.Children)) for i := 0; i < len(sg.Children); i++ { child := sg.Children[i] child.query = createTaskQuery(child.Attr, sorted) go ProcessGraph(child, childchan) } // Now get all the results back. for i := 0; i < len(sg.Children); i++ { err = <-childchan glog.WithFields(logrus.Fields{ "num_children": len(sg.Children), "index": i, "attr": sg.Children[i].Attr, "err": err, }).Debug("Reply from child") if err != nil { x.Err(glog, err).Error("While processing child task.") rch <- err return } } rch <- nil }
func postTraverse(g *SubGraph) (result map[uint64]interface{}, rerr error) { if len(g.query) == 0 { return result, nil } result = make(map[uint64]interface{}) // Get results from all children first. cResult := make(map[uint64]interface{}) for _, child := range g.Children { m, err := postTraverse(child) if err != nil { x.Err(glog, err).Error("Error while traversal") return result, err } // Merge results from all children, one by one. for k, v := range m { if val, present := cResult[k]; !present { cResult[k] = v } else { cResult[k] = mergeInterfaces(val, v) } } } // Now read the query and results at current node. uo := flatbuffers.GetUOffsetT(g.query) q := new(task.Query) q.Init(g.query, uo) ro := flatbuffers.GetUOffsetT(g.result) r := new(task.Result) r.Init(g.result, ro) if q.UidsLength() != r.UidmatrixLength() { glog.Fatal("Result uidmatrixlength: %v. Query uidslength: %v", r.UidmatrixLength(), q.UidsLength()) } if q.UidsLength() != r.ValuesLength() { glog.Fatalf("Result valuelength: %v. Query uidslength: %v", r.ValuesLength(), q.UidsLength()) } var ul task.UidList for i := 0; i < r.UidmatrixLength(); i++ { if ok := r.Uidmatrix(&ul, i); !ok { return result, fmt.Errorf("While parsing UidList") } l := make([]interface{}, ul.UidsLength()) for j := 0; j < ul.UidsLength(); j++ { uid := ul.Uids(j) m := make(map[string]interface{}) m["_uid_"] = fmt.Sprintf("%#x", uid) if ival, present := cResult[uid]; !present { l[j] = m } else { l[j] = mergeInterfaces(m, ival) } } if len(l) > 0 { m := make(map[string]interface{}) m[g.Attr] = l result[q.Uids(i)] = m } } var tv task.Value for i := 0; i < r.ValuesLength(); i++ { if ok := r.Values(&tv, i); !ok { return result, fmt.Errorf("While parsing value") } var ival interface{} if err := posting.ParseValue(&ival, tv.ValBytes()); err != nil { return result, err } if ival == nil { continue } if pval, present := result[q.Uids(i)]; present { glog.WithField("prev", pval). WithField("_uid_", q.Uids(i)). WithField("new", ival). 
Fatal("Previous value detected.") } m := make(map[string]interface{}) m["_uid_"] = fmt.Sprintf("%#x", q.Uids(i)) glog.WithFields(logrus.Fields{ "_uid_": q.Uids(i), "val": ival, }).Debug("Got value") m[g.Attr] = ival result[q.Uids(i)] = m } return result, nil }