func (s *Sql) GetEntity(subject string) ( result []x.Instruction, rerr error) { rows, err := s.db.Query(sqlSelect, subject) if err != nil { x.LogErr(log, err).Error("While querying for entity") return result, err } defer rows.Close() for rows.Next() { var i x.Instruction err := rows.Scan(&i.SubjectId, &i.SubjectType, &i.Predicate, &i.Object, &i.ObjectId, &i.NanoTs, &i.Source) if err != nil { x.LogErr(log, err).Error("While scanning") return result, err } result = append(result, i) } err = rows.Err() if err != nil { x.LogErr(log, err).Error("While finishing up on rows") return result, err } return result, nil }
func (si SimpleIndexer) Regenerate(e x.Entity) (rdoc x.Doc) { rdoc.Id = e.Id rdoc.Kind = e.Kind rdoc.NanoTs = time.Now().UnixNano() if e.Kind == "Post" { // If Post, figure out the total activity on it, so we can sort by that. result, err := store.NewQuery(e.Id).UptoDepth(1).Run() if err != nil { x.LogErr(log, err).Fatal("While querying db") return rdoc } data := result.ToMap() data["activity"] = len(result.Children) rdoc.Data = data } else { result, err := store.NewQuery(e.Id).UptoDepth(0).Run() if err != nil { x.LogErr(log, err).Fatal("While querying db") return rdoc } rdoc.Data = result.ToMap() } return }
// ExampleSearch demonstrates end-to-end indexing: children written through
// the store are picked up by the real-time indexer and become queryable in
// the search engine, with the deleted child (pos 5) excluded from results.
func ExampleSearch() {
	path, err := ioutil.TempDir("", "gocrudldb_")
	if err != nil {
		x.LogErr(log, err).Fatal("Opening file")
		return
	}
	store.Get().Init(path) // leveldb
	search.Get().Init()    // memsearch

	// Run indexer to update entities in search engine in real time.
	c := req.NewContextWithUpdates(10, 100)
	indexer.Register("Child", SimpleIndexer{})
	indexer.Run(c, 2)

	u := store.NewUpdate("Root", "bigbang").SetSource("author")
	for i := 0; i < 10; i++ {
		child := u.AddChild("Child").Set("pos", i).Set("particle", particles[i])
		if i == 5 {
			child.MarkDeleted() // This shouldn't be retrieved anymore.
		}
	}
	if err = u.Execute(c); err != nil {
		x.LogErr(log, err).Fatal("While updating")
		return
	}
	indexer.WaitForDone(c) // Block until indexing is done.

	// Query children ordered by descending position; only 9 docs remain
	// because the child at pos 5 was marked deleted.
	docs, err := search.Get().NewQuery("Child").Order("-data.pos").Run()
	if err != nil {
		x.LogErr(log, err).Fatal("While searching")
		return
	}
	fmt.Println("docs:", len(docs))
	for _, doc := range docs {
		m := doc.Data.(map[string]interface{})
		fmt.Println(m["pos"], m["particle"])
	}
	// Output:
	// docs: 9
	// 9 higgs boson
	// 8 boson
	// 7 photon
	// 6 bottom
	// 4 down
	// 3 gluon
	// 2 top
	// 1 charm
	// 0 up
}
func ExampleStore() { path, err := ioutil.TempDir("", "gocrudldb_") if err != nil { x.LogErr(log, err).Fatal("Opening file") return } store.Get().Init(path) // leveldb // Update some data. c := req.NewContext(10) // 62^10 permutations err = store.NewUpdate("Root", "bigbang").SetSource("author"). Set("when", "13.8 billion years ago").Set("explosive", true).Execute(c) if err != nil { x.LogErr(log, err).Fatal("Commiting update") return } // Retrieve that data result, err := store.NewQuery("bigbang").Run() if err != nil { x.LogErr(log, err).Fatal("While querying store") return } fmt.Println(result.Kind) // Root fmt.Println(result.Id) // bigbang data := result.ToMap() { val, ok := data["explosive"] if !ok { log.Fatal("creator should be set") return } fmt.Println(val) // true } { val, ok := data["when"] if !ok { log.Fatal("creator should be set") return } fmt.Println(val) } // Output: // Root // bigbang // true // 13.8 billion years ago }
func (cs *Cassandra) Iterate(fromId string, num int, ch chan x.Entity) (rnum int, rlast x.Entity, rerr error) { iter := cs.session.Query(kScan, fromId, num).Iter() var e x.Entity handled := make(map[x.Entity]bool) rnum = 0 for iter.Scan(&e.Kind, &e.Id) { rlast = e if _, present := handled[e]; present { continue } ch <- e rnum += 1 handled[e] = true if rnum >= num { break } } if err := iter.Close(); err != nil { x.LogErr(log, err).Error("While closing iterator") return rnum, rlast, err } return rnum, rlast, nil }
func processUpdates(c *req.Context) { defer wg.Done() for entity := range c.Updates { idxr, pok := Get(entity.Kind) if !pok { continue } dirty := idxr.OnUpdate(entity) for _, de := range dirty { didxr, dok := Get(de.Kind) if !dok { continue } doc := didxr.Regenerate(de) log.WithField("doc", doc).Debug("Regenerated doc") if search.Get() == nil { continue } err := search.Get().Update(doc) if err != nil { x.LogErr(log, err).WithField("doc", doc). Error("While updating in search engine") } } } log.Info("Finished processing channel") }
func (s *Sql) Init(args ...string) { if len(args) != 3 { log.WithField("args", args).Fatal("Invalid arguments") return } dbtype := args[0] source := args[1] tablename := args[2] var err error s.db, err = sql.Open(dbtype, source) if err != nil { x.LogErr(log, err).Fatal("While opening connection") return } if err = s.db.Ping(); err != nil { x.LogErr(log, err).Fatal("While pinging db") return } var insert string switch dbtype { case "postgres": insert = fmt.Sprintf(`insert into %s (subject_id, subject_type, predicate, object, object_id, nano_ts, source) values ($1, $2, $3, $4, $5, $6, $7)`, tablename) sqlIsNew = fmt.Sprintf("select subject_id from %s where subject_id = $1 limit 1", tablename) sqlSelect = fmt.Sprintf(`select subject_id, subject_type, predicate, object, object_id, nano_ts, source from %s where subject_id = $1`, tablename) default: insert = fmt.Sprintf(`insert into %s (subject_id, subject_type, predicate, object, object_id, nano_ts, source) values (?, ?, ?, ?, ?, ?, ?)`, tablename) sqlIsNew = fmt.Sprintf("select subject_id from %s where subject_id = ? limit 1", tablename) sqlSelect = fmt.Sprintf(`select subject_id, subject_type, predicate, object, object_id, nano_ts, source from %s where subject_id = ?`, tablename) } sqlInsert, err = s.db.Prepare(insert) if err != nil { panic(err) } }
func (l *Leveldb) Init(_ string, filepath string) { var err error l.db, err = leveldb.OpenFile(filepath, l.opt) if err != nil { x.LogErr(log, err).Fatal("While opening leveldb") return } }
func (rdb *RethinkDB) IsNew(subject string) bool { iter, err := r.Table(rdb.table).Get(subject).Run(rdb.session) if err != nil { x.LogErr(log, err).Error("While running query") return false } isnew := true if !iter.IsNil() { isnew = true } if err := iter.Close(); err != nil { x.LogErr(log, err).Error("While closing iterator") return false } return isnew }
func (ds *Datastore) Commit(its []*x.Instruction) error { var keys []*datastore.Key for _, i := range its { dkey := ds.getIKey(*i) keys = append(keys, dkey) } client, err := datastore.NewClient(ds.ctx, ds.projectId) if err != nil { x.LogErr(log, err).Error("While creating new client") return err } if _, err := client.PutMulti(ds.ctx, keys, its); err != nil { x.LogErr(log, err).Error("While committing instructions") return err } log.Debugf("%d Instructions committed", len(its)) return nil }
// Init initializes connection to Elastic Search instance, checks for // existence of "gocrud" index and creates it, if missing. Note that // Init does NOT do mapping necessary to do exact-value term matching // for strings etc. That needs to be done externally. func (es *Elastic) Init(args ...string) { if len(args) != 1 { log.WithField("args", args).Fatal("Invalid arguments") return } url := args[0] log.Debug("Initializing connection to ElaticSearch") var opts []elastic.ClientOptionFunc opts = append(opts, elastic.SetURL(url)) opts = append(opts, elastic.SetSniff(false)) client, err := elastic.NewClient(opts...) if err != nil { x.LogErr(log, err).Fatal("While creating connection with ElaticSearch.") return } version, err := client.ElasticsearchVersion(url) if err != nil { x.LogErr(log, err).Fatal("Unable to query version") return } log.WithField("version", version).Debug("ElasticSearch version") // Use the IndexExists service to check if a specified index exists. exists, err := client.IndexExists("gocrud").Do() if err != nil { x.LogErr(log, err).Fatal("Unable to query index existence.") return } if !exists { // Create a new index. createIndex, err := client.CreateIndex("gocrud").Do() if err != nil { x.LogErr(log, err).Fatal("Unable to create index.") return } if !createIndex.Acknowledged { // Not acknowledged log.Errorf("Create index not acknowledged. Not sure what that means...") } } es.client = client log.Debug("Connected with ElasticSearch") }
func (rdb *RethinkDB) Commit(its []*x.Instruction) error { res, err := r.Table(rdb.table).Insert(its).RunWrite(rdb.session) if err != nil { x.LogErr(log, err).Error("While executing batch") return nil } log.WithField("inserted", res.Inserted+res.Replaced).Debug("Stored instructions") return nil }
// GetEntity retrieves all documents matching the subject identifier func (mdb *MongoDB) GetEntity(tablePrefix string, subject string) (result []x.Instruction, err error) { c := mdb.session.DB(mdb.database).C(mdb.collection) err = c.Find(bson.M{"subjectid": subject}).All(&result) if err != nil { x.LogErr(log, err).Error("While running query") } return result, err }
func (ds *Datastore) IsNew(id string) bool { dkey := datastore.NewKey(ds.ctx, ds.tablePrefix+"Entity", id, 0, nil) client, err := datastore.NewClient(ds.ctx, ds.projectId) if err != nil { x.LogErr(log, err).Error("While creating client") return false } q := datastore.NewQuery(ds.tablePrefix + "Instruction").Ancestor(dkey). Limit(1).KeysOnly() keys, err := client.GetAll(ds.ctx, q, nil) if err != nil { x.LogErr(log, err).Error("While GetAll") return false } if len(keys) > 0 { return false } return true }
func (s *Sql) Commit(its []*x.Instruction) error { for _, it := range its { if _, err := sqlInsert.Exec(it.SubjectId, it.SubjectType, it.Predicate, it.Object, it.ObjectId, it.NanoTs, it.Source); err != nil { x.LogErr(log, err).Error("While inserting row in sql") return err } } return nil }
func (ds *Datastore) Init(_ string, project string) { client, err := google.DefaultClient(oauth2.NoContext, "https://www.googleapis.com/auth/devstorage.full_control") if err != nil { x.LogErr(log, err).Fatal("Unable to get client") } ds.ctx = cloud.NewContext(project, client) if ds.ctx == nil { log.Fatal("Failed to get context. context is nil") } log.Info("Connection to Google datastore established") }
func (cs *Cassandra) Commit(its []*x.Instruction) error { b := cs.session.NewBatch(gocql.LoggedBatch) for _, it := range its { b.Query(kInsert, it.SubjectId, it.SubjectType, it.Predicate, it.Object, it.ObjectId, it.NanoTs, it.Source) } if err := cs.session.ExecuteBatch(b); err != nil { x.LogErr(log, err).Error("While executing batch") } log.WithField("len", len(its)).Debug("Stored instructions") return nil }
func (l *Leveldb) Commit(_ string, its []*x.Instruction) error { var keys []string for _, it := range its { var key string for m := 0; m < 10; m++ { key = fmt.Sprintf("%s_%s", it.SubjectId, x.UniqueString(5)) log.WithField("key", key).Debug("Checking existence of key") if has, err := l.db.Has([]byte(key), nil); err != nil { x.LogErr(log, err).WithField("key", key).Error("While check if key exists") continue } else if has { continue } else { break } log.Errorf("Exhausted %d tries", m) return errors.New("Exhausted tries") } log.WithField("key", key).Debug("Is unique") keys = append(keys, key) } b := new(leveldb.Batch) for idx, it := range its { key := []byte(keys[idx]) buf, err := it.GobEncode() if err != nil { x.LogErr(log, err).Error("While encoding") return err } b.Put(key, buf) } if err := l.db.Write(b, nil); err != nil { x.LogErr(log, err).Error("While writing to db") return err } log.Debugf("%d instructions committed", len(its)) return nil }
func (cs *Cassandra) IsNew(subject string) bool { iter := cs.session.Query(kIsNew, subject).Iter() var sid string isnew := true for iter.Scan(&sid) { isnew = false } if err := iter.Close(); err != nil { x.LogErr(log, err).Error("While closing iterator") return false } return isnew }
func (rdb *RethinkDB) GetEntity(subject string) ( result []x.Instruction, rerr error, ) { iter, err := r.Table(rdb.table).GetAllByIndex("SubjectId", subject).Run(rdb.session) if err != nil { x.LogErr(log, err).Error("While running query") return result, err } err = iter.All(&result) if err != nil { x.LogErr(log, err).Error("While iterating") return result, err } if err := iter.Close(); err != nil { x.LogErr(log, err).Error("While closing iterator") return result, err } return result, nil }
func (l *Leveldb) GetEntity(_, id string) (result []x.Instruction, rerr error) { slice := util.BytesPrefix([]byte(id)) iter := l.db.NewIterator(slice, nil) for iter.Next() { buf := iter.Value() if buf == nil { break } var i x.Instruction if err := i.GobDecode(buf); err != nil { x.LogErr(log, err).Error("While decoding") return result, err } result = append(result, i) } iter.Release() err := iter.Error() if err != nil { x.LogErr(log, err).Error("While iterating") } return result, err }
func (ds *Datastore) Commit(t string, its []*x.Instruction) error { var keys []*datastore.Key for _, i := range its { dkey := ds.getIKey(*i, t) keys = append(keys, dkey) } if _, err := datastore.PutMulti(ds.ctx, keys, its); err != nil { x.LogErr(log, err).Error("While committing instructions") return err } log.Debugf("%d Instructions committed", len(its)) return nil }
// Retrieve the parent id for given entity id. Return ErrNoParent if parent is // not present. Otherwise, if an error occurs during retrieval, returns that. func Parent(id string) (parentid string, rerr error) { its, err := Get().GetEntity(id) if err != nil { x.LogErr(log, err).WithField("id", id).Error("While retrieving entity") return "", err } for _, it := range its { if it.Predicate == "_parent_" { return it.ObjectId, nil } } return "", ErrNoParent }
// Update checks the validity of the given document and indexes it into the
// "gocrud" index, using the document's NanoTs as an external version so a
// stale regeneration cannot overwrite a newer one.
func (es *Elastic) Update(doc x.Doc) error {
	// Id, Kind and NanoTs are all required to address and version the doc.
	if doc.Id == "" || doc.Kind == "" || doc.NanoTs == 0 {
		return errors.New("Invalid document")
	}
	result, err := es.client.Index().Index("gocrud").Type(doc.Kind).Id(doc.Id).
		VersionType("external").Version(doc.NanoTs).BodyJson(doc).Do()
	if err != nil {
		x.LogErr(log, err).WithField("doc", doc).Error("While indexing doc")
		return err
	}
	log.Debug("index_result", result)
	return nil
}
func (cs *Cassandra) GetEntity(subject string) ( result []x.Instruction, rerr error) { iter := cs.session.Query(kSelect, subject).Iter() var i x.Instruction for iter.Scan(&i.SubjectId, &i.SubjectType, &i.Predicate, &i.Object, &i.ObjectId, &i.NanoTs, &i.Source) { result = append(result, i) } if err := iter.Close(); err != nil { x.LogErr(log, err).Error("While iterating") return result, err } return result, nil }
func (si SimpleIndexer) Regenerate(e x.Entity) (rdoc x.Doc) { rdoc.Id = e.Id rdoc.Kind = e.Kind rdoc.NanoTs = time.Now().UnixNano() result, err := store.NewQuery(e.Id).Run() if err != nil { x.LogErr(log, err).Fatal("While querying store") return } data := result.ToMap() rdoc.Data = data return }
func (l *Leveldb) Init(args ...string) { if len(args) != 1 { log.WithField("args", args).Fatal("Invalid arguments") return } filepath := args[0] var err error l.db, err = leveldb.OpenFile(filepath, l.opt) if err != nil { x.LogErr(log, err).Fatal("While opening leveldb") return } }
func (s *Sql) IsNew(subject string) bool { rows, err := s.db.Query(sqlIsNew, subject) if err != nil { x.LogErr(log, err).Error("While checking is new") return false } defer rows.Close() var sub string isnew := true for rows.Next() { if err := rows.Scan(&sub); err != nil { x.LogErr(log, err).Error("While scanning") return false } log.WithField("subject_id", sub).Debug("Found existing subject_id") isnew = false } if err = rows.Err(); err != nil { x.LogErr(log, err).Error("While iterating") return false } return isnew }
// Commit inserts the instructions into the collection as documents func (mdb *MongoDB) Commit(_ string, its []*x.Instruction) error { c := mdb.session.DB(mdb.database).C(mdb.collection) for _, i := range its { err := c.Insert(i) if err != nil { x.LogErr(log, err).Error("While executing batch") return nil } } log.WithField("inserted", len(its)).Debug("Stored instructions") return nil }
func (l *Leveldb) Iterate(fromId string, num int, ch chan x.Entity) (rnum int, rlast x.Entity, rerr error) { slice := util.Range{Start: []byte(fromId)} iter := l.db.NewIterator(&slice, nil) rnum = 0 handled := make(map[x.Entity]bool) for iter.Next() { buf := iter.Value() if buf == nil { break } var i x.Instruction if err := i.GobDecode(buf); err != nil { x.LogErr(log, err).Error("While decoding") return rnum, rlast, err } e := x.Entity{Kind: i.SubjectType, Id: i.SubjectId} rlast = e if _, present := handled[e]; present { continue } ch <- e handled[e] = true rnum += 1 if rnum >= num { break } } iter.Release() err := iter.Error() if err != nil { x.LogErr(log, err).Error("While iterating") } return rnum, rlast, err }