Example #1
func executeItemQuery(con *Context, q *datastore.Query, limit int, cursorStr string) ([]Item, string, error) {
	// Resume from the supplied cursor if it decodes successfully.
	if cursor, err := datastore.DecodeCursor(cursorStr); err == nil {
		q = q.Start(cursor)
	}

	var is = make([]Item, 0, limit)
	var err error
	t := q.Run(con.C)
	for {
		var i Item
		_, err = t.Next(&i)
		if err == datastore.Done {
			break
		}

		if err != nil {
			con.Log.Errorf("Error fetching next item: %v", err)
			return nil, "", err
		}

		is = append(is, i)
	}

	var cursor datastore.Cursor
	if cursor, err = t.Cursor(); err == nil {
		return is, cursor.String(), nil
	}

	return nil, "", err
}
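The cursor string returned above can be passed back in on a later request to continue where the previous page stopped. A minimal paging sketch, assuming the caller builds the query with a Limit matching the page size (the "Item" kind and "Created" order field are illustrative, not from the original code):

// Hypothetical caller: fetch one page of items and remember where the page ended.
func nextItemPage(con *Context, cursorStr string) ([]Item, string, error) {
	q := datastore.NewQuery("Item").Order("-Created").Limit(20) // assumed kind and field
	return executeItemQuery(con, q, 20, cursorStr)
}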
Example #2
func (cdb ComplaintDB) NewIter(q *datastore.Query) *ComplaintIterator {
	ci := ComplaintIterator{
		//CDB:   cdb,
		Query: q,
		Iter:  q.Run(cdb.Ctx()),
	}
	return &ci
}
Example #3
// SubtreeByPath retrieves the subdirectories of a given directory.
// It relies on an indexed string property "Dir"
// containing a string representation of the full path.
//
// It might be fast for deep, uncached directory subtrees
// that have been saved in a nested manner.
//
// However, it might not find recently added directories.
// Upon finding nothing, it therefore returns the
// "warning" fsi.EmptyQueryResult.
//
// The func could easily be enhanced with chunked scanning.
//
// It is currently used by ReadDir and by the test package.
// It is public for the test package.
func (fs *dsFileSys) SubtreeByPath(name string, onlyDirectChildren bool) ([]DsDir, error) {

	dir, bname := fs.SplitX(name)
	name = dir + common.Filify(bname)
	if !strings.HasSuffix(name, sep) {
		name += sep
	}

	var q *datastore.Query

	if onlyDirectChildren {
		q = datastore.NewQuery(tdir).
			Filter("Dir=", name).
			Order("Dir")
		//  Limit(4)
	} else {
		pathInc := IncrementString(name)
		q = datastore.NewQuery(tdir).
			Filter("Dir>=", name).
			Filter("Dir<", pathInc).
			Order("Dir")
	}

	// log.Printf("%v", q)

	var children []DsDir
	keys, err := q.GetAll(fs.Ctx(), &children)
	if err != nil {
		aelog.Errorf(fs.Ctx(), "Error getting all children of %v => %v", dir+bname, err)
		return children, err
	}

	if len(children) < 1 {
		return children, fsi.EmptyQueryResult
	}

	// Very evil: we filter out the root node, since it
	// has the same dir as the level-1 directories.
	keyRoot := datastore.NewKey(fs.Ctx(), tdir, fs.RootDir(), 0, nil)
	idxRoot := -1

	for i := 0; i < len(children); i++ {
		children[i].fSys = fs
		children[i].Key = keys[i]
		if keys[i].Equal(keyRoot) {
			// log.Printf("root idx %v", i)
			idxRoot = i
		}
	}

	if idxRoot > -1 {
		children = append(children[:idxRoot], children[idxRoot+1:]...)
	}

	return children, nil

}
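IncrementString is not shown in this example. For the "Dir>=" / "Dir<" filter pair above to cover exactly the subtree, it has to return the smallest string that sorts after every string prefixed with name. A rough sketch of such a helper, purely as an assumption about what IncrementString does:

// incrementString approximates IncrementString: it bumps the last byte so the
// result sorts immediately after every string with the given prefix.
// Sketch only: it does not handle a trailing 0xFF byte.
func incrementString(s string) string {
	if s == "" {
		return s
	}
	b := []byte(s)
	b[len(b)-1]++
	return string(b)
}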
Example #4
// Snarf down all the keys from the get go.
func (cdb *ComplaintDB) NewLongIter(q *datastore.Query) *LongIterator {
	ctx := cdb.Ctx()
	keys, err := q.KeysOnly().GetAll(ctx, nil)
	i := LongIterator{
		Ctx:  ctx,
		Keys: keys,
		err:  err,
	}

	return &i
}
Example #5
// Snarf down all the keys from the get go.
func (cdb *ComplaintDB) NewLongBatchingIter(q *datastore.Query) *LongBatchingIterator {
	ctx := cdb.Ctx()
	keys, err := q.KeysOnly().GetAll(ctx, nil)
	i := LongBatchingIterator{
		Ctx:       ctx,
		BatchSize: 100,
		keys:      keys,
		vals:      []*types.Complaint{},
		err:       err,
	}

	return &i
}
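The LongBatchingIterator type itself is not part of this example. Presumably its iteration method hands out buffered values and refills the buffer in chunks of BatchSize via GetMulti over the pre-fetched keys. A sketch under that assumption (field and method names may differ from the real type):

// NextWithErr returns the next complaint, loading another batch of at most
// BatchSize entities with GetMulti when the local buffer runs dry.
// It returns (nil, nil) once all keys have been consumed.
func (iter *LongBatchingIterator) NextWithErr() (*types.Complaint, error) {
	if iter.err != nil {
		return nil, iter.err
	}
	if len(iter.vals) == 0 {
		if len(iter.keys) == 0 {
			return nil, nil // exhausted
		}
		n := iter.BatchSize
		if n > len(iter.keys) {
			n = len(iter.keys)
		}
		batchKeys := iter.keys[:n]
		iter.keys = iter.keys[n:]

		batch := make([]types.Complaint, len(batchKeys))
		if err := datastore.GetMulti(iter.Ctx, batchKeys, batch); err != nil {
			iter.err = err
			return nil, err
		}
		for i := range batch {
			iter.vals = append(iter.vals, &batch[i])
		}
	}
	next := iter.vals[0]
	iter.vals = iter.vals[1:]
	return next, nil
}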
Example #6
// fillKeyQuery runs the query, appends the results to the slice pointed to by
// results, and hands each entity its datastore key via the Keyable interface.
func fillKeyQuery(c context.Context, q *datastore.Query, results interface{}) error {
	keys, err := q.GetAll(c, results)
	if err == nil {
		rslice := reflect.ValueOf(results).Elem()
		for i := range keys {
			if k, ok := rslice.Index(i).Interface().(Keyable); ok {
				k.setKey(keys[i])
			} else if k, ok := rslice.Index(i).Addr().Interface().(Keyable); ok {
				k.setKey(keys[i])
			} else {
				log.Infof(c, "Warning: %v is not Keyable", rslice.Index(i).Interface())
			}
		}
	} else {
		log.Errorf(c, "Error executing query: %v", err)
	}
	return err
}
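The Keyable interface is not included in this snippet. For the two type assertions above to work, it presumably looks like the following; the Record type is just a hypothetical entity showing the pointer-receiver case:

// Keyable lets fillKeyQuery hand each loaded entity its datastore key.
type Keyable interface {
	setKey(*datastore.Key)
}

// Record is a hypothetical entity; with a pointer receiver, a []Record result
// slice matches the Addr() branch above, while a []*Record slice matches the first branch.
type Record struct {
	Name string
	key  *datastore.Key
}

func (r *Record) setKey(k *datastore.Key) { r.key = k }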
Example #7
func executeCallbackQuery(con *Context, q *datastore.Query, limit int) ([]Callback, error) {
	var is = make([]Callback, 0, limit)
	var err error
	t := q.Run(con.C)
	for {
		var i Callback
		_, err = t.Next(&i)
		if err == datastore.Done {
			break
		}

		if err != nil {
			con.Log.Errorf("Error fetching next callback: %v", err)
			return nil, err
		}

		is = append(is, i)
	}

	return is, nil
}
Example #8
func doQuery(ctx context.Context, query *datastore.Query, resultKey, taskKey *datastore.Key, dst interface{}) ([]*datastore.Key, error) {
	keys, err := query.
		Ancestor(resultKey).
		Filter("Task=", taskKey).
		Order("-Time").
		Limit(1).
		GetAll(ctx, dst)
	if err != nil {
		return nil, err
	}
	return keys, nil
}
Example #9
func (cdb ComplaintDB) getComplaintsByQueryFromDatastore(q *datastore.Query) ([]*datastore.Key, []types.Complaint, error) {

	cdb.Debugf("gCBQFD_200", "getComplaintsByQueryFromDatastore")

	var data = []types.Complaint{}

	cdb.Debugf("gCBQFD_201", "calling GetAll() ...")
	keys, err := q.GetAll(cdb.Ctx(), &data)
	cdb.Debugf("gCBQFD_202", "... call done (n=%d)", len(keys))

	// We tolerate missing fields, because the DB is full of old objects with dead fields
	if err != nil {
		if mismatchErr, ok := err.(*datastore.ErrFieldMismatch); ok {
			_ = mismatchErr
			// cdb.Debugf("gCBQFD_203", "missing field: %v", mismatchErr)
		} else {
			return nil, nil, fmt.Errorf("gCBQFD: %v", err)
		}
	}

	return keys, data, nil
}
Example #10
// GetAll runs the query and returns all the keys that match the query, as well
// as appending the values to dst, setting the goon key fields of dst, and
// caching the returned data in local memory.
//
// For "keys-only" queries dst can be nil, however if it is not, then GetAll
// appends zero value structs to dst, only setting the goon key fields.
// No data is cached with "keys-only" queries.
//
// See: https://developers.google.com/appengine/docs/go/datastore/reference#Query.GetAll
func (g *Goon) GetAll(q *datastore.Query, dst interface{}) ([]*datastore.Key, error) {
	v := reflect.ValueOf(dst)
	vLenBefore := 0

	if dst != nil {
		if v.Kind() != reflect.Ptr {
			return nil, fmt.Errorf("goon: Expected dst to be a pointer to a slice or nil, got instead: %v", v.Kind())
		}

		v = v.Elem()
		if v.Kind() != reflect.Slice {
			return nil, fmt.Errorf("goon: Expected dst to be a pointer to a slice or nil, got instead: %v", v.Kind())
		}

		vLenBefore = v.Len()
	}

	keys, err := q.GetAll(g.Context, dst)
	if err != nil {
		g.error(err)
		return nil, err
	}
	if dst == nil || len(keys) == 0 {
		return keys, nil
	}

	keysOnly := ((v.Len() - vLenBefore) != len(keys))
	updateCache := !g.inTransaction && !keysOnly

	// If this is a keys-only query, we need to fill the slice with zero value elements
	if keysOnly {
		elemType := v.Type().Elem()
		ptr := false
		if elemType.Kind() == reflect.Ptr {
			elemType = elemType.Elem()
			ptr = true
		}

		if elemType.Kind() != reflect.Struct {
			return keys, fmt.Errorf("goon: Expected struct, got instead: %v", elemType.Kind())
		}

		for i := 0; i < len(keys); i++ {
			ev := reflect.New(elemType)
			if !ptr {
				ev = ev.Elem()
			}

			v.Set(reflect.Append(v, ev))
		}
	}

	if updateCache {
		g.cacheLock.Lock()
		defer g.cacheLock.Unlock()
	}

	for i, k := range keys {
		var e interface{}
		vi := v.Index(vLenBefore + i)
		if vi.Kind() == reflect.Ptr {
			e = vi.Interface()
		} else {
			e = vi.Addr().Interface()
		}

		if err := g.setStructKey(e, k); err != nil {
			return nil, err
		}

		if updateCache {
			// Cache lock is handled before the for loop
			g.cache[memkey(k)] = e
		}
	}

	return keys, nil
}
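A minimal usage sketch for the method above; the Widget kind and its fields are assumptions, only the goon:"id" tag convention is taken as given:

// Widget is a hypothetical entity; goon derives the datastore key from the
// field tagged goon:"id".
type Widget struct {
	ID   int64  `datastore:"-" goon:"id"`
	Name string
}

func listWidgets(g *Goon) ([]Widget, error) {
	var widgets []Widget
	q := datastore.NewQuery("Widget").Limit(50)
	if _, err := g.GetAll(q, &widgets); err != nil {
		return nil, err
	}
	return widgets, nil
}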
Example #11
// Count returns the number of results for the query.
func (g *Goon) Count(q *datastore.Query) (int, error) {
	return q.Count(g.Context)
}
Example #12
// Run runs the query.
func (g *Goon) Run(q *datastore.Query) *Iterator {
	return &Iterator{
		g: g,
		i: q.Run(g.Context),
	}
}
Example #13
func (s *Store) GetKeysInQuery(q *datastore.Query) ([]*datastore.Key, error) {
	return q.KeysOnly().GetAll(s.c, nil)
}
Example #14
func handler(w http.ResponseWriter, r *http.Request) {

	c := appengine.NewContext(r)
	u := user.Current(c)
	log.Infof(c, "Got a visitor to the front page!") //keep log in the imports

	//Check whether the request specifies "before" or "after" to build the right query
	//The GET requests for the stories will be based around the SubmitDateTime
	//Using "after" will return stories after a certain date from newest to oldest
	//Using "before" will return stories before a certain date from oldest to newest
	//Default is to use the latest 3 submissions

	afterDate := r.FormValue("after")
	beforeDate := r.FormValue("before")
	returnLimit := 3
	showPrevLink := false

	var q *datastore.Query

	if afterDate != "" {
		showPrevLink = true
		//Get the results in descending order for newest to oldest
		afterDate = strings.Replace(afterDate, "%20", " ", -1) //replace all %20 with " "
		ttime, err := time.Parse(DateTimeDatastoreFormat, afterDate)
		if err != nil {
			serveError(c, w, err)
			return
		}
		q = datastore.NewQuery(WebSubmissionEntityName).
			Filter("SubmitDateTime <", ttime).
			Order("-SubmitDateTime").
			Limit(returnLimit)
	} else if beforeDate != "" {
		showPrevLink = true
		//Get the results in ascending order from oldest to newest
		beforeDate = strings.Replace(beforeDate, "%20", " ", -1) //replace all %20 with " "
		ttime, err := time.Parse(DateTimeDatastoreFormat, beforeDate)
		if err != nil {
			serveError(c, w, err)
			return
		}
		q = datastore.NewQuery(WebSubmissionEntityName).
			Filter("SubmitDateTime >", ttime).
			Order("SubmitDateTime").
			Limit(returnLimit)

		//limit check: if fewer than returnLimit results remain, redo from the beginning
		length, cerr := q.Count(c)
		if cerr != nil {
			serveError(c, w, cerr)
			return
		}

		//TODO refactor to not duplicate the default query below
		if length < returnLimit {
			showPrevLink = false
			q = datastore.NewQuery(WebSubmissionEntityName).
				Order("-SubmitDateTime").
				Limit(returnLimit)
		}
	} else {
		q = datastore.NewQuery(WebSubmissionEntityName).
			Order("-SubmitDateTime").
			Limit(returnLimit)

	}

	//Populate the results struct and store the keys
	var pageCon PageContainer

	for t := q.Run(c); ; {
		var x WebSubmission
		key, err := t.Next(&x)
		if err == datastore.Done {
			break
		}
		if err != nil {
			//			serveError(c,w,err)
			fmt.Fprintf(w, "nope %v", err.Error())
			return
		}
		if u == nil {
			pageCon.Stories = append(pageCon.Stories, StoryListData{x, key, false})
		} else {
			pageCon.Stories = append(pageCon.Stories, StoryListData{x, key, u.String() == x.SubmitBy})
		}
	}

	//if we filled up the page with results there are probably more, build the
	//next page link
	length, cerr := q.Count(c)
	if cerr != nil {
		serveError(c, w, cerr)
		return
	}
	if length == returnLimit {
		//get the submit datetime of the last story
		pageCon.AfterLink = pageCon.Stories[returnLimit-1].Story.SubmitDateTime.Format(DateTimeDatastoreFormat)
	}

	//If it was a prev page press reverse the result array to get it back into chronological order
	if length >= 1 && beforeDate != "" {
		for i, j := 0, len(pageCon.Stories)-1; i < j; i, j = i+1, j-1 {
			pageCon.Stories[i], pageCon.Stories[j] = pageCon.Stories[j], pageCon.Stories[i]
		}
	}

	//prev page link
	//check the length because going forward you can have null data
	if showPrevLink && length >= 1 {
		pageCon.BeforeLink = pageCon.Stories[0].Story.SubmitDateTime.Format(DateTimeDatastoreFormat)
	}

	//build and show the page
	page := template.Must(template.ParseFiles(
		"public/templates/_base.html",
		"public/templates/storylist.html",
	))

	if err := page.Execute(w, pageCon); err != nil {
		serveError(c, w, err)
		fmt.Fprintf(w, "\n%v\n%v", err.Error(), pageCon)
		return
	}

}