Example no. 1
func (builder *stepsBuilder) LoadBatchFromCache(context appengine.Context) stream.OnDataFn {
	return func(data stream.T, emitter stream.Emitter) {
		batch := data.(*MemcacheLoadBatch)
		items, err := memcache.GetMulti(context, batch.Keys)

		if err != nil {
			panic(err)
		}

		for id, item := range items {
			if err := json.Unmarshal(item.Value, batch.Items[id]); err != nil {
				panic(err)
			}
			// Set entity key back
			batch.Items[id].Entity.SetKey(batch.Items[id].Key)
			delete(batch.Items, id)
		}

		// In case of cache misses, send entities
		// downstream to be handled by the next transformer
		if !batch.Empty() {
			for _, item := range batch.Items {
				emitter.Emit(item.Entity)
			}
		}
	}
}
Example no. 2
// GetMulti is a batch version of Get. Cached values are returned from memcache, uncached values are returned from
// datastore and memcached for next time.
//
// dst must be a []S, []*S, []I or []P, for some struct type S, some interface type I, or some non-interface
// non-pointer type P such that P or *P implements PropertyLoadSaver. If an []I, each element must be a valid
// dst for Get: it must be a struct pointer or implement PropertyLoadSaver.
//
// As a special case, PropertyList is an invalid type for dst, even though a PropertyList is a slice of structs.
// It is treated as invalid to avoid being mistakenly passed when []PropertyList was intended.
func GetMulti(c appengine.Context, key []*datastore.Key, dst interface{}) error {
	if len(key) == 0 {
		return nil
	}
	// check cache
	encodedKeys := encodeKeys(key)
	itemMap, errm := memcache.GetMulti(c, encodedKeys)
	if len(itemMap) != len(key) {
		// TODO benchmark loading all vs loading missing
		// load from datastore
		errd := datastore.GetMulti(c, key, dst)
		if Debug {
			c.Debugf("reading from datastore: %#v", dst)
		}
		if errd != nil {
			return errd
		}
		// cache for next time
		errm = cache(key, dst, c)
	} else {
		errm = decodeItems(key, itemMap, dst)
		if Debug {
			c.Debugf("reading from memcache: %#v", dst)
		}
	}
	return errm
}
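A caller-side sketch of how such a wrapper might be used. The Profile type, the key construction, and the loadProfiles helper are illustrative assumptions; the sketch is presumed to sit in the same package as the GetMulti wrapper above, with "appengine" and "appengine/datastore" imported.

// Profile is a hypothetical kind used only for illustration.
type Profile struct {
	Name  string
	Score int
}

func loadProfiles(c appengine.Context, ids []string) ([]Profile, error) {
	keys := make([]*datastore.Key, len(ids))
	for i, id := range ids {
		keys[i] = datastore.NewKey(c, "Profile", id, 0, nil)
	}
	// Cached entries are filled from memcache; the rest are read from the
	// datastore and written back to memcache for the next request.
	dst := make([]Profile, len(ids))
	if err := GetMulti(c, keys, dst); err != nil {
		return nil, err
	}
	return dst, nil
}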
Example no. 3
func (cd Codec) GetMulti(c appengine.Context, keys []string,
	dst interface{}) ([]*memcache.Item, error) {

	itemsMap, err := memcache.GetMulti(c, keys)
	if err != nil {
		return nil, err
	}

	v := reflect.ValueOf(dst)
	items := make([]*memcache.Item, len(keys))
	multiErr := make(appengine.MultiError, len(keys))
	for i, key := range keys {
		if item, ok := itemsMap[key]; ok {
			err := cd.Unmarshal(item.Value, v.Index(i).Interface())
			if err != nil {
				multiErr[i] = err
			} else {
				item.Object = v.Index(i).Interface()
				items[i] = item
			}
		} else {
			multiErr[i] = memcache.ErrCacheMiss
		}
	}
	return items, multiErr
}
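A sketch of how a caller might walk the per-key results. It assumes the same package as the Codec above (for instance a JSON codec), with "appengine" and "appengine/memcache" imported; the record type and loadCached helper are made up for illustration.

// loadCached is a hypothetical caller that separates hits from misses.
func loadCached(cd Codec, c appengine.Context, keys []string) (hits int, misses []string, err error) {
	type record struct {
		Value string
	}
	dst := make([]*record, len(keys))
	for i := range dst {
		dst[i] = new(record)
	}

	_, errs := cd.GetMulti(c, keys, dst)
	merr, ok := errs.(appengine.MultiError)
	if !ok {
		// Anything other than a MultiError means the whole lookup failed.
		return 0, nil, errs
	}
	for i, e := range merr {
		switch e {
		case nil:
			hits++
		case memcache.ErrCacheMiss:
			misses = append(misses, keys[i])
		default:
			return 0, nil, e
		}
	}
	return hits, misses, nil
}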
Example no. 4
func memGetMulti(c appengine.Context, keys []string, dests []interface{}) (items []*memcache.Item, errors []error) {
	items = make([]*memcache.Item, len(keys))
	errors = make([]error, len(keys))

	itemHash, err := memcache.GetMulti(c, keys)
	if err != nil {
		for index := range errors {
			errors[index] = err
		}
		return
	}

	var item *memcache.Item
	var ok bool
	for index, keyHash := range keys {
		if item, ok = itemHash[keyHash]; ok {
			items[index] = item
			if err := MemCodec.Unmarshal(item.Value, dests[index]); err != nil {
				errors[index] = err
			}
		} else {
			errors[index] = memcache.ErrCacheMiss
		}
	}
	return
}
Example no. 5
/*
memGetMulti looks up all the provided keys and loads them into destinationPointers.

It returns the memcache.Items it found, and any errors the lookups caused.

If c is within a transaction no lookup takes place and errors is a slice of memcache.ErrCacheMiss entries.
*/
func memGetMulti(c TransactionContext, keys []string, destinationPointers []interface{}) (items []*memcache.Item, errors appengine.MultiError) {
	items = make([]*memcache.Item, len(keys))
	errors = make(appengine.MultiError, len(keys))
	if !MemcacheEnabled || c.InTransaction() {
		for index := range errors {
			errors[index] = memcache.ErrCacheMiss
		}
		return
	}

	itemHash, err := memcache.GetMulti(c, keys)
	if err != nil {
		c.Errorf("Error doing GetMulti: %v", err)
		for index := range errors {
			errors[index] = memcache.ErrCacheMiss
		}
		return
	}

	var item *memcache.Item
	var ok bool
	for index, keyHash := range keys {
		if item, ok = itemHash[keyHash]; ok {
			items[index] = item
			if err := Codec.Unmarshal(item.Value, destinationPointers[index]); err != nil {
				errors[index] = err
			}
		} else {
			errors[index] = memcache.ErrCacheMiss
		}
	}
	return
}
Example no. 6
// RetrieveMultiFromStore attempts to retrieve all elements from memcache
func RetrieveMultiFromStore(c *appengine.Context, ids []string) ([]SteamAccountDetails, []string, error) {
	allIDSet := mapset.NewSet()
	for _, id := range ids {
		allIDSet.Add(id)
	}

	foundIDs := mapset.NewSet()
	var foundAccounts []SteamAccountDetails

	items, err := memcache.GetMulti(*c, ids)
	if err != nil {
		return nil, nil, err
	}

	for key, item := range items {
		var acc SteamAccountDetails
		if err := json.Unmarshal(item.Value, &acc); err != nil {
			return nil, nil, err
		}
		foundIDs.Add(key)
		foundAccounts = append(foundAccounts, acc)
	}

	var missingIDs []string
	missingIDSet := allIDSet.Difference(foundIDs)
	for v := range missingIDSet.Iter() {
		missingIDs = append(missingIDs, v.(string))
	}

	return foundAccounts, missingIDs, nil

}
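For context, a sketch of a read-through flow around RetrieveMultiFromStore: look up memcache first, fetch the missing ids from a backing source, and write those back with memcache.SetMulti. The GetAccounts name and the fetchDetails callback are assumptions; "encoding/json" and "appengine/memcache" are presumed imported.

// GetAccounts is a hypothetical read-through wrapper. fetchDetails loads a
// single SteamAccountDetails from whatever backing source the app uses.
func GetAccounts(c *appengine.Context, ids []string, fetchDetails func(string) (SteamAccountDetails, error)) ([]SteamAccountDetails, error) {
	accounts, missing, err := RetrieveMultiFromStore(c, ids)
	if err != nil {
		return nil, err
	}

	var items []*memcache.Item
	for _, id := range missing {
		acc, err := fetchDetails(id)
		if err != nil {
			return nil, err
		}
		accounts = append(accounts, acc)

		value, err := json.Marshal(acc)
		if err != nil {
			return nil, err
		}
		items = append(items, &memcache.Item{Key: id, Value: value})
	}

	// Best effort: a failed cache write should not fail the read.
	if len(items) > 0 {
		if err := memcache.SetMulti(*c, items); err != nil {
			(*c).Errorf("caching accounts: %v", err)
		}
	}
	return accounts, nil
}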
Example no. 7
// dst must be []Kind or []*Kind
func (d *Driver) GetMulti(keys []string, dst interface{}) error {
	var errors = make([]error, 0)
	if hits, err := memcache.GetMulti(d.ctx, keys); err == nil {
		dstValue := reflect.ValueOf(dst)
		for i, key := range keys {
			if item, ok := hits[key]; ok {
				objValue := dstValue.Index(i)
				if objValue.Kind() == reflect.Ptr {
					if string(item.Value) == _JSON_VALUE_NULL {
						continue
					}
					obj := reflect.New(objValue.Type().Elem()).Interface()
					if err = json.Unmarshal(item.Value, obj); err != nil {
						errors = append(errors, fmt.Errorf("Could not unmarshal JSON: %v", err))
						continue
					}
					objValue.Set(reflect.ValueOf(obj))
				} else {
					// Decode directly into the slice element so the result is
					// written back to dst; unmarshalling into a copy held in an
					// interface{} would be silently discarded.
					if err = json.Unmarshal(item.Value, objValue.Addr().Interface()); err != nil {
						errors = append(errors, fmt.Errorf("Could not unmarshal JSON: %v", err))
						continue
					}
				}
			}
		}
		if len(errors) > 0 {
			return fmt.Errorf("MultiError: %v", errors)
		}
		return nil
	} else {
		return err
	}
}
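A usage sketch for the []*Kind form, where Kind stands in for whatever struct the driver caches; after the call, elements the driver left nil are treated as misses (either a cache miss or a stored JSON null). The loadKinds helper is an assumption.

// loadKinds is a hypothetical caller; d is assumed to be an initialized *Driver.
func loadKinds(d *Driver, keys []string) ([]*Kind, []string, error) {
	dst := make([]*Kind, len(keys))
	if err := d.GetMulti(keys, dst); err != nil {
		return nil, nil, err
	}
	var found []*Kind
	var missing []string
	for i, obj := range dst {
		if obj == nil {
			missing = append(missing, keys[i])
			continue
		}
		found = append(found, obj)
	}
	return found, missing, nil
}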
Example no. 8
func (c *memcacheDriver) GetMulti(keys []string) (map[string][]byte, error) {
	results, err := memcache.GetMulti(c.c, keys)
	if err != nil && err != memcache.ErrCacheMiss {
		return nil, err
	}
	value := make(map[string][]byte, len(results))
	for k, v := range results {
		value[k] = v.Value
	}
	return value, nil
}
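A minimal decoding sketch on top of the byte map; the session type, the JSON encoding, and the loadSessions helper are assumptions ("encoding/json" presumed imported, same package as memcacheDriver).

// session is a hypothetical cached value type.
type session struct {
	UserID string
	Expiry int64
}

func loadSessions(d *memcacheDriver, keys []string) (map[string]session, error) {
	raw, err := d.GetMulti(keys)
	if err != nil {
		return nil, err
	}
	out := make(map[string]session, len(raw))
	for k, b := range raw {
		var s session
		if err := json.Unmarshal(b, &s); err != nil {
			return nil, err
		}
		out[k] = s
	}
	return out, nil
}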
Example no. 9
func AdminDateFormats(c mpg.Context, w http.ResponseWriter, r *http.Request) {
	type df struct {
		URL, Format string
	}
	keys := make([]string, dateFormatCount)
	for i := range keys {
		keys[i] = fmt.Sprintf("_dateformat-%v", i)
	}
	items, _ := memcache.GetMulti(c, keys)
	dfs := make(map[string]df)
	for k, v := range items {
		sp := strings.Split(string(v.Value), "|")
		dfs[k] = df{sp[1], sp[0]}
	}
	if err := templates.ExecuteTemplate(w, "admin-date-formats.html", dfs); err != nil {
		serveError(w, err)
	}
}
Example no. 10
func GetMulti(c appengine.Context, keys []string) ([]*memcache.Item, error) {
	itemsMap, err := memcache.GetMulti(c, keys)
	if err != nil {
		return nil, err
	}

	items := make([]*memcache.Item, len(keys))
	errs := make(appengine.MultiError, len(keys))
	for i, key := range keys {
		if item, ok := itemsMap[key]; ok {
			items[i] = item
		} else {
			errs[i] = memcache.ErrCacheMiss
		}
	}

	return items, errs
}
Example no. 11
func (db FlightDB) flightsFromShardedMemcache(key string) ([]f.Flight, bool) {
	/*
		if _,err := memcache.Gob.Get(db.C, memKey, &flights); err == nil {
			db.C.Infof(" ##== fdb cache hit for '%s'", memKey)
			return flights, true
		}*/

	keys := []string{}
	for i := 0; i < 32; i++ {
		keys = append(keys, fmt.Sprintf("=%d=%s", i*chunksize, key))
	}

	if items, err := memcache.GetMulti(db.C, keys); err != nil {
		db.C.Errorf("fdb memcache multiget: %v", err)
		return nil, false

	} else {
		b := []byte{}
		for i := 0; i < 32; i++ {
			if item, exists := items[keys[i]]; !exists {
				break
			} else {
				db.C.Infof(" #--- Found '%s' !", item.Key)
				b = append(b, item.Value...)
			}
		}

		db.C.Infof(" #--- Final read len: %d", len(b))

		buf := bytes.NewBuffer(b)
		flights := []f.Flight{}
		if err := gob.NewDecoder(buf).Decode(&flights); err != nil {
			db.C.Errorf("fdb memcache multiget decode: %v", err)
			return nil, false
		}
		db.C.Infof(" #--- Found all items ?")

		return flights, true
	}
}
Example no. 12
// bool means 'found'
func BytesFromShardedMemcache(c appengine.Context, key string) ([]byte, bool) {
	keys := []string{}
	for i := 0; i < 32; i++ {
		keys = append(keys, fmt.Sprintf("=%d=%s", i*chunksize, key))
	}

	if items, err := memcache.GetMulti(c, keys); err != nil {
		c.Errorf("fdb memcache multiget: %v", err)
		return nil, false

	} else {
		b := []byte{}
		for i := 0; i < 32; i++ {
			if item, exists := items[keys[i]]; !exists {
				break
			} else {
				c.Infof(" #=== Found '%s' !", item.Key)
				b = append(b, item.Value...)
			}
		}

		c.Infof(" #=== Final read len: %d", len(b))

		/*
			buf := bytes.NewBuffer(b)
			flights := []Flight{}
			if err := gob.NewDecoder(buf).Decode(&flights); err != nil {
				db.C.Errorf("fdb memcache multiget decode: %v", err)
				return nil,false
			}
		*/
		if len(b) > 0 {
			return b, true
		} else {
			return nil, false
		}
	}
}
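A possible writer counterpart, based only on the "=<offset>=<key>" scheme visible in the reader above (the project's actual writer is not shown): split the encoded blob into chunksize-sized shards and store them together with memcache.SetMulti. It assumes "fmt" and "appengine/memcache" are imported and that chunksize is the same constant the reader uses.

// BytesToShardedMemcache is a hypothetical writer for the sharded layout above.
func BytesToShardedMemcache(c appengine.Context, key string, data []byte) error {
	items := []*memcache.Item{}
	for i := 0; i*chunksize < len(data); i++ {
		lo := i * chunksize
		hi := lo + chunksize
		if hi > len(data) {
			hi = len(data)
		}
		items = append(items, &memcache.Item{
			Key:   fmt.Sprintf("=%d=%s", lo, key),
			Value: data[lo:hi],
		})
	}
	if len(items) == 0 {
		return nil
	}
	// The reader only probes the first 32 shards, so anything beyond
	// 32*chunksize bytes would be silently truncated on read. It also stops
	// at the first missing shard, which is why all shards are written in a
	// single SetMulti call.
	return memcache.SetMulti(c, items)
}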
Example no. 13
func (u GAE) MemoryGet(items *[]MemoryItem) error {
	if items == nil || len(*items) < 1 {
		return nil
	}

	keys := make([]string, len(*items))
	for index, item := range *items {
		keys[index] = item.Key
	}

	gaeItems, err := memcache.GetMulti(u.context, keys)
	if err != nil {
		return err
	}

	for index, item := range *items {
		if gaeItem, ok := gaeItems[item.Key]; ok {
			(*items)[index].Value = string(gaeItem.Value)
		}
	}

	return nil
}
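A caller sketch, assuming MemoryItem exposes Key and Value string fields as the loop above implies; keys that were not found simply keep an empty Value. The readStrings helper is an assumption.

// readStrings is a hypothetical helper built on MemoryGet.
func readStrings(u GAE, keys []string) map[string]string {
	items := make([]MemoryItem, len(keys))
	for i, k := range keys {
		items[i] = MemoryItem{Key: k}
	}
	out := make(map[string]string, len(keys))
	if err := u.MemoryGet(&items); err != nil {
		return out // treat a failed lookup as all misses
	}
	for _, it := range items {
		if it.Value != "" {
			out[it.Key] = it.Value
		}
	}
	return out
}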
Example no. 14
File: ui.go Project: rdterner/build
// populateBuildingURLs populates each commit in Commits' buildingURLs map with the
// URLs of builds which are currently in progress.
func (td *uiTemplateData) populateBuildingURLs(ctx appengine.Context) {
	// need are memcache keys: "building|<hash>|<gohash>|<builder>"
	// The hash is of the main "go" repo, or the subrepo commit hash.
	// The gohash is empty for the main repo, else it's the Go hash.
	var need []string

	commit := map[string]*Commit{} // commit hash -> Commit

	// Gather pending commits for main repo.
	for _, b := range td.Builders {
		for _, c := range td.Commits {
			if c.Result(b, "") == nil {
				commit[c.Hash] = c
				need = append(need, buildingKey(c.Hash, "", b))
			}
		}
	}

	// Gather pending commits for sub-repos.
	for _, ts := range td.TagState {
		goHash := ts.Tag.Hash
		for _, b := range td.Builders {
			for _, pkg := range ts.Packages {
				c := pkg.Commit
				commit[c.Hash] = c
				if c.Result(b, goHash) == nil {
					need = append(need, buildingKey(c.Hash, goHash, b))
				}
			}
		}
	}

	if len(need) == 0 {
		return
	}

	m, err := memcache.GetMulti(ctx, need)
	if err != nil {
		// oh well. this is a cute non-critical feature anyway.
		ctx.Debugf("GetMulti of building keys: %v", err)
		return
	}
	for k, it := range m {
		f := strings.SplitN(k, "|", 4)
		if len(f) != 4 {
			continue
		}
		hash, goHash, builder := f[1], f[2], f[3]
		c, ok := commit[hash]
		if !ok {
			continue
		}
		m := c.buildingURLs
		if m == nil {
			m = make(map[builderAndGoHash]string)
			c.buildingURLs = m
		}
		m[builderAndGoHash{builder, goHash}] = string(it.Value)
	}

}
Example no. 15
func Index(w http.ResponseWriter, r *http.Request) {
	keys := make([]string, modulus)
	for i := range keys {
		keys[i] = fmt.Sprintf(keyPart, i*distance)
	}

	c := context(r)
	items, err := memcache.GetMulti(c, keys)
	if err != nil {
		return
	}

	ars := AllRequestStats{}
	for _, v := range items {
		t := stats_part{}
		err := gob.NewDecoder(bytes.NewBuffer(v.Value)).Decode(&t)
		if err != nil {
			continue
		}
		r := RequestStats(t)
		ars = append(ars, &r)
	}
	sort.Sort(reverse{ars})

	requestById := make(map[int]*RequestStats, len(ars))
	idByRequest := make(map[*RequestStats]int, len(ars))
	requests := make(map[int]*StatByName)
	byRequest := make(map[int]map[string]cVal)
	for i, v := range ars {
		idx := i + 1
		requestById[idx] = v
		idByRequest[v] = idx
		requests[idx] = &StatByName{
			RequestStats: v,
		}
		byRequest[idx] = make(map[string]cVal)
	}

	requestByPath := make(map[string][]int)
	byCount := make(map[string]cVal)
	byRPC := make(map[SKey]cVal)
	for _, t := range ars {
		id := idByRequest[t]

		requestByPath[t.Path] = append(requestByPath[t.Path], id)

		for _, r := range t.RPCStats {
			rpc := r.Name()

			v := byRequest[id][rpc]
			v.count++
			v.cost += r.Cost
			byRequest[id][rpc] = v

			v = byCount[rpc]
			v.count++
			v.cost += r.Cost
			byCount[rpc] = v

			v = byRPC[SKey{rpc, t.Path}]
			v.count++
			v.cost += r.Cost
			byRPC[SKey{rpc, t.Path}] = v
		}
	}

	for k, v := range byRequest {
		stats := StatsByName{}
		for rpc, s := range v {
			stats = append(stats, &StatByName{
				Name:  rpc,
				Count: s.count,
				Cost:  s.cost,
			})
		}
		sort.Sort(reverse{stats})
		requests[k].SubStats = stats
	}

	statsByRPC := make(map[string]StatsByName)
	pathStats := make(map[string]StatsByName)
	for k, v := range byRPC {
		statsByRPC[k.a] = append(statsByRPC[k.a], &StatByName{
			Name:  k.b,
			Count: v.count,
			Cost:  v.cost,
		})
		pathStats[k.b] = append(pathStats[k.b], &StatByName{
			Name:  k.a,
			Count: v.count,
			Cost:  v.cost,
		})
	}
	for k, v := range statsByRPC {
		sort.Sort(reverse{v})
		statsByRPC[k] = v
	}

	pathStatsByCount := StatsByName{}
	for k, v := range pathStats {
		total := 0
		var cost int64
		for _, stat := range v {
			total += stat.Count
			cost += stat.Cost
		}
		sort.Sort(reverse{v})

		pathStatsByCount = append(pathStatsByCount, &StatByName{
			Name:       k,
			Count:      total,
			Cost:       cost,
			SubStats:   v,
			Requests:   len(requestByPath[k]),
			RecentReqs: requestByPath[k],
		})
	}
	sort.Sort(reverse{pathStatsByCount})

	allStatsByCount := StatsByName{}
	for k, v := range byCount {
		allStatsByCount = append(allStatsByCount, &StatByName{
			Name:     k,
			Count:    v.count,
			Cost:     v.cost,
			SubStats: statsByRPC[k],
		})
	}
	sort.Sort(reverse{allStatsByCount})

	v := struct {
		Env                 map[string]string
		Requests            map[int]*StatByName
		RequestStatsByCount map[int]*StatByName
		AllStatsByCount     StatsByName
		PathStatsByCount    StatsByName
	}{
		Env: map[string]string{
			"APPLICATION_ID": appengine.AppID(c),
		},
		Requests:         requests,
		AllStatsByCount:  allStatsByCount,
		PathStatsByCount: pathStatsByCount,
	}

	_ = templates.ExecuteTemplate(w, "main", v)
}
Example no. 16
// GetMulti is a batch version of Get.
//
// dst must be a *[]S, *[]*S, *[]I, []S, []*S, or []I, for some struct type S,
// or some interface type I. If *[]I or []I, each element must be a struct pointer.
func (g *Goon) GetMulti(dst interface{}) error {
	keys, err := g.extractKeys(dst, false) // don't allow incomplete keys on a Get request
	if err != nil {
		return err
	}

	v := reflect.Indirect(reflect.ValueOf(dst))

	if g.inTransaction {
		// todo: support getMultiLimit in transactions
		return datastore.GetMulti(g.Context, keys, v.Interface())
	}

	var dskeys []*datastore.Key
	var dsdst []interface{}
	var dixs []int

	var memkeys []string
	var mixs []int

	g.cacheLock.RLock()
	for i, key := range keys {
		m := memkey(key)
		vi := v.Index(i)

		if vi.Kind() == reflect.Struct {
			vi = vi.Addr()
		}

		if s, present := g.cache[m]; present {
			if vi.Kind() == reflect.Interface {
				vi = vi.Elem()
			}

			reflect.Indirect(vi).Set(reflect.Indirect(reflect.ValueOf(s)))
		} else {
			memkeys = append(memkeys, m)
			mixs = append(mixs, i)
			dskeys = append(dskeys, key)
			dsdst = append(dsdst, vi.Interface())
			dixs = append(dixs, i)
		}
	}
	g.cacheLock.RUnlock()

	if len(memkeys) == 0 {
		return nil
	}

	multiErr, any := make(appengine.MultiError, len(keys)), false

	memvalues, err := memcache.GetMulti(appengine.Timeout(g.Context, MemcacheGetTimeout), memkeys)
	if appengine.IsTimeoutError(err) {
		g.timeoutError(err)
		err = nil
	} else if err != nil {
		g.error(err) // timing out or another error from memcache isn't something to fail over, but do log it
		// No memvalues found, prepare the datastore fetch list already prepared above
	} else if len(memvalues) > 0 {
		// since memcache fetch was successful, reset the datastore fetch list and repopulate it
		dskeys = dskeys[:0]
		dsdst = dsdst[:0]
		dixs = dixs[:0]
		// we only want to check the returned map if there weren't any errors
		// unlike the datastore, memcache will return a smaller map with no error if some of the keys were missed

		for i, m := range memkeys {
			d := v.Index(mixs[i]).Interface()
			if v.Index(mixs[i]).Kind() == reflect.Struct {
				d = v.Index(mixs[i]).Addr().Interface()
			}
			if s, present := memvalues[m]; present {
				err := deserializeStruct(d, s.Value)
				if err == datastore.ErrNoSuchEntity {
					any = true // this flag tells GetMulti to return multiErr later
					multiErr[mixs[i]] = err
				} else if err != nil {
					g.error(err)
					return err
				} else {
					g.putMemory(d)
				}
			} else {
				dskeys = append(dskeys, keys[mixs[i]])
				dsdst = append(dsdst, d)
				dixs = append(dixs, mixs[i])
			}
		}
		if len(dskeys) == 0 {
			if any {
				return realError(multiErr)
			}
			return nil
		}
	}

	goroutines := (len(dskeys)-1)/getMultiLimit + 1
	var wg sync.WaitGroup
	wg.Add(goroutines)
	for i := 0; i < goroutines; i++ {
		go func(i int) {
			defer wg.Done()
			var toCache []interface{}
			var exists []byte
			lo := i * getMultiLimit
			hi := (i + 1) * getMultiLimit
			if hi > len(dskeys) {
				hi = len(dskeys)
			}
			gmerr := datastore.GetMulti(g.Context, dskeys[lo:hi], dsdst[lo:hi])
			if gmerr != nil {
				any = true // this flag tells GetMulti to return multiErr later
				merr, ok := gmerr.(appengine.MultiError)
				if !ok {
					g.error(gmerr)
					for j := lo; j < hi; j++ {
						multiErr[j] = gmerr
					}
					return
				}
				for i, idx := range dixs[lo:hi] {
					if merr[i] == nil {
						toCache = append(toCache, dsdst[lo+i])
						exists = append(exists, 1)
					} else {
						if merr[i] == datastore.ErrNoSuchEntity {
							toCache = append(toCache, dsdst[lo+i])
							exists = append(exists, 0)
						}
						multiErr[idx] = merr[i]
					}
				}
			} else {
				toCache = append(toCache, dsdst[lo:hi]...)
				exists = append(exists, bytes.Repeat([]byte{1}, hi-lo)...)
			}
			if len(toCache) > 0 {
				if err := g.putMemcache(toCache, exists); err != nil {
					g.error(err)
					// since putMemcache() gives no guarantee it will actually store the data in memcache
					// we log and swallow this error
				}

			}
		}(i)
	}
	wg.Wait()
	if any {
		return realError(multiErr)
	}
	return nil
}
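A typical caller sketch for this GetMulti, written as if it sat alongside the goon code above. The User type and its goon:"id" tag follow goon's usual convention but are assumptions here; per-row failures surface through appengine.MultiError, and realError (not shown above) may collapse a uniform batch of misses into a single error.

// User is a hypothetical entity; goon derives its key from the goon:"id" field.
type User struct {
	Id   int64  `datastore:"-" goon:"id"`
	Name string
}

// loadUsers fetches a batch of users and drops the ids that do not exist.
func loadUsers(g *Goon, ids []int64) ([]*User, error) {
	users := make([]*User, len(ids))
	for i, id := range ids {
		users[i] = &User{Id: id}
	}

	err := g.GetMulti(users)
	if err == nil {
		return users, nil
	}
	if merr, ok := err.(appengine.MultiError); ok {
		// Keep the rows that loaded; a per-row datastore.ErrNoSuchEntity just
		// means that id does not exist in cache or datastore.
		loaded := users[:0]
		for i, e := range merr {
			if e == nil {
				loaded = append(loaded, users[i])
			} else if e != datastore.ErrNoSuchEntity {
				return nil, e
			}
		}
		return loaded, nil
	}
	if err == datastore.ErrNoSuchEntity {
		// Possible if realError collapses an all-miss MultiError.
		return nil, nil
	}
	return nil, err
}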