func TriggerBatchProcessing(c appengine.Context, article ArticleId) error { // Instead of submitting a task to match incoming bids, resulting in one task per bid, // we collect bids for up to two seconds and batch-process them afterwards. semaphoreKey := "semaphore-" + string(article) if semaphore, err := memcache.Increment(c, semaphoreKey, 1, 0); err != nil { return err } else if semaphore >= 2 { c.Infof("Batch processing already triggered for article %v", article) memcache.IncrementExisting(c, semaphoreKey, -1) return nil } else { time.Sleep(1 * time.Second) c.Infof("Starting batch processing...") memcache.IncrementExisting(c, semaphoreKey, -1) time_before := time.Now() matchingErr := MatchIncomingBids(c, article) time_after := time.Now() duration := time_after.Sub(time_before) if duration > 1000*time.Millisecond { c.Errorf("Batch processing finished after %v. Limit exceeded!", duration) } else if duration > 500*time.Millisecond { c.Warningf("Batch processing finished after %v. Limit in danger.", duration) } else { c.Infof("Batch processing finished after %v.", duration) } return matchingErr } }
func Counter(w http.ResponseWriter, r *http.Request) { c := appengine.NewContext(r) // 未設定の場合は第4引数の値で初期化する // memcache.IncrementExistingは未設定だとエラーになる if newValue, err := memcache.Increment(c, "inc", 1, 0); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } else { fmt.Fprintf(w, "newValue = %d\n", newValue) } if stats, err := memcache.Stats(c); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } else { // キャッシュヒットとなる要求の回数 fmt.Fprintf(w, "Hits = %d\n", stats.Hits) // キャッシュミスとなる要求の回数 fmt.Fprintf(w, "Misses = %d\n", stats.Misses) // 取得要求時の総データ転送量 fmt.Fprintf(w, "ByteHits = %d\n", stats.ByteHits) // キャッシュに保存されているキーと値のペア数 fmt.Fprintf(w, "Items = %d\n", stats.Items) // キャッシュ内のすべてのアイテムの合計サイズ fmt.Fprintf(w, "Bytes = %d\n", stats.Bytes) // キャッシュ内の一番古いアイテムにアクセスされた時からの秒数 fmt.Fprintf(w, "Oldest = %d\n", stats.Oldest) } }
func Rate(c appengine.Context, clientID string) uint64 { rate, err := memcache.Increment(c, cacheKey(clientID), 0, 0) if err != nil { return 0 } return uint64(rate) }
// Tick sets the current logical datastore time to a never-before-used time // and returns that time. It should be called to invalidate the cache. func Tick(c appengine.Context) uint64 { t, err := memcache.Increment(c, timeKey, 1, newTime()) if err != nil { c.Errorf("cache.Tick: %v", err) return 0 } return t }
// Now returns the current logical datastore time to use for cache lookups. func Now(c appengine.Context) uint64 { t, err := memcache.Increment(c, TimeKey, 0, newTime()) if err != nil { c.Errorf("cache.Now: %v", err) return 0 } return t }
// Increment increments the named counter. func Increment(c appengine.Context, valName string) error { // Get counter config. wNumShards := dsu.WrapInt{} dsu.McacheGet(c, mCKNumShards(valName), &wNumShards) if wNumShards.I < 1 { ckey := datastore.NewKey(c, dsKindNumShards, mCKNumShards(valName), 0, nil) errTx := datastore.RunInTransaction(c, func(c appengine.Context) error { err := datastore.Get(c, ckey, &wNumShards) if err == datastore.ErrNoSuchEntity { wNumShards.I = defaultNumShards _, err = datastore.Put(c, ckey, &wNumShards) } return err }, nil) if errTx != nil { return errTx } dsu.McacheSet(c, mCKNumShards(valName), dsu.WrapInt{wNumShards.I}) } // pick random counter and increment it errTx := datastore.RunInTransaction(c, func(c appengine.Context) error { shardId := rand.Intn(wNumShards.I) dsKey := datastore.NewKey(c, dsKindShard, dSKSingleShard(valName, shardId), 0, nil) var sd WrapShardData err := datastore.Get(c, dsKey, &sd) // A missing entity and a present entity will both work. if err != nil && err != datastore.ErrNoSuchEntity { return err } sd.Name = valName sd.ShardId = shardId sd.I++ _, err = datastore.Put(c, dsKey, &sd) c.Infof("ds put %v %v", dsKey, sd) return err }, nil) if errTx != nil { return errTx } memcache.Increment(c, mCKValue(valName), 1, 0) // collect number of updates // per valName per instance in memory // for every interval of 10 minutes // // a batch job checks if the number of shards should be increased or decreased // and truncates this map updateSamplingFrequency[valName+util.TimeMarker()[:len("2006-01-02 15:0")]] += 1 return nil }
func Check(c appengine.Context, clientID string, limit uint64) (uint64, bool) { rate, err := memcache.Increment(c, cacheKey(clientID), 1, 0) if err != nil { return 0, true } if err == nil && rate > limit { return rate, false } return rate, true }
func Incr(c TransactionContext, key string, delta int64, initial uint64) (newValue uint64, err error) { k, err := Keyify(key) if err != nil { return } if newValue, err = memcache.Increment(c, k, delta, initial); err != nil { err = errors.Errorf("Error doing Increment %#v: %v", k, err) return } return }
// IncrementBy increments the named counter by a specified amount. func IncrementBy(c appengine.Context, name string, by int) error { // Get counter config. cfg, err := cfgMemo.getOrCreate(c, name) if err != nil { return err } err = datastore.RunInTransaction(c, func(c appengine.Context) error { shardName := fmt.Sprintf("shard%d", rand.Intn(cfg.Shards)) key := datastore.NewKey(c, shardKind, shardName, 0, nil) var s shard err := datastore.Get(c, key, &s) // A missing entity and a present entity will both work. if err != nil && err != datastore.ErrNoSuchEntity { return err } s.Count += by _, err = datastore.Put(c, key, &s) return err }, nil) if err == nil { memcache.Increment(c, memcacheKey(name), int64(by), 0) } return err }
// Increment atomically adds delta to the memcache item stored under key,
// initializing it to initialValue when the item does not yet exist, and
// returns the resulting value. It is a thin wrapper around memcache.Increment.
func Increment(c appengine.Context, key string, delta int64, initialValue uint64) (newValue uint64, err error) {
	newValue, err = memcache.Increment(c, key, delta, initialValue)
	return
}