func (a *MemcacheDatastoreAccessor) cacheSameValues(c context.Context, cacheKeys []string, data interface{}, expiration time.Duration) error {
	N := len(cacheKeys)

	var buffer bytes.Buffer
	enc := gob.NewEncoder(&buffer)
	err := enc.Encode(&data)

	if err != nil {
		log.Debugf(c, "Failed encoding for cache keys [%v] : %v", cacheKeys, err)
		return err
	}

	items := make([]*memcache.Item, N)
	for i, cacheKey := range cacheKeys {
		cacheItem := &memcache.Item{
			Key:        cacheKey,
			Value:      buffer.Bytes(),
			Expiration: expiration,
		}
		items[i] = cacheItem
	}

	// Set the items, unconditionally, in 1 batch call
	err = memcache.SetMulti(c, items)
	if err != nil {
		log.Debugf(c, "Failed setting cache items: %v", cacheKeys, err)
	}
	return err
}
func (a *MemcacheDatastoreAccessor) cacheValues(c context.Context, cacheKeys []string, data []interface{}, expiration time.Duration) error {
	if len(cacheKeys) != len(data) {
		panic(fmt.Errorf("Wrong params length", len(cacheKeys), len(data)))
	}
	N := len(cacheKeys)

	items := make([]*memcache.Item, N)
	for i, cacheKey := range cacheKeys {
		cacheData := data[i]
		// Encode each value into its own buffer: reusing a single buffer for
		// every item would make later items carry the accumulated bytes of
		// all previous encodings.
		var buffer bytes.Buffer
		err := gob.NewEncoder(&buffer).Encode(&cacheData)
		if err != nil {
			log.Debugf(c, "Failed encoding for cache[%v] : %v", cacheKey, err)
			return err
		}
		cacheItem := &memcache.Item{
			Key:        cacheKey,
			Value:      buffer.Bytes(),
			Expiration: expiration,
		}
		items[i] = cacheItem
	}

	// Set the items, unconditionally, in 1 batch call
	err := memcache.SetMulti(c, items)
	if err != nil {
		log.Debugf(c, "Failed setting cache items: %v", cacheKeys, err)
	}
	return err
}
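For context, values stored by the two helpers above are plain gob payloads, so the read path is a memcache.Get followed by a gob decode. Below is a minimal sketch of that reverse direction, using the same bytes, encoding/gob and appengine memcache packages; the readCachedValue helper is hypothetical, and because the writers encode an interface{}, the concrete types involved would also need to be registered with gob.Register.

// Hypothetical read-side companion: fetch one key and gob-decode its payload.
// Returns memcache.ErrCacheMiss when the key is absent or expired.
func (a *MemcacheDatastoreAccessor) readCachedValue(c context.Context, cacheKey string) (interface{}, error) {
	item, err := memcache.Get(c, cacheKey)
	if err != nil {
		return nil, err
	}
	var data interface{}
	// Mirror of the write side, which did enc.Encode(&data) on an interface{}.
	err = gob.NewDecoder(bytes.NewReader(item.Value)).Decode(&data)
	return data, err
}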
Example #3
func save(ctx context.Context) {
	stats := stats(ctx)
	stats.wg.Wait()
	stats.Duration = time.Since(stats.Start)

	var buf_part, buf_full bytes.Buffer
	full := stats_full{
		Header: header(ctx),
		Stats:  stats,
	}
	if err := gob.NewEncoder(&buf_full).Encode(&full); err != nil {
		log.Errorf(ctx, "appstats Save error: %v", err)
		return
	} else if buf_full.Len() > bufMaxLen {
		// first try clearing stack traces
		for i := range full.Stats.RPCStats {
			full.Stats.RPCStats[i].StackData = ""
		}
		buf_full.Truncate(0)
		gob.NewEncoder(&buf_full).Encode(&full)
	}
	part := stats_part(*stats)
	for i := range part.RPCStats {
		part.RPCStats[i].StackData = ""
		part.RPCStats[i].In = ""
		part.RPCStats[i].Out = ""
	}
	if err := gob.NewEncoder(&buf_part).Encode(&part); err != nil {
		log.Errorf(ctx, "appstats Save error: %v", err)
		return
	}

	item_part := &memcache.Item{
		Key:   stats.PartKey(),
		Value: buf_part.Bytes(),
	}

	item_full := &memcache.Item{
		Key:   stats.FullKey(),
		Value: buf_full.Bytes(),
	}

	log.Infof(ctx, "Saved; %s: %s, %s: %s, link: %v",
		item_part.Key,
		byteSize(len(item_part.Value)),
		item_full.Key,
		byteSize(len(item_full.Value)),
		URL(ctx),
	)

	nc := storeContext(ctx)
	memcache.SetMulti(nc, []*memcache.Item{item_part, item_full})
}
Example #4
// SetMulti stores multiple objects under the given keys; keys[i] must correspond to the i-th element of the src slice.
func (d *Driver) SetMulti(keys []string, src interface{}) error {
	items := make([]*memcache.Item, 0, len(keys))
	srcValue := reflect.ValueOf(src)
	for i, key := range keys {
		obj := srcValue.Index(i).Interface()
		buff, err := json.Marshal(obj)
		if err != nil {
			return err
		}
		items = append(items, &memcache.Item{
			Key:   key,
			Value: buff,
		})
	}
	return memcache.SetMulti(d.ctx, items)
}
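A possible caller sketch, to make the contract explicit: keys[i] must line up with the i-th element of src, since SetMulti indexes src by position via reflection. The user type and cacheUsers helper below are made up for illustration.

type user struct {
	Name string `json:"name"`
	Age  int    `json:"age"`
}

// Hypothetical usage: one key per element, in the same order as the slice.
func cacheUsers(d *Driver, users []user) error {
	keys := make([]string, len(users))
	for i, u := range users {
		keys[i] = "user:" + u.Name
	}
	return d.SetMulti(keys, users)
}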
Example #5
// Object usually too big (1MB limit), so shard.
// http://stackoverflow.com/questions/9127982/
func BytesToShardedMemcache(c context.Context, key string, b []byte) {
	items := []*memcache.Item{}
	for i := 0; i < len(b); i += chunksize {
		k := fmt.Sprintf("=%d=%s", i, key)
		s, e := i, i+chunksize-1
		if e >= len(b) {
			e = len(b) - 1
		}
		log.Infof(c, " #=== [%7d, %7d] (%d) %s", s, e, len(b), k)
		items = append(items, &memcache.Item{Key: k, Value: b[s : e+1]}) // slice syntax is [s,e)
	}

	if err := memcache.SetMulti(c, items); err != nil {
		log.Errorf(c, " #=== cdb sharded store fail: %v", err)
	}

	log.Infof(c, " #=== Stored '%s' (len=%d)!", key, len(b))
}
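The read path is not shown on this page; here is a rough sketch of the reverse operation, assuming the same chunksize constant and the "=%d=%s" key layout used above (the ShardedMemcacheToBytes name is made up). Note that if a middle chunk has been evicted the reassembled value is silently truncated, so callers should store a length or checksum alongside the key.

// Hypothetical reverse of BytesToShardedMemcache: fetch chunks in order and
// concatenate them until a key is missing or a short chunk marks the end.
func ShardedMemcacheToBytes(c context.Context, key string) ([]byte, bool) {
	b := []byte{}
	for i := 0; ; i += chunksize {
		k := fmt.Sprintf("=%d=%s", i, key)
		item, err := memcache.Get(c, k)
		if err == memcache.ErrCacheMiss {
			break // no more chunks stored under this key
		} else if err != nil {
			log.Errorf(c, " #=== cdb sharded fetch fail: %v", err)
			return nil, false
		}
		b = append(b, item.Value...)
		if len(item.Value) < chunksize {
			break // a short chunk is always the last one
		}
	}
	return b, len(b) > 0
}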
Example #6
func (g *Goon) putMemcache(srcs []interface{}, exists []byte) error {
	items := make([]*memcache.Item, len(srcs))
	payloadSize := 0
	for i, src := range srcs {
		toSerialize := src
		if exists[i] == 0 {
			toSerialize = nil
		}
		data, err := serializeStruct(toSerialize)
		if err != nil {
			g.error(err)
			return err
		}
		key, _, err := g.getStructKey(src)
		if err != nil {
			return err
		}
		// payloadSize will overflow if we push 2+ gigs on a 32bit machine
		payloadSize += len(data)
		items[i] = &memcache.Item{
			Key:   memkey(key),
			Value: data,
		}
	}
	memcacheTimeout := MemcachePutTimeoutSmall
	if payloadSize >= MemcachePutTimeoutThreshold {
		memcacheTimeout = MemcachePutTimeoutLarge
	}
	errc := make(chan error)
	go func() {
		tc, cf := context.WithTimeout(g.Context, memcacheTimeout)
		errc <- memcache.SetMulti(tc, items)
		cf()
	}()
	g.putMemoryMulti(srcs, exists)
	err := <-errc
	if appengine.IsTimeoutError(err) {
		g.timeoutError(err)
		err = nil
	} else if err != nil {
		g.error(err)
	}
	return err
}
Example #7
func TestPutGetDelete(t *testing.T) {
	c, closeFunc := NewContext(t)
	defer closeFunc()

	type testEntity struct {
		IntVal int
	}

	// Check that we set memcache, put to datastore, and delete from memcache.
	seq := make(chan string, 3)
	nds.SetMemcacheSetMulti(func(c context.Context,
		items []*memcache.Item) error {
		seq <- "memcache.SetMulti"
		return memcache.SetMulti(c, items)
	})
	nds.SetDatastorePutMulti(func(c context.Context,
		keys []*datastore.Key, vals interface{}) ([]*datastore.Key, error) {
		seq <- "datastore.PutMulti"
		return datastore.PutMulti(c, keys, vals)
	})
	nds.SetMemcacheDeleteMulti(func(c context.Context,
		keys []string) error {
		seq <- "memcache.DeleteMulti"
		close(seq)
		return memcache.DeleteMulti(c, keys)
	})

	incompleteKey := datastore.NewIncompleteKey(c, "Entity", nil)
	key, err := nds.Put(c, incompleteKey, &testEntity{43})
	if err != nil {
		t.Fatal(err)
	}

	nds.SetMemcacheSetMulti(memcache.SetMulti)
	nds.SetDatastorePutMulti(datastore.PutMulti)
	nds.SetMemcacheDeleteMulti(memcache.DeleteMulti)

	if s := <-seq; s != "memcache.SetMulti" {
		t.Fatal("memcache.SetMulti not", s)
	}
	if s := <-seq; s != "datastore.PutMulti" {
		t.Fatal("datastore.PutMulti not", s)
	}
	if s := <-seq; s != "memcache.DeleteMulti" {
		t.Fatal("memcache.DeleteMulti not", s)
	}
	// Check chan is closed.
	<-seq

	if key.Incomplete() {
		t.Fatal("Key is incomplete")
	}

	te := &testEntity{}
	if err := nds.Get(c, key, te); err != nil {
		t.Fatal(err)
	}

	if te.IntVal != 43 {
		t.Fatal("te.Val != 43", te.IntVal)
	}

	// Get from cache.
	te = &testEntity{}
	if err := nds.Get(c, key, te); err != nil {
		t.Fatal(err)
	}

	if te.IntVal != 43 {
		t.Fatal("te.Val != 43", te.IntVal)
	}

	// Change value.
	if _, err := nds.Put(c, key, &testEntity{64}); err != nil {
		t.Fatal(err)
	}

	// Get from cache.
	te = &testEntity{}
	if err := nds.Get(c, key, te); err != nil {
		t.Fatal(err)
	}

	if te.IntVal != 64 {
		t.Fatal("te.Val != 64", te.IntVal)
	}

	if err := nds.Delete(c, key); err != nil {
		t.Fatal(err)
	}

	if err := nds.Get(c, key, &testEntity{}); err != datastore.ErrNoSuchEntity {
		t.Fatal("expected datastore.ErrNoSuchEntity")
	}
}
Example #8
// Save will write collected statistics to memcache. If client code
// is wrapped by a Handler, Save will be called transparently.
func Save(ctx context.Context) error {
	stats := stats(ctx)
	stats.wg.Wait()
	stats.Duration = time.Since(stats.Start)

	buf := bufp.Get().(*bytes.Buffer)
	defer bufp.Put(buf)

	enc := gob.NewEncoder(buf)

	full := statsFull{
		Header: header(ctx),
		Stats:  stats,
	}
	if err := enc.Encode(&full); err != nil {
		log.Errorf(ctx, "appstats: save: %v", err)
		return err
	}
	if bufMaxLen > 0 && buf.Len() > bufMaxLen {
		// buf grew too large, so we have to cut stuff down. Stack traces are
		// a good bet. They are strings, so are roughly consistent in length
		// even after encoding and are the most verbose component of the
		// stats.
		overflow := buf.Len() - bufMaxLen
		buf.Reset()

		// NOTE: overflow is measured on the encoded bytes, but the loop below
		// trims the raw StackData strings, so the amount trimmed is only an
		// approximation.
		for i := 0; overflow > 0 && i < len(stats.RPCStats); i++ {
			l := len(stats.RPCStats[i].StackData)
			trunc := min(l, overflow)
			overflow -= trunc
			stats.RPCStats[i].StackData = stats.RPCStats[i].StackData[0 : l-trunc]
		}
		enc.Encode(&full)
	}

	n := buf.Len()

	part := statsPart(*stats)
	for i := range part.RPCStats {
		part.RPCStats[i].StackData = ""
		part.RPCStats[i].In = ""
		part.RPCStats[i].Out = ""
	}
	if err := enc.Encode(&part); err != nil {
		log.Errorf(ctx, "appstats: save: %v", err)
		return err
	}

	b := buf.Bytes()

	items := []*memcache.Item{
		&memcache.Item{
			Key:   stats.FullKey(),
			Value: b[:n],
		},
		&memcache.Item{
			Key:   stats.PartKey(),
			Value: b[n:],
		},
	}

	if err := memcache.SetMulti(storeContext(ctx), items); err != nil {
		log.Errorf(ctx, "appstats: save: %v", err)
		return err
	}

	log.Infof(ctx, "appstats: %s", rurl(stats))
	return nil
}
Example #9
func (m mcImpl) SetMulti(items []mc.Item, cb mc.RawCB) error {
	return doCB(memcache.SetMulti(m.aeCtx, mcMF2R(items)), cb)
}