Example #1
// Increment increments the named counter.
func Increment(c context.Context, valName string) error {

	// Get counter config.
	shardsTotal := dsu.WrapInt{}
	dsu.McacheGet(c, mcKeyShardsTotal(valName), &shardsTotal)
	if shardsTotal.I < 1 {
		ckey := datastore.NewKey(c, dsKindNumShards, mcKeyShardsTotal(valName), 0, nil)
		errTx := datastore.RunInTransaction(c,
			func(c context.Context) error {
				err := datastore.Get(c, ckey, &shardsTotal)
				if err == datastore.ErrNoSuchEntity {
					shardsTotal.I = defaultNumShards
					_, err = datastore.Put(c, ckey, &shardsTotal)
				}
				return err
			}, nil)
		if errTx != nil {
			return errTx
		}
		dsu.McacheSet(c, mcKeyShardsTotal(valName), dsu.WrapInt{I: shardsTotal.I})
	}

	// Pick a random shard and increment it.
	errTx := datastore.RunInTransaction(c,
		func(c context.Context) error {
			shardId := rand.Intn(shardsTotal.I)
			dsKey := datastore.NewKey(c, dsKindShard, keySingleShard(valName, shardId), 0, nil)
			var sd WrapShardData
			err := datastore.Get(c, dsKey, &sd)
			// A missing entity and a present entity will both work.
			if err != nil && err != datastore.ErrNoSuchEntity {
				return err
			}
			sd.Name = valName
			sd.ShardId = shardId
			sd.I++
			_, err = datastore.Put(c, dsKey, &sd)
			if ll > 2 {
				aelog.Infof(c, "ds put %v %v", dsKey, sd)
			}
			return err
		}, nil)
	if errTx != nil {
		return errTx
	}

	memcache.Increment(c, mcKey(valName), 1, 0)

	// Collect the number of updates per valName, per instance, in memory,
	// bucketed into 10-minute intervals.
	//
	// A batch job checks whether the number of shards should be increased
	// or decreased, and truncates this map.
	updateSamplingFrequency[valName+util.TimeMarker()[:len("2006-01-02 15:0")]]++

	return nil
}
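The listing above relies on a few names defined elsewhere in the package (WrapShardData, updateSamplingFrequency, ll, the key helpers). As a reading aid, here is a minimal sketch of what the two data-carrying ones imply; the field and variable names follow the usage above, but the real definitions may differ:

// WrapShardData is the per-shard counter entity implied by the example
// above (sd.Name, sd.ShardId, sd.I); the original definition may differ.
type WrapShardData struct {
	Name    string // counter name (valName)
	ShardId int    // index of the shard within the counter
	I       int    // partial count held by this shard
}

// updateSamplingFrequency tallies increments per counter and 10-minute
// bucket, per instance; per the comment above, a batch job uses it to
// resize the shard count and then truncates the map.
var updateSamplingFrequency = map[string]int{}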
Example #2
func parseFurther(w http.ResponseWriter, r *http.Request, saveImages bool) {

	c := appengine.NewContext(r)

	b := new(bytes.Buffer)
	defer func() {
		w.Header().Set("Content-type", "text/plain; charset=utf-8")
		w.Write(b.Bytes())
	}()

	// Get the item from the memcache
	wb1 := new(dsu.WrapBlob)
	ok := dsu.McacheGet(c, keyLatest, wb1)
	loghttp.E(w, r, ok, true)

	if ok {
		b.WriteString(sp("name %v\n", wb1.Name))
		b.WriteString(sp("S (boundary): %q\n", wb1.S))

		// Dumping the entire body:
		// b.WriteString(sp("B: %v\n", string(wb1.VByte)))

		// Instead, split it at the multipart MIME boundary.
		vb := bytes.Split(wb1.VByte, []byte("--"+wb1.S))
		for i, v := range vb {
			h := ""  // header
			fn := "" // filename
			s := string(v)
			s = strings.Trim(s, "\r \n")
			ctype := ""

			b.WriteString(sp("\n___________mime boundary index %v___________\n", i))
			if strings.HasPrefix(s, "Content-Type: image/png;") ||
				strings.HasPrefix(s, "Content-Type: image/jpeg;") {

				if start := strings.Index(s, sepHeaderContent); start > 0 {
					h = s[:start]
					vh := strings.Split(h, "\r\n")
					for _, v := range vh {
						v := strings.TrimSpace(v)
						// b.WriteString("\t\t" + v + "\n")
						if strings.HasPrefix(v, "name=") {
							vv := strings.Split(v, "=")
							fn = stringspb.LowerCasedUnderscored(vv[1])
						}
					}
					s = s[start+len(sepHeaderContent):]
					if posSemicol := strings.Index(h, ";"); posSemicol > 0 {
						ctype = h[0:posSemicol]
					}
				}
			}

			if ctype == "" {
				b.WriteString("unparseable: " + stringspb.Ellipsoider(s, 400))
			} else {
				b.WriteString(sp("\n\tctype=%v\n\t------------", ctype))
				if fn != "" {
					b.WriteString(sp("\n\tfilename=%v\n\t------------", fn))
				}
				if saveImages {
					rE := resEntry{}
					rE.when = util.TimeMarker()
					rE.contentType = ctype
					rE.fn = fn
					rE.b64Img = &s
					Images[reservoirRevolver%reservoirSize] = rE
					reservoirRevolver++
					aelog.Infof(c, "Put image into reservoir %v %v", fn, ctype)
				}
			}

		}

	}

}
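parseFurther takes an extra saveImages flag, so it is not an http.HandlerFunc by itself; a small closure adapts it. A minimal registration sketch, assuming the legacy App Engine runtime used above and a hypothetical /parse-latest route (imports omitted, as in the listings):

func init() {
	// Hypothetical route; the path and the saveImages value are
	// assumptions, not taken from the example above.
	http.HandleFunc("/parse-latest", func(w http.ResponseWriter, r *http.Request) {
		parseFurther(w, r, true)
	})
}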
Example #3
// Count retrieves the value of the named counter,
// either from memcache or from the datastore.
func Count(c context.Context, valName string) (retVal int, err error) {

	wi := dsu.WrapInt{}
	ok := dsu.McacheGet(c, mcKey(valName), &wi)
	if !ok {
		aelog.Errorf(c, "could not get %v from memcache", mcKey(valName))
	}
	retVal = wi.I
	if retVal > 0 {
		if ll > 2 {
			aelog.Infof(c, "found counter %s = %v in memcache", mcKey(valName), wi.I)
		}
		// Fall through and recompute the sum from the datastore shards.
		retVal = 0
	}

Loop1:
	for j := 0; j < 1333; j++ { // hard upper bound on the number of batches

		q := datastore.NewQuery(dsKindShard)

		q = q.Filter("Name =", valName)

		// because we have "hashed" the keys, we can no longer
		// range query them by key -
		//q = q.Filter("__key__ >=", valName+shardId )
		//q = q.Filter("__key__ < ",stringspb.IncrementString(valName+shardId) )

		q = q.Order("Name")
		q = q.Order("-ShardId")
		q = q.Limit(batchSize)
		q = q.Offset(j * batchSize)
		cntr := 0
		iter := q.Run(c)
		for {
			var sd WrapShardData
			_, err = iter.Next(&sd)

			if err == datastore.Done {
				if ll > 2 {
					aelog.Infof(c, "       No Results (any more)  %v", err)
				}
				err = nil
				if cntr == 0 {
					if ll > 2 {
						aelog.Infof(c, "  Leaving Loop1")
					}
					break Loop1
				}
				break
			}
			cntr++
			retVal += sd.I
			if ll > 2 {
				aelog.Infof(c, "        %2vth shard: %v %v %4v - %4v", cntr, sd.Name, sd.ShardId, sd.I, retVal)
			}
		}
		if ll > 2 {
			aelog.Infof(c, "   %2v shards found - sum %4v", cntr, retVal)
		}

	}

	dsu.McacheSet(c, mcKey(valName), retVal)
	return

}
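Count shares the context-based signature of Increment from Example #1, so the two compose into a plain handler. A minimal sketch, assuming the google.golang.org/appengine package (whose NewContext returns a context.Context) and a hypothetical counter name and handler; imports such as fmt and net/http are omitted, as in the listings:

// countHandler is a hypothetical handler that bumps and then reports
// the "visits" counter; error handling is kept deliberately simple.
func countHandler(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	if err := Increment(c, "visits"); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	n, err := Count(c, "visits")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintf(w, "visits: %v\n", n)
}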
Example #4

// Count retrieves the value of the named counter,
// either from memcache or from the datastore.
func Count(w http.ResponseWriter, r *http.Request, valName string) (retVal int, err error) {

	c := appengine.NewContext(r)

	wi := dsu.WrapInt{}
	errMc := dsu.McacheGet(c, mCKValue(valName), &wi)
	util_err.Err_http(w, r, errMc, false)
	retVal = wi.I
	if retVal > 0 {
		c.Infof("found counter %s = %v in memcache; return", mCKValue(valName), wi.I)
		retVal = 0
		//return
	}

Loop1:
	for j := 0; j < 1333; j++ { // hard upper bound on the number of batches

		q := datastore.NewQuery(dsKindShard)

		q = q.Filter("Name =", valName)

		// because we have "hashed" the keys, we can no longer
		// range query them by key -
		//q = q.Filter("__key__ >=", valName+shardId )
		//q = q.Filter("__key__ < ",util.IncrementString(valName+shardId) )

		q = q.Order("Name")
		q = q.Order("-ShardId")

		q = q.Limit(batchSize)

		q = q.Offset(j * batchSize)

		cntr := 0
		iter := q.Run(c)
		for {
			var sd WrapShardData
			_, err = iter.Next(&sd)

			if err == datastore.Done {
				c.Infof("       No Results (any more)  %v", err)
				err = nil
				if cntr == 0 {
					c.Infof("  Leaving Loop1")
					break Loop1
				}
				break
			}
			cntr++
			retVal += sd.I
			c.Infof("        %2vth shard: %v %v %4v - %4v", cntr, sd.Name, sd.ShardId, sd.I, retVal)

			util_err.Err_http(w, r, err, false)
			// other err
			// if err != nil {
			// 	return retVal, err
			// }

		}

		c.Infof("   %2v shards found - sum %4v", cntr, retVal)

	}

	dsu.McacheSet(c, mCKValue(valName), retVal)
	return

}