Example #1
// VVByte_to_string joins a slice of byte-slice chunks into a single
// bytes.Buffer by writing each chunk into it; a second buffer collects
// one log line per write.
func VVByte_to_string(m [][]byte) (*bytes.Buffer, *bytes.Buffer) {

	bRet := new(bytes.Buffer)
	bMsg := new(bytes.Buffer)

	for i, chunk := range m {
		n, err := bRet.Write(chunk)
		util_err.Err_log(err)
		bMsg.WriteString(" lp" + util.Itos(i) + ": writing " + util.Itos(n) + " bytes: \n")
	}
	return bRet, bMsg
}
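A minimal usage sketch (an assumed illustration, not part of the original source) shows what the two returned buffers contain:

chunks := [][]byte{[]byte("hello, "), []byte("world")}
bData, bLog := VVByte_to_string(chunks)
fmt.Println(bData.String()) // "hello, world"
fmt.Print(bLog.String())    // " lp0: writing 7 bytes:" then " lp1: writing 5 bytes:"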
Example #2
// Variant of VVByte_to_string that reports write errors through a
// buffered loghttp logger instead of util_err.Err_log.
func VVByte_to_string(m [][]byte) (*bytes.Buffer, *bytes.Buffer) {

	lg, b := loghttp.BuffLoggerUniversal(nil, nil)
	_ = b // the logger's message buffer is not used here

	bRet := new(bytes.Buffer)
	bMsg := new(bytes.Buffer)

	for i, chunk := range m {
		n, err := bRet.Write(chunk)
		lg(err)
		bMsg.WriteString(" lp" + util.Itos(i) + ": writing " + util.Itos(n) + " bytes: \n")
	}
	return bRet, bMsg
}
Example #3
//  keySingleShard builds the datastore key for a single shard.
//  We want an equal distribution of the keys,
//  avoiding "clustering" on single datastore "tablet servers",
//  but the mapping still needs to be deterministic.
func keySingleShard(valName string, shardKey int) string {
	prefix := ""
	iter := shardKey
	for {
		mod := iter % 24 // next base-24 "digit"
		r1 := mod + 'a'  // map 0..23 onto 'a'..'x'
		prefix += fmt.Sprintf("%c", r1)
		iter = iter / 24
		if iter < 24 {
			break
		}
	}
	return prefix + "__" + valName + "__" + util.Itos(shardKey)
}
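The prefix loop emits the base-24 "digits" of shardKey as the letters 'a'..'x', least significant first (it stops once the quotient drops below 24). A self-contained sketch of the same scheme - using strconv.Itoa in place of the project's util.Itos - shows how consecutive shard numbers spread across different leading letters:

package main

import (
	"fmt"
	"strconv"
)

// shardPrefix mirrors keySingleShard's prefix loop.
func shardPrefix(shardKey int) string {
	prefix := ""
	iter := shardKey
	for {
		prefix += string(rune(iter%24 + 'a'))
		iter /= 24
		if iter < 24 {
			break
		}
	}
	return prefix
}

func main() {
	for _, k := range []int{0, 1, 2, 23, 24, 25} {
		fmt.Println(shardPrefix(k) + "__counter__" + strconv.Itoa(k))
	}
	// prints a__counter__0, b__counter__1, c__counter__2, x__counter__23, ...
	// consecutive shard numbers do not cluster lexicographically
}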
Example #4
// String_to_VVByte splits a (base64-encoded) string into a slice of
// byte-slice chunks of at most chunksize bytes; a second buffer collects
// one log line per chunk read.
func String_to_VVByte(base64_img string) ([][]byte, *bytes.Buffer) {

	lg, _ := loghttp.BuffLoggerUniversal(nil, nil)

	bMsg := new(bytes.Buffer)

	const chunksize = 400

	// number of chunks, rounded up for a partial final chunk
	var size_o int
	if len(base64_img)%chunksize == 0 {
		size_o = len(base64_img) / chunksize
	} else {
		size_o = len(base64_img)/chunksize + 1
	}

	VVByte := make([][]byte, size_o)

	cntr := -1
	b := make([]byte, chunksize)
	rdr := strings.NewReader(base64_img)
	for {
		cntr++
		n, err := rdr.Read(b)
		if err == io.EOF {
			break
		}
		lg(err)
		if n < 1 {
			break
		}

		// b is reused on every Read, so store an independent copy
		indep_copy := make([]byte, n)
		copy(indep_copy, b)
		VVByte[cntr] = indep_copy

		bMsg.WriteString("reading " + util.Itos(n) + " bytes:\n")
	}

	return VVByte, bMsg

}
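Since String_to_VVByte and VVByte_to_string invert each other, a quick round-trip check (a hypothetical helper, assuming both functions live in the same package) verifies that the chunking is lossless:

// roundTrip is a hypothetical sanity check: split, rejoin, compare.
func roundTrip(s string) bool {
	chunks, _ := String_to_VVByte(s)
	joined, _ := VVByte_to_string(chunks)
	return joined.String() == s
}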
Example #5
// McacheSet is a generic memcache saving function.
// It takes scalars as well as structs.
//
// Integers and strings are put into the memcache Value []byte;
// structs are put into the memcache *Object* - using memcache.JSON.
// Todo: types WrapString and WrapInt should be handled like string/int.
//
// Scalars are tentatively saved using the CAS (compare and swap) methods.
func McacheSet(c appengine.Context, skey string, str_int_struct interface{}) {

	var err error
	var val string

	tMold := reflect.TypeOf(str_int_struct)
	stMold := tMold.Name()                     // strangely this is empty
	stMold = fmt.Sprintf("%T", str_int_struct) // unlike this

	if stMold != "int" &&
		stMold != "string" &&
		stMold != "dsu.WrapInt" &&
		stMold != "dsu.WrapString" {
		// struct - save it with JSON encoder
		n := tMold.NumField() // panics if str_int_struct is not a struct
		_ = n
		miPut := &memcache.Item{
			Key:        skey,
			Value:      []byte(tMold.Name()), // sadly - value is ignored
			Object:     &str_int_struct,
			Expiration: 3600 * time.Second,
		}
		err = memcache.JSON.Set(c, miPut)
		c.Infof("mcache set obj key %v[%s] - err %v", skey, stMold, err)

	} else {
		// scalar value - save it
		switch chamaeleon := str_int_struct.(type) {
		default:
			panic(fmt.Sprintf("only string or int allowed - instead: %T", str_int_struct))
		case nil:
			val = ""
		case WrapString:
			val = chamaeleon.S
		case string:
			val = chamaeleon
		case int:
			val = util.Itos(chamaeleon)
		case WrapInt:
			val = util.Itos(chamaeleon.I)
		}

		/*
			This is a Compare and Swap (CAS) implementation of "set".
			It implements optimistic locking.

			We fetch the item first, then modify it, then put it back.
			We rely on the hidden "casID" of the memcache item
				to detect intermittent changes by competitors.

			Biggest downside is the additional roundtrip for the fetch.
			Second downside: we should implement a retry after failure;
				instead I resorted to a simple "SET".

			Upside: prevention of race conditions.
				But race conditions only matter if newval = f(oldval);
				otherwise "last one wins" should apply anyway.
			A standalone sketch of such a newval = f(oldval) update
				follows after this function.
		*/

		maxTries := 3

		miCas, eget := memcache.Get(c, skey) // compare and swap

		for i := 0; i <= maxTries; i++ {

			if i == maxTries {
				panic(fmt.Sprintf("memcache set CAS failed after %v attempts", maxTries))
			}

			var eput error
			var putMode = ""
			if eget == nil { // CAS needs the previously fetched item
				putMode = "CAS"
				miCas.Value = []byte(val)
				eput = memcache.CompareAndSwap(c, miCas)
			} else {
				putMode = "ADD"
				miCas := &memcache.Item{
					Key:   skey,
					Value: []byte(val),
				}
				eput = memcache.Add(c, miCas)
			}

			if eput == memcache.ErrCASConflict {
				c.Errorf("\t memcache CAS FAILED - concurrent update?")
				// we brutally fall back to set():
				miCas := &memcache.Item{
					Key:   skey,
					Value: []byte(val),
				}
				eset := memcache.Set(c, miCas)
				util_err.Err_log(eset)
				time.Sleep(10 * time.Millisecond)
				continue
			}
			if eput == memcache.ErrNotStored {
				c.Errorf("\t memcache save FAILED - no idea why it would")
				time.Sleep(10 * time.Millisecond)
				continue
			}

			c.Infof("mcache set scalar %v[%T]=%v - mode %v - eget/eput: %v/%v",
				skey, str_int_struct, val, putMode, eget, eput)
			break
		}

	}

}
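As the comment block inside McacheSet notes, CAS only pays off when newval = f(oldval). A minimal standalone sketch of such an update against the same classic App Engine memcache API - McacheIncr is a hypothetical helper, not part of the original:

import (
	"fmt"
	"strconv"

	"appengine"
	"appengine/memcache"
)

// McacheIncr illustrates the fetch-modify-CompareAndSwap cycle with retries.
func McacheIncr(c appengine.Context, skey string) error {
	const maxTries = 3
	for i := 0; i < maxTries; i++ {
		item, err := memcache.Get(c, skey)
		if err != nil {
			return err // includes memcache.ErrCacheMiss
		}
		oldVal, err := strconv.Atoi(string(item.Value))
		if err != nil {
			return err
		}
		item.Value = []byte(strconv.Itoa(oldVal + 1)) // newval = f(oldval)
		err = memcache.CompareAndSwap(c, item)
		if err != memcache.ErrCASConflict {
			return err // nil on success, or a hard failure
		}
		// a competitor changed the item between Get and CAS - retry with a fresh Get
	}
	return fmt.Errorf("CAS increment of %q failed after %v attempts", skey, maxTries)
}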