Code Example #1
func (g *Goon) putMemcache(srcs []interface{}, exists []byte) error {
	items := make([]*memcache.Item, len(srcs))
	payloadSize := 0
	for i, src := range srcs {
		toSerialize := src
		if exists[i] == 0 {
			toSerialize = nil
		}
		data, err := serializeStruct(toSerialize)
		if err != nil {
			g.error(err)
			return err
		}
		key, _, err := g.getStructKey(src)
		if err != nil {
			return err
		}
		// payloadSize will overflow if we push 2+ gigs on a 32bit machine
		payloadSize += len(data)
		items[i] = &memcache.Item{
			Key:   memkey(key),
			Value: data,
		}
	}
	memcacheTimeout := MemcachePutTimeoutSmall
	if payloadSize >= MemcachePutTimeoutThreshold {
		memcacheTimeout = MemcachePutTimeoutLarge
	}
	errc := make(chan error)
	go func() {
		tc, cf := context.WithTimeout(g.Context, memcacheTimeout)
		errc <- memcache.SetMulti(tc, items)
		cf()
	}()
	g.putMemoryMulti(srcs, exists)
	err := <-errc
	if appengine.IsTimeoutError(err) {
		g.timeoutError(err)
		err = nil
	} else if err != nil {
		g.error(err)
	}
	return err
}
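The example above treats an App Engine timeout on the memcache write as non-fatal: the write runs under a bounded context, and appengine.IsTimeoutError is used to log and swallow the timeout rather than fail the whole request. A minimal sketch of that pattern in isolation, assuming the usual context, time, log, google.golang.org/appengine and google.golang.org/appengine/memcache imports (cacheValue and the 500ms budget are illustrative, not part of the original code):

// cacheValue writes one item to memcache under a short deadline and only
// reports errors that are not App Engine timeouts.
func cacheValue(ctx context.Context, key string, value []byte) error {
	tc, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
	defer cancel()
	err := memcache.Set(tc, &memcache.Item{Key: key, Value: value})
	if appengine.IsTimeoutError(err) {
		// A timeout only means the cache write may not have landed;
		// the caller can safely proceed without it.
		log.Printf("memcache set timed out: %v", err)
		return nil
	}
	return err
}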
Code Example #2
File: controllers.go Project: flowlo/coduno-api
// deal makes the response in error cases somewhat nicer. It will try
// to figure out what actually went wrong and inform the user.
// It should not be called if the request went fine. If status is below
// 400, and err is not nil, it will assume an internal server error.
// Generally, if you pass a nil error, don't expect deal to do anything
// useful.
func deal(ctx context.Context, w http.ResponseWriter, r *http.Request, status int, err error) {
	// Getting an error and a status code below 400 is somewhat paradoxical.
	// Also, if the status is the zero value, assume that we're dealing
	// with an internal server error.
	if err != nil && status < 400 || status == 0 {
		status = http.StatusInternalServerError
	}

	msg := "Sorry, Coduno encountered an error: " + http.StatusText(status) + "\n\n"
	msg += "If you think this is a bug, please consider filing\n"
	msg += "it at https://github.com/coduno/api/issues\n\n"

	if ctx != nil {
		msg += "Your request ID is " + appengine.RequestID(ctx) + " (important to track down what went wrong)\n\n"
	}

	// If we don't have an error, there is not much more we can say.
	if err == nil {
		w.WriteHeader(status)
		w.Write([]byte(msg))
		return
	}

	if t, ok := err.(trace); ok {
		msg += "Trace:\n"
		msg += strings.Replace(string(t.t), "\n", "\n\t", -1)
		msg += "\n"
		err = t.e
	}

	if appengine.IsOverQuota(err) {
		msg += "Reason: Over Quota"
	} else if appengine.IsTimeoutError(err) {
		msg += "Reason: Timeout Error"
	} else {
		msg += fmt.Sprintf("Reason: %s", err)
	}

	w.WriteHeader(status)
	w.Write([]byte(msg))
}
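For context, deal is meant to be called from a handler once the request has already gone wrong. A hypothetical handler sketch showing the intended call pattern, assuming the usual net/http, google.golang.org/appengine and google.golang.org/appengine/datastore imports (handleRun and the Challenge entity are made up for illustration):

func handleRun(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)

	key, err := datastore.DecodeKey(r.URL.Query().Get("key"))
	if err != nil {
		// A malformed key is the client's fault, so report it as a 400.
		deal(ctx, w, r, http.StatusBadRequest, err)
		return
	}

	var challenge Challenge // hypothetical entity type
	if err := datastore.Get(ctx, key, &challenge); err != nil {
		// Passing status 0 with a non-nil error makes deal respond with a 500.
		deal(ctx, w, r, 0, err)
		return
	}
	// ... normal processing of challenge ...
}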
Code Example #3
// GetMulti is a batch version of Get.
//
// dst must be a *[]S, *[]*S, *[]I, []S, []*S, or []I, for some struct type S,
// or some interface type I. If *[]I or []I, each element must be a struct pointer.
func (g *Goon) GetMulti(dst interface{}) error {
	keys, err := g.extractKeys(dst, false) // don't allow incomplete keys on a Get request
	if err != nil {
		return err
	}

	v := reflect.Indirect(reflect.ValueOf(dst))

	if g.inTransaction {
		// todo: support getMultiLimit in transactions
		return datastore.GetMulti(g.Context, keys, v.Interface())
	}

	var dskeys []*datastore.Key
	var dsdst []interface{}
	var dixs []int

	var memkeys []string
	var mixs []int

	g.cacheLock.RLock()
	for i, key := range keys {
		m := memkey(key)
		vi := v.Index(i)

		if vi.Kind() == reflect.Struct {
			vi = vi.Addr()
		}

		if s, present := g.cache[m]; present {
			if vi.Kind() == reflect.Interface {
				vi = vi.Elem()
			}

			reflect.Indirect(vi).Set(reflect.Indirect(reflect.ValueOf(s)))
		} else {
			memkeys = append(memkeys, m)
			mixs = append(mixs, i)
			dskeys = append(dskeys, key)
			dsdst = append(dsdst, vi.Interface())
			dixs = append(dixs, i)
		}
	}
	g.cacheLock.RUnlock()

	if len(memkeys) == 0 {
		return nil
	}

	multiErr, any := make(appengine.MultiError, len(keys)), false

	tc, cf := context.WithTimeout(g.Context, MemcacheGetTimeout)
	memvalues, err := memcache.GetMulti(tc, memkeys)
	cf()
	if appengine.IsTimeoutError(err) {
		g.timeoutError(err)
		err = nil
	} else if err != nil {
		g.error(err) // timing out or another error from memcache isn't something to fail over, but do log it
		// No memcache values found; fall back to the datastore fetch list already prepared above
	} else if len(memvalues) > 0 {
		// since memcache fetch was successful, reset the datastore fetch list and repopulate it
		dskeys = dskeys[:0]
		dsdst = dsdst[:0]
		dixs = dixs[:0]
		// we only want to check the returned map if there weren't any errors
		// unlike the datastore, memcache will return a smaller map with no error if some of the keys were missed

		for i, m := range memkeys {
			d := v.Index(mixs[i]).Interface()
			if v.Index(mixs[i]).Kind() == reflect.Struct {
				d = v.Index(mixs[i]).Addr().Interface()
			}
			if s, present := memvalues[m]; present {
				err := deserializeStruct(d, s.Value)
				if err == datastore.ErrNoSuchEntity {
					any = true // this flag tells GetMulti to return multiErr later
					multiErr[mixs[i]] = err
				} else if err != nil {
					g.error(err)
					return err
				} else {
					g.putMemory(d)
				}
			} else {
				dskeys = append(dskeys, keys[mixs[i]])
				dsdst = append(dsdst, d)
				dixs = append(dixs, mixs[i])
			}
		}
		if len(dskeys) == 0 {
			if any {
				return realError(multiErr)
			}
			return nil
		}
	}

	goroutines := (len(dskeys)-1)/getMultiLimit + 1
	var wg sync.WaitGroup
	wg.Add(goroutines)
	for i := 0; i < goroutines; i++ {
		go func(i int) {
			defer wg.Done()
			var toCache []interface{}
			var exists []byte
			lo := i * getMultiLimit
			hi := (i + 1) * getMultiLimit
			if hi > len(dskeys) {
				hi = len(dskeys)
			}
			gmerr := datastore.GetMulti(g.Context, dskeys[lo:hi], dsdst[lo:hi])
			if gmerr != nil {
				any = true // this flag tells GetMulti to return multiErr later
				merr, ok := gmerr.(appengine.MultiError)
				if !ok {
					g.error(gmerr)
					for j := lo; j < hi; j++ {
						multiErr[j] = gmerr
					}
					return
				}
				for i, idx := range dixs[lo:hi] {
					if merr[i] == nil {
						toCache = append(toCache, dsdst[lo+i])
						exists = append(exists, 1)
					} else {
						if merr[i] == datastore.ErrNoSuchEntity {
							toCache = append(toCache, dsdst[lo+i])
							exists = append(exists, 0)
						}
						multiErr[idx] = merr[i]
					}
				}
			} else {
				toCache = append(toCache, dsdst[lo:hi]...)
				exists = append(exists, bytes.Repeat([]byte{1}, hi-lo)...)
			}
			if len(toCache) > 0 {
				if err := g.putMemcache(toCache, exists); err != nil {
					g.error(err)
					// since putMemcache() gives no guarantee it will actually store the data in memcache
					// we log and swallow this error
				}

			}
		}(i)
	}
	wg.Wait()
	if any {
		return realError(multiErr)
	}
	return nil
}
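As a usage note, GetMulti reads the keys out of the destination slice itself and fills the same slice from the local cache, memcache, or the datastore as needed. A minimal sketch, assuming the goon package is imported and using an illustrative Widget kind (not taken from the original project):

// Widget is an illustrative kind; the goon:"id" tag marks the key field.
type Widget struct {
	ID   int64  `datastore:"-" goon:"id"`
	Name string
}

func loadWidgets(ctx context.Context, ids []int64) ([]*Widget, error) {
	g := goon.FromContext(ctx)
	widgets := make([]*Widget, len(ids))
	for i, id := range ids {
		widgets[i] = &Widget{ID: id}
	}
	// Cached entities are filled from memory or memcache; the remainder
	// are fetched from the datastore in batches of getMultiLimit.
	if err := g.GetMulti(widgets); err != nil {
		return nil, err // may be an appengine.MultiError for partial misses
	}
	return widgets, nil
}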
Code Example #4
File: info.go Project: nishanths/gae
func (g giImpl) IsTimeoutError(err error) bool {
	return appengine.IsTimeoutError(err)
}
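This thin wrapper lets callers depend on an interface instead of the appengine package directly, which keeps timeout handling testable. A hypothetical illustration of that idea (the timeoutChecker interface and fakeInfo type below are made up, not part of the gae package):

// timeoutChecker is a hypothetical interface matching the method above.
type timeoutChecker interface {
	IsTimeoutError(err error) bool
}

// shouldRetry treats timeouts as retryable, using whichever implementation
// (real or fake) it is handed.
func shouldRetry(c timeoutChecker, err error) bool {
	return err != nil && c.IsTimeoutError(err)
}

// fakeInfo lets a test simulate timeouts without the App Engine runtime.
type fakeInfo struct{ timeout bool }

func (f fakeInfo) IsTimeoutError(err error) bool { return f.timeout }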
Code Example #5
File: database.go Project: golang/gddo
// Reindex gets all the packages in the database and puts them into the search index.
// This will update the search index with the path, synopsis, score, and import counts
// of all the packages in the database.
func (db *Database) Reindex(ctx context.Context) error {
	c := db.Pool.Get()
	defer c.Close()

	idx, err := search.Open("packages")
	if err != nil {
		return fmt.Errorf("database: failed to open packages: %v", err)
	}
	npkgs := 0
	for {
		// Get 200 packages from the nextCrawl set each time. Use npkgs as a cursor
		// to store the current position we actually indexed. Retry from the cursor
		// position if we received a timeout error from app engine.
		values, err := redis.Values(c.Do(
			"SORT", "nextCrawl",
			"LIMIT", strconv.Itoa(npkgs), "200",
			"GET", "pkg:*->path",
			"GET", "pkg:*->synopsis",
			"GET", "pkg:*->score",
		))
		if err != nil {
			return err
		}
		if len(values) == 0 {
			break // all done
		}

		// The Search API supports putting documents in batches of up to 200,
		// but the Go version of this API does not support that yet.
		// TODO(shantuo): Put packages in batch operations.
		for ; len(values) > 0; npkgs++ {
			var pdoc doc.Package
			var score float64
			values, err = redis.Scan(values, &pdoc.ImportPath, &pdoc.Synopsis, &score)
			if err != nil {
				return err
			}
			// There are some corrupted data in our current database
			// that causes an error when putting the package into the
			// search index which only supports UTF8 encoding.
			if !utf8.ValidString(pdoc.Synopsis) {
				pdoc.Synopsis = ""
			}
			id, n, err := pkgIDAndImportCount(c, pdoc.ImportPath)
			if err != nil {
				return err
			}
			if _, err := idx.Put(ctx, id, &Package{
				Path:        pdoc.ImportPath,
				Synopsis:    pdoc.Synopsis,
				Score:       score,
				ImportCount: n,
			}); err != nil {
				if appengine.IsTimeoutError(err) {
					log.Printf("App Engine timeout: %v. Continue...", err)
					break
				}
				return fmt.Errorf("Failed to put index %s: %v", id, err)
			}
		}
	}
	log.Printf("%d packages are reindexed", npkgs)
	return nil
}
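Reindex is long-running and retries past search.Put timeouts internally, so it is normally driven from something like a cron-triggered handler rather than a user request. A hypothetical wiring sketch, assuming net/http and google.golang.org/appengine imports (the serveReindex handler is an assumption, not taken from gddo):

func serveReindex(db *Database) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := appengine.NewContext(r)
		if err := db.Reindex(ctx); err != nil {
			// Reindex already continues past individual search.Put timeouts;
			// anything surfacing here is a harder failure.
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Write([]byte("reindex complete\n"))
	}
}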