Example 1
/*
Handle runs f after verifying that the request carries the required scopes,
and renders any resulting error to the HTTP response.

Panics are logged (with the request and a stack trace) and re-raised.
Errors implementing Responder render themselves; any other error produces
a 500 with the HTML-escaped error text. Responses with status >= 500 are
logged as errors, others as warnings, and a stack-annotated copy of the
error is always logged at info level.
*/
func Handle(c HTTPContextLogger, f func() error, scopes ...string) {
	defer func() {
		if e := recover(); e != nil {
			c.Errorf("PANIC\n%v\nRequest: %+v\nStack: %s", e, c.Req(), utils.Stack())
			panic(e)
		}
	}()
	err := c.CheckScopes(scopes)
	if err == nil {
		err = f()
	}
	if err != nil {
		if errResponse, ok := err.(Responder); ok {
			if err2 := errResponse.Respond(c); err2 != nil {
				c.Resp().WriteHeader(500)
				// Fprint, not Fprintf: the escaped message is dynamic and may
				// contain '%', which must not be treated as a format directive.
				fmt.Fprint(c.Resp(), html.EscapeString(fmt.Sprintf("Unable to render the proper error %+v: %v", err, err2)))
			}
		} else {
			c.Resp().WriteHeader(500)
			// Same reasoning as above: never use a dynamic format string.
			fmt.Fprint(c.Resp(), html.EscapeString(fmt.Sprintf("%v", err)))
		}
		if c.Resp().Status() >= 500 {
			c.Errorf("%v\n%v\n\n", c.Req().URL, err)
		} else {
			c.Warningf("%v\n%v\n\n", c.Req().URL, err)
		}
		stackErr := errors.Wrap(err, 1)
		c.Infof("%s", stackErr.ErrorStack())
	}
}
Example 2
/*
run executes f with the given context, logging the function's name before
the call and "Pass" after it returns. A panic inside f is recovered,
logged together with a stack trace, and reported as a 500 response.
*/
func run(c gaecontext.HTTPContext, f func(c gaecontext.HTTPContext)) {
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		message := fmt.Sprintf("Failed: %v\n%s", r, utils.Stack())
		c.Infof("%v", message)
		c.Resp().WriteHeader(500)
		fmt.Fprintln(c.Resp(), message)
	}()
	fnName := runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
	c.Infof("Running %v", fnName)
	f(c)
	c.Infof("Pass")
}
Example 3
/*
NewError builds an HTTPError carrying the given status, body, info string
and underlying cause. When ErrorStackTraces is enabled and the status is a
server error (>= 500), the current stack trace is attached as well.
*/
func NewError(status int, body interface{}, info string, cause error) (result HTTPError) {
	result.Status = status
	result.Body = body
	result.Info = info
	result.Cause = cause
	// Stack traces are only collected for server errors, and only when
	// globally enabled, to keep client-error paths cheap.
	if ErrorStackTraces && status >= 500 {
		result.Stack = utils.Stack()
	}
	return
}
Example 4
/*
Lock will try to lock this KeyLock and make its Id (and the value it is based on) unavailable for other locks.
*/
/*
Lock will try to lock this KeyLock and make its Id (and the value it is based on) unavailable for other locks.
*/
func (self *KeyLock) Lock(c GAEContext) error {
	// Snapshot our state up front: the transaction body below may run more
	// than once (retries), so each attempt must start from the same state.
	snapshot := *self
	return c.Transaction(func(c GAEContext) (err error) {
		// Restore any state a previous (failed) attempt may have mutated.
		*self = snapshot
		existingLock := &KeyLock{Id: self.Id}
		err = gae.GetById(c, existingLock)
		if _, ok := err.(gae.ErrNoSuchEntity); ok {
			// No lock stored under this id — we are free to claim it.
			err = nil
		} else if err == nil {
			// Lookup succeeded, meaning someone already holds this lock.
			err = ErrLockTaken{
				Key:    self.Id,
				Entity: existingLock.Entity,
				Stack:  utils.Stack(),
			}
		}
		// Any remaining error is either ErrLockTaken or a datastore failure.
		if err != nil {
			return
		}
		// Claim the lock by persisting ourselves under the contested id.
		err = gae.Put(c, self)
		return
	}, false)
}
Example 5
/*
Transaction will run f inside a transaction, optionally crossGroup (more than 1 but LESS THAN FIVE entity groups involved).

If it fails due to other concurrent transactions, it will retry this transaction up until 20 seconds have passed.
*/
/*
Transaction will run f inside a transaction, optionally crossGroup (more than 1 but LESS THAN FIVE entity groups involved).

If it fails due to other concurrent transactions, it will retry this transaction up until 20 seconds have passed.
*/
func (self *DefaultContext) Transaction(f interface{}, crossGroup bool) (err error) {
	// Already inside a transaction: just run f within the current one.
	if self.inTransaction {
		return CallTransactionFunction(self, f)
	}
	var newContext DefaultContext
	/*
	 * Instead of retrying 3 times, something that we see fail multiple times,
	 * try to get the transaction working, waiting for max 20 seconds.
	 */
	start := time.Now()
	tries := 0
	for time.Since(start) < (time.Second * 20) {
		hasConcErr := false
		err = datastore.RunInTransaction(self, func(c appengine.Context) error {
			// Run f on a copy of self flagged as in-transaction so that any
			// nested Transaction call takes the fast path above.
			newContext = *self
			newContext.Context = c
			newContext.inTransaction = true
			return CallTransactionFunction(&newContext, f)
		}, &datastore.TransactionOptions{XG: crossGroup})
		if err == nil {
			break
		}
		/* Dont fail on concurrent transaction.. Continue trying... */
		if dserr, ok := err.(*errors.Error); ok {
			// our own stack errors, based on a concurrent transaction error
			if dserr.Err == datastore.ErrConcurrentTransaction {
				hasConcErr = true
			} else {
				// if they are based on appengine or utils multierrors, check for concurrency errors inside
				if merr, ok := dserr.Err.(appengine.MultiError); ok {
					for _, e := range merr {
						if e == datastore.ErrConcurrentTransaction {
							hasConcErr = true
							break
						}
					}
				} else if merr, ok := dserr.Err.(utils.MultiError); ok {
					for _, e := range merr {
						if e == datastore.ErrConcurrentTransaction {
							hasConcErr = true
							break
						}
					}
				}
			}
		} else if err == datastore.ErrConcurrentTransaction {
			// or if they ARE concurrency errors!
			hasConcErr = true
		}
		if !hasConcErr && strings.Contains(strings.ToLower(err.Error()), "concurrent") {
			// or, if they are the special black ops concurrency errors that google never talk openly about
			hasConcErr = true
		}
		if !hasConcErr && strings.Contains(strings.ToLower(err.Error()), "transaction closed") {
			// or, they are the even more magical "transaction closed" errors that don't even know about the cause why it was closed
			hasConcErr = true
		}
		if hasConcErr {
			lines := strings.Split(utils.Stack(), "\n")
			// Guard against short stack traces: lines[9:11] would panic with a
			// slice bounds error if the trace has fewer than 11 lines.
			caller := lines
			if len(lines) >= 11 {
				caller = lines[9:11]
			}
			self.Debugf("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! DANGER ! Failed to run %v in transaction due to %v, retrying... !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", caller, err)
			tries += 1
			// Randomized, linearly growing backoff before the next attempt.
			time.Sleep(time.Millisecond * time.Duration(rand.Int63()%int64(500*tries)))
		} else {
			break
		}
	}
	if err != nil {
		return
	}

	// After the transaction is successful, run all the AfterTransaction registered callbacks.
	var multiErr appengine.MultiError
	for _, cb := range newContext.afterTransaction {
		if err := cb(self); err != nil {
			multiErr = append(multiErr, err)
		}
	}
	if len(multiErr) > 0 {
		err = multiErr
	}
	return
}
Example 6
/*
memoizeMulti will look for all provided keys, and load them into the destinationPointers.

Any missing values will be generated using the generatorFunctions and put in memcache with a duration timeout.

If cacheNil is true, nil results or memcache.ErrCacheMiss errors from the generator function will be cached.

It returns a slice of bools that show whether each value was found (either from memcache or from the genrator function).
*/
/*
memoizeMulti will look for all provided keys, and load them into the destinationPointers.

Any missing values will be generated using the generatorFunctions and put in memcache with a duration timeout.

If cacheNil is true, nil results or memcache.ErrCacheMiss errors from the generator function will be cached.

It returns a slice of bools that show whether each value was found (either from memcache or from the genrator function).
*/
func memoizeMulti(
	c TransactionContext,
	keys []string,
	cacheNil bool,
	destinationPointers []interface{},
	generatorFunctions []func() (interface{}, time.Duration, error)) (errors appengine.MultiError) {

	// First generate memcache friendly key hashes from all the provided keys.
	keyHashes := make([]string, len(keys))
	for index, key := range keys {
		k, err := Keyify(key)
		if err != nil {
			errors = appengine.MultiError{err}
			return
		}
		keyHashes[index] = k
	}

	// Then, run a memGetMulti using these keys, and warn if it is slow.
	t := time.Now()
	var items []*memcache.Item
	items, errors = memGetMulti(c, keyHashes, destinationPointers)
	if d := time.Since(t); d > time.Millisecond*10 {
		c.Debugf("SLOW memGetMulti(%v): %v", keys, d)
	}

	// Create a channel to handle any panics produced by the concurrent code.
	panicChan := make(chan interface{}, len(items))

	// For all the items we tried to fetch...
	for i, item := range items {

		// set up variables to use in the iteration (shadowed copies so the
		// goroutine below captures this iteration's values, not the loop vars)
		index := i
		err := errors[index]
		keyHash := keyHashes[index]
		destinationPointer := destinationPointers[index]
		if err == memcache.ErrCacheMiss {
			// for cache misses, background do..
			go func() (err error) {
				// defer fetching any panics and sending them to the panic channel
				defer func() {
					errors[index] = err
					if e := recover(); e != nil {
						c.Infof("Panic: %v", e)
						panicChan <- fmt.Errorf("%v\n%v", e, utils.Stack())
					} else {
						// no panics will send a nil, which is necessary since we wait for all goroutines to send SOMETHING on the channel
						panicChan <- nil
					}
				}()
				var result interface{}
				var duration time.Duration
				found := true
				// try to run the generator function
				if result, duration, err = generatorFunctions[index](); err != nil {
					if err != memcache.ErrCacheMiss {
						return
					} else {
						// ErrCacheMiss from the generator function means that we want the caller to think there is no data to return
						found = false
					}
				} else {
					// if there is no error, check if we got a nil
					found = !utils.IsNil(result)
					if !found {
						// if we did, we fake an ErrCacheMiss
						err = memcache.ErrCacheMiss
					}
				}
				// If we are not inside a transaction, we have to store the result in memcache
				if !c.InTransaction() && (found || cacheNil) {
					obj := result
					var flags uint32
					if !found {
						// if the generator responded with nil or a cache miss, flag this cache entry as a cache miss for future reference
						obj = reflect.Indirect(reflect.ValueOf(destinationPointer)).Interface()
						flags = nilCache
					}
					if err2 := codecSetWithRetry(c, Codec, &memcache.Item{
						Key:        keyHash,
						Flags:      flags,
						Object:     obj,
						Expiration: duration,
					}); err2 != nil {
						// We've successfully fetched the data, we failed storing it in memcache, log it and continue
						c.Errorf("Failed storing to memcache, %v", err2)
						return
					}
				}
				if found {
					// if we actually found something, copy the result to the destination
					utils.ReflectCopy(result, destinationPointer)
				}
				return
			}()
		} else if err != nil {
			// any errors will bubble up the panic channel
			panicChan <- nil
		} else {
			// if we FOUND something, but it was flagged as a cache miss, fake a cache miss
			if item.Flags&nilCache == nilCache {
				errors[index] = memcache.ErrCacheMiss
			}
			panicChan <- nil
		}
	}

	// collect any panics, and raise them if we found any
	panics := []interface{}{}
	for range items {
		if e := <-panicChan; e != nil {
			panics = append(panics, e)
		}
	}
	if len(panics) > 0 {
		panic(panics)
	}
	return
}
Example 7
/*
DocHandler will return a handler that renders the documentation for all routes registerd with DocHandle.

The resulting func will do this by going through each route in DocumentedRoutes and render the endpoint
using the provided template, providing it template functions to render separate endpoints, types, sub types
and examples of types.
*/
/*
DocHandler will return a handler that renders the documentation for all routes registerd with DocHandle.

The resulting func will do this by going through each route in DocumentedRoutes and render the endpoint
using the provided template, providing it template functions to render separate endpoints, types, sub types
and examples of types.
*/
func DocHandler(templ *template.Template) http.Handler {
	return httpcontext.HandlerFunc(func(c httpcontext.HTTPContextLogger) (err error) {
		c.Resp().Header().Set("Content-Type", "text/html; charset=UTF-8")
		// we define a func to render a type
		// it basically just executes the "TypeTemplate" with the provided
		// stack to avoid infinite recursion
		renderType := func(t JSONType, stack []string) (result string, err error) {
			// if the type is already mentioned in one of the parents we have already mentioned,
			// bail
			for _, parent := range stack {
				if parent != "" && parent == t.ReflectType.Name() {
					result = fmt.Sprintf("[loop protector enabled, render stack: %v]", stack)
					return
				}
			}
			// push this type onto the render stack before recursing via the template
			stack = append(stack, t.ReflectType.Name())
			buf := &bytes.Buffer{}
			// then execute the TypeTemplate with this type and this stack
			if err = templ.ExecuteTemplate(buf, "TypeTemplate", map[string]interface{}{
				"Type":  t,
				"Stack": stack,
			}); err != nil {
				return
			}
			result = buf.String()
			return
		}

		// routes are documented alphabetically
		sort.Sort(routes)
		// define all the functions that we left empty earlier
		err = templ.Funcs(map[string]interface{}{
			"RenderEndpoint": func(r DocumentedRoute) (string, error) {
				return r.Render(templ.Lookup("EndpointTemplate"))
			},
			// RenderSubType continues an in-progress render stack (loop protection),
			// while RenderType starts a fresh one.
			"RenderSubType": func(t JSONType, stack []string) (result string, err error) {
				return renderType(t, stack)
			},
			"RenderType": func(t JSONType) (result string, err error) {
				return renderType(t, nil)
			},
			"First": first,
			"Example": func(r JSONType) (result string, err error) {
				// this will render an example of the provided JSONType
				// (panics from utils.Example are recovered and rendered inline)
				defer func() {
					if e := recover(); e != nil {
						result = fmt.Sprintf("%v\n%s", e, utils.Stack())
					}
				}()
				x := utils.Example(r.ReflectType)
				b, err := json.MarshalIndent(x, "", "  ")
				if err != nil {
					return
				}
				// if the route limits which fields are exposed, strip the
				// example down to just those fields before rendering
				if len(r.Fields) > 0 {
					var i interface{}
					if err = json.Unmarshal(b, &i); err != nil {
						return
					}
					if m, ok := i.(map[string]interface{}); ok {
						newMap := map[string]interface{}{}
						for k, v := range m {
							if _, found := r.Fields[k]; found {
								newMap[k] = v
							}
						}
						if b, err = json.MarshalIndent(newMap, "", "  "); err != nil {
							return
						}
					}
				}
				result = string(b)
				return
			},
		}).Execute(c.Resp(), map[string]interface{}{
			"Endpoints": routes,
		})
		return
	})
}