// End handles the terminate signals coming from the client side
func (mgoCache *CacheStore) End(u *url.URL, h http.Header, req *models.Ping, context *apimodels.Context) (int, http.Header, interface{}, error) {
	if err := validateOperation(req, context); err != nil {
		return response.NewBadRequest(err)
	}

	key := collaboration.PrepareFileKey(req.FileId)

	// once the key is deleted, the collaboration session will be ended with
	// the first ping received afterwards
	if err := mgoCache.Delete(key); err != nil {
		return response.NewBadRequest(err)
	}

	// send the ping request to the related worker
	if err := bongo.B.PublishEvent(collaboration.FireEventName, req); err != nil {
		return response.NewBadRequest(err)
	}

	// send back the updated ping as the response
	return response.NewOK(req)
}
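
// Both handlers in this file call validateOperation before touching the cache;
// its implementation lives elsewhere in the package. A minimal sketch, assuming
// the check only covers required request fields and a logged-in account (the
// AccountId field and the IsLoggedIn helper are assumptions here, not taken
// from this file), could look like:
//
//	func validateOperation(req *models.Ping, context *apimodels.Context) error {
//		if req.FileId == "" {
//			return errors.New("fileId is not set")
//		}
//		if req.AccountId == 0 { // assumed field on models.Ping
//			return errors.New("accountId is not set")
//		}
//		if !context.IsLoggedIn() { // assumed helper on apimodels.Context
//			return errors.New("request is not authenticated")
//		}
//		return nil
//	}
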
// Ping handles the pings coming from the client side
//
// TODO add throttling here
func (mgoCache *CacheStore) Ping(u *url.URL, h http.Header, req *models.Ping, context *apimodels.Context) (int, http.Header, interface{}, error) {
	if err := validateOperation(req, context); err != nil {
		return response.NewBadRequest(err)
	}

	// set the last-seen-at time and refresh the session expiry
	key := collaboration.PrepareFileKey(req.FileId)
	if err := mgoCache.SetEx(key, collaboration.ExpireSessionKeyDuration, req.CreatedAt.Unix()); err != nil {
		return response.NewBadRequest(err)
	}

	// send the ping request to the related worker
	if err := bongo.B.PublishEvent(collaboration.FireEventName, req); err != nil {
		return response.NewBadRequest(err)
	}

	// send back the updated ping as the response
	return response.NewOK(req)
}
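
// The TODO above asks for throttling on Ping. One possible approach, not part
// of the current code, is a process-wide limiter from golang.org/x/time/rate
// checked before the cache is touched; the limit and burst values below are
// illustrative assumptions only:
//
//	var pingLimiter = rate.NewLimiter(rate.Limit(10), 20) // assumed: 10 pings/s, burst of 20
//
//	// at the top of Ping, right after validateOperation:
//	if !pingLimiter.Allow() {
//		return response.NewBadRequest(errors.New("ping rate limit exceeded"))
//	}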