func (s *SyncUserHandler) hBsoGET(w http.ResponseWriter, r *http.Request) {
	if !AcceptHeaderOk(w, r) {
		return
	}

	var (
		bId string
		ok  bool
		cId int
		err error
		bso *syncstorage.BSO
	)

	if bId, ok = extractBsoIdFail(w, r); !ok {
		return
	}

	cId, err = s.getcid(r, false)
	if err != nil {
		if err == syncstorage.ErrNotFound {
			sendRequestProblem(w, r, http.StatusNotFound, errors.Wrap(err, "Collection Not Found"))
		} else {
			InternalError(w, r, err)
		}
		return
	}

	if bso, err = s.db.GetBSO(cId, bId); err == nil {
		if sentNotModified(w, r, bso.Modified) {
			return
		}

		log.WithFields(log.Fields{
			"bso_t": bso.TTL,
			"now":   syncstorage.Now(),
			"diff":  syncstorage.Now() - bso.TTL,
		}).Debug("bso-expired")

		m := syncstorage.ModifiedToString(bso.Modified)
		w.Header().Set("X-Last-Modified", m)
		JsonNewline(w, r, bso)
	} else {
		if err == syncstorage.ErrNotFound {
			sendRequestProblem(w, r, http.StatusNotFound, errors.Wrap(err, "BSO Not Found"))
		} else {
			InternalError(w, r, err)
		}
	}
}
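// sentNotModified is used above but not defined in this fragment. Below is
// a minimal sketch of its assumed behaviour: compare the record's modified
// timestamp (milliseconds) against the client's X-If-Modified-Since header
// (seconds with two decimal places, per sync 1.5) and short-circuit with a
// 304 when nothing changed. The name, signature, and logic of the real
// helper may differ.
func sentNotModifiedSketch(w http.ResponseWriter, r *http.Request, modified int) bool {
	ims := r.Header.Get("X-If-Modified-Since")
	if ims == "" {
		return false
	}

	// seconds (float) -> milliseconds for comparison
	since, err := strconv.ParseFloat(ims, 64)
	if err != nil {
		return false
	}

	if int(since*1000) >= modified {
		w.WriteHeader(http.StatusNotModified)
		return true
	}

	return false
}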
func (s *SyncUserHandler) hCollectionDELETE(w http.ResponseWriter, r *http.Request) {
	cId, err := s.getcid(r, false)
	if err != nil {
		if err == syncstorage.ErrNotFound {
			// deleting a collection that doesn't exist is not an error;
			// respond with a fresh modified timestamp
			w.Header().Set("Content-Type", "application/json")
			fmt.Fprintf(w, `{"modified":%s}`, syncstorage.ModifiedToString(syncstorage.Now()))
		} else {
			InternalError(w, r, err)
		}
		return
	}

	cmodified, err := s.db.GetCollectionModified(cId)
	if err != nil {
		InternalError(w, r, err)
		return
	} else if sentNotModified(w, r, cmodified) {
		return
	}

	modified := syncstorage.Now()
	bids, idExists := r.URL.Query()["ids"]
	if idExists {
		bidlist := strings.Split(bids[0], ",")

		if len(bidlist) > s.config.MaxPOSTRecords {
			sendRequestProblem(w, r, http.StatusBadRequest, errors.New("Exceeded max batch size"))
			return
		}

		modified, err = s.db.DeleteBSOs(cId, bidlist...)
		if err != nil {
			InternalError(w, r, err)
			return
		}
	} else {
		err = s.db.DeleteCollection(cId)
		if err != nil {
			InternalError(w, r, err)
			return
		}
	}

	w.Header().Set("Content-Type", "application/json")
	fmt.Fprintf(w, `{"modified":%s}`, syncstorage.ModifiedToString(modified))
}
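// sendRequestProblem is used throughout these handlers but not defined in
// this fragment. A minimal sketch of the assumed behaviour (log the wrapped
// error, then reply with the given status and reason); the real
// implementation may differ:
func sendRequestProblemSketch(w http.ResponseWriter, r *http.Request, status int, reason error) {
	log.WithFields(log.Fields{
		"method": r.Method,
		"path":   r.URL.Path,
		"error":  reason.Error(),
	}).Info("request-problem")

	http.Error(w, reason.Error(), status)
}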
func (s *SyncUserHandler) hDeleteEverything(w http.ResponseWriter, r *http.Request) {
	err := s.db.DeleteEverything()
	if err != nil {
		InternalError(w, r, err)
	} else {
		m := syncstorage.ModifiedToString(syncstorage.Now())
		w.Header().Set("Content-Type", "text/plain")
		w.Header().Set("X-Last-Modified", m)
		w.Write([]byte(m))
	}
}
// RequestToPostBSOInput extracts and unmarshals request.Body into a
// syncstorage.PostBSOInput. It also returns a PostResults since it
// validates BSOs as it parses them.
func RequestToPostBSOInput(r *http.Request, maxPayloadSize int) (
	syncstorage.PostBSOInput,
	*syncstorage.PostResults,
	error,
) {

	// bsoToBeProcessed will actually get sent to the DB
	bsoToBeProcessed := syncstorage.PostBSOInput{}
	results := syncstorage.NewPostResults(syncstorage.Now())

	// a list of all the raw json encoded BSOs
	var raw []json.RawMessage

	if ct := getMediaType(r.Header.Get("Content-Type")); ct == "application/json" || ct == "text/plain" {
		decoder := json.NewDecoder(r.Body)
		err := decoder.Decode(&raw)
		if err != nil {
			return nil, nil, errors.Wrap(err, "Could not unmarshal Request body")
		}
	} else {
		// deal with application/newlines
		raw = ReadNewlineJSON(r.Body)
	}

	for _, rawJSON := range raw {
		var b syncstorage.PutBSOInput
		if parseErr := parseIntoBSO(rawJSON, &b); parseErr == nil {
			if b.Payload != nil && len(*b.Payload) > maxPayloadSize {
				results.AddFailure(b.Id, "Payload too large")
			} else {
				bsoToBeProcessed = append(bsoToBeProcessed, &b)
			}
		} else {
			// couldn't parse a BSO into something real; abort immediately
			if parseErr.field == "-" { // json error, not an object
				return nil, nil, errors.Wrap(parseErr, "Could not unmarshal BSO")
			}

			results.AddFailure(parseErr.bId, fmt.Sprintf("invalid %s", parseErr.field))
		}
	}

	// change TTL from seconds (what the client sends)
	// to milliseconds (what the DB uses)
	for _, p := range bsoToBeProcessed {
		if p.TTL != nil {
			tmp := *p.TTL * 1000
			p.TTL = &tmp
		}
	}

	return bsoToBeProcessed, results, nil
}
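// getMediaType is referenced above but not defined in this fragment. A
// minimal sketch of the assumed behaviour: strip any parameters (e.g.
// charset) and return just the media type. It leans on the standard
// library's mime package (import "mime"); the actual helper may differ.
func getMediaTypeSketch(contentType string) string {
	// handles values like "application/json; charset=utf-8"
	mediaType, _, err := mime.ParseMediaType(contentType)
	if err != nil {
		return ""
	}
	return mediaType
}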
func testtoken(secret string, uid uint64) token.Token {
	node := "https://syncnode-12345.services.mozilla.com"
	payload := token.TokenPayload{
		Uid:      uid,
		Node:     node,
		Expires:  float64(syncstorage.Now()+60) / 1000,
		Salt:     "pacific",
		FxaUID:   "fxa_" + strconv.FormatUint(uid, 10),
		DeviceId: "device_" + strconv.FormatUint(uid, 10),
	}

	tok, err := token.NewToken([]byte(secret), payload)
	if err != nil {
		panic(err)
	}

	return tok
}
// hCollectionPOSTBatch handles batch=? requests. It is called internally
// by hCollectionPOST to handle batch request logic
func (s *SyncUserHandler) hCollectionPOSTBatch(collectionId int, w http.ResponseWriter, r *http.Request) {

	// CHECK client provided headers to quickly determine if the batch
	// exceeds limits. This is meant to be a cheap(er) check without
	// actually having to parse the data provided by the user
	for _, headerName := range []string{
		"X-Weave-Total-Records",
		"X-Weave-Total-Bytes",
		"X-Weave-Records",
		"X-Weave-Bytes",
	} {
		if strVal := r.Header.Get(headerName); strVal != "" {
			if intVal, err := strconv.Atoi(strVal); err == nil {
				max := 0
				switch headerName {
				case "X-Weave-Total-Records":
					max = s.config.MaxTotalRecords
				case "X-Weave-Total-Bytes":
					max = s.config.MaxTotalBytes
				case "X-Weave-Bytes":
					max = s.config.MaxPOSTBytes
				case "X-Weave-Records":
					max = s.config.MaxPOSTRecords
				}

				if intVal > max {
					WeaveSizeLimitExceeded(w, r,
						errors.Errorf("Limit %s exceeded. %d/%d", headerName, intVal, max))
					return
				}
			} else {
				// header value is invalid (not an int)
				sendRequestProblem(w, r, http.StatusBadRequest,
					errors.Errorf("Invalid integer value for %s", headerName))
				return
			}
		}
	}

	// CHECK the POST size, if possible, from client supplied data and
	// hopefully short-circuit a failure if it exceeds limits
	if r.ContentLength > 0 && r.ContentLength > int64(s.config.MaxPOSTBytes) {
		WeaveSizeLimitExceeded(w, r,
			errors.Errorf("MaxPOSTBytes exceeded in request.ContentLength(%d) > %d",
				r.ContentLength, s.config.MaxPOSTBytes))
		return
	}

	// EXTRACT actual data to check
	bsoToBeProcessed, results, err := RequestToPostBSOInput(r, s.config.MaxRecordPayloadBytes)
	if err != nil {
		WeaveInvalidWBOError(w, r, errors.Wrap(err, "Failed turning POST body into BSO work list"))
		return
	}

	// CHECK actual BSOs sent to see if they exceed limits
	if len(bsoToBeProcessed) > s.config.MaxPOSTRecords {
		sendRequestProblem(w, r, http.StatusRequestEntityTooLarge,
			errors.Errorf("Exceeded %d BSO per request", s.config.MaxPOSTRecords))
		return
	}

	// CHECK for BSO decoding validation errors. Don't even start a Batch
	// if there are any.
	if len(results.Failed) > 0 {
		modified := syncstorage.Now()
		w.Header().Set("X-Last-Modified", syncstorage.ModifiedToString(modified))
		JsonNewline(w, r, &PostResults{
			Modified: modified,
			Success:  nil,
			Failed:   results.Failed,
		})
		return
	}

	// Get batch id, commit command and internal collection Id
	_, batchId, batchCommit := GetBatchIdAndCommit(r)

	// CHECK the batch id is valid for appends. Do this before loading and
	// decoding the body to be more efficient.
	if batchId != "true" {
		id, err := batchIdInt(batchId)
		if err != nil {
			sendRequestProblem(w, r, http.StatusBadRequest, errors.Wrap(err, "Invalid Batch ID Format"))
			return
		}

		if found, err := s.db.BatchExists(id, collectionId); err != nil {
			InternalError(w, r, err)
			return
		} else if !found {
			sendRequestProblem(w, r, http.StatusBadRequest,
				errors.Errorf("Batch id: %s does not exist", batchId))
			return
		}
	}

	filteredBSOs := make([]*syncstorage.PutBSOInput, 0, len(bsoToBeProcessed))
	failures := make(map[string][]string)

	for _, putInput := range bsoToBeProcessed {
		var failId string
		var failReason string

		if !syncstorage.BSOIdOk(putInput.Id) {
			failId = "na"
			failReason = fmt.Sprintf("Invalid BSO id %s", putInput.Id)
		}

		if putInput.SortIndex != nil && !syncstorage.SortIndexOk(*putInput.SortIndex) {
			failId = putInput.Id
			failReason = fmt.Sprintf("Invalid sort index for: %s", putInput.Id)
		}

		if putInput.TTL != nil && !syncstorage.TTLOk(*putInput.TTL) {
			failId = putInput.Id
			failReason = fmt.Sprintf("Invalid TTL for: %s", putInput.Id)
		}

		if failReason != "" {
			if failures[failId] == nil {
				failures[failId] = []string{failReason}
			} else {
				failures[failId] = append(failures[failId], failReason)
			}
			continue
		}

		filteredBSOs = append(filteredBSOs, putInput)
	}

	// JSON serialize the data for storage in the DB.
	// Note: Encode writes a newline after each record
	buf := new(bytes.Buffer)
	if len(filteredBSOs) > 0 {
		encoder := json.NewEncoder(buf)
		for _, bso := range filteredBSOs {
			if err := encoder.Encode(bso); err != nil {
				// whoa... presumably should never happen
				InternalError(w, r, errors.Wrap(err, "Failed encoding BSO for payload"))
				return
			}
		}
	}

	// Save either as a new batch or append to an existing batch

	// dbBatchId tracks the internal batch id number in the database after
	// the create || append
	var dbBatchId int

	appendedOkIds := make([]string, 0, len(filteredBSOs))

	if batchId == "true" {
		newBatchId, err := s.db.BatchCreate(collectionId, buf.String())
		if err != nil {
			InternalError(w, r, errors.Wrap(err, "Failed creating batch"))
			return
		}

		dbBatchId = newBatchId
	} else {
		id, err := batchIdInt(batchId)
		if err != nil {
			sendRequestProblem(w, r, http.StatusBadRequest, errors.Wrap(err, "Invalid Batch ID Format"))
			return
		}

		if len(filteredBSOs) > 0 { // append only if there is something to do
			if err := s.db.BatchAppend(id, collectionId, buf.String()); err != nil {
				InternalError(w, r, errors.Wrapf(err, "Failed append to batch id:%d", id))
				return
			}
		}

		dbBatchId = id
	}

	// The success list only includes the BSO ids in the POST request.
	// These need to be kept so they can be sent back in the response.
	// This matches the python server's behaviour / passes the tests.
	for _, bso := range filteredBSOs {
		appendedOkIds = append(appendedOkIds, bso.Id)
	}

	if batchCommit {
		batchRecord, err := s.db.BatchLoad(dbBatchId, collectionId)
		if err != nil {
			InternalError(w, r, errors.Wrap(err, "Failed Loading Batch to commit"))
			return
		}

		rawJSON := ReadNewlineJSON(bytes.NewBufferString(batchRecord.BSOS))

		// CHECK the final data before committing it to the database
		numInBatch := len(rawJSON)
		if numInBatch > s.config.MaxTotalRecords {
			s.db.BatchRemove(dbBatchId)
			WeaveSizeLimitExceeded(w, r,
				errors.Errorf("Too many BSOs (%d) in Batch(%d)", numInBatch, dbBatchId))
			return
		}

		postData := make(syncstorage.PostBSOInput, len(rawJSON))
		for i, bsoJSON := range rawJSON {
			var bso syncstorage.PutBSOInput
			if parseErr := parseIntoBSO(bsoJSON, &bso); parseErr != nil {
				// well there is definitely a bug somewhere if this happens
				InternalError(w, r, errors.Wrap(parseErr, "Could not decode batch data"))
				return
			}
			postData[i] = &bso
		}

		// CHECK the actual batch data size
		sum := 0 // running total of payload bytes in the batch
		for _, bso := range postData {
			if bso.Payload == nil {
				continue
			}

			sum += len(*bso.Payload)
			if sum > s.config.MaxTotalBytes {
				s.db.BatchRemove(dbBatchId)
				WeaveSizeLimitExceeded(w, r,
					errors.Errorf("Batch size(%d) exceeded MaxTotalBytes limit(%d)",
						sum, s.config.MaxTotalBytes))
				return
			}
		}

		postResults, err := s.db.PostBSOs(collectionId, postData)
		if err != nil {
			InternalError(w, r, err)
			return
		}

		// merge the validation failures with the failures from the commit
		for key, reasons := range postResults.Failed {
			if failures[key] == nil {
				failures[key] = reasons
			} else {
				failures[key] = append(failures[key], reasons...)
			}
		}

		// DELETE the batch from the DB
		s.db.BatchRemove(dbBatchId)

		w.Header().Set("X-Last-Modified", syncstorage.ModifiedToString(postResults.Modified))

		JsonNewline(w, r, &PostResults{
			Modified: postResults.Modified,
			Success:  appendedOkIds,
			Failed:   failures,
		})
	} else {
		modified := syncstorage.Now()
		w.Header().Set("X-Last-Modified", syncstorage.ModifiedToString(modified))

		JsonNewlineStatus(w, r, http.StatusAccepted, &PostResults{
			Batch:    batchIdString(dbBatchId),
			Modified: modified,
			Success:  appendedOkIds,
			Failed:   failures,
		})
	}
}
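// batchIdInt and batchIdString are used above but not defined in this
// fragment. From their usage, batch ids appear to travel as string-encoded
// integers in the batch=? query parameter while the DB uses plain ints.
// Minimal sketches under that assumption (the real helpers may encode or
// validate differently):
func batchIdIntSketch(batchId string) (int, error) {
	id, err := strconv.Atoi(batchId)
	if err != nil || id < 0 {
		return 0, errors.Errorf("Invalid batch id: %s", batchId)
	}
	return id, nil
}

func batchIdStringSketch(id int) string {
	return strconv.Itoa(id)
}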
func TestCacheHandlerInfoCollections(t *testing.T) {
	assert := assert.New(t)

	handler := NewCacheHandler(cacheMockHandler, DefaultCacheHandlerConfig)

	// sometimes we see query strings in the requests
	url := syncurl(uniqueUID(), "info/collections?version=1.5")

	{ // basic test to make sure data is what's expected and caching works
		resp := request("GET", url, nil, handler)
		assert.Equal(http.StatusOK, resp.Code)
		assert.True(strings.Contains(resp.Body.String(), `"Type":"info/collections"`))
		assert.Equal("application/json", resp.Header().Get("Content-Type"))

		resp2 := request("GET", url, nil, handler)
		assert.Equal(http.StatusOK, resp2.Code)
		assert.Equal(resp.Body.String(), resp2.Body.String(), "Expected cached value")
		assert.Equal(resp.Header().Get("X-Last-Modified"), resp2.Header().Get("X-Last-Modified"))
	}

	{ // test that POST, PUT and DELETE invalidate the cache
		var last *httptest.ResponseRecorder
		for _, method := range []string{"POST", "PUT", "DELETE"} {
			if last == nil {
				last = request("GET", url, nil, handler)
			}

			// ensure things stay the same
			resp := request("GET", url, nil, handler)
			assert.Equal(http.StatusOK, resp.Code)
			assert.Equal(last.Body.String(), resp.Body.String())
			assert.Equal(last.Header().Get("X-Last-Modified"), resp.Header().Get("X-Last-Modified"))

			// swap it
			last = resp

			// this should clear the cache; sleep to compensate for
			// sync's low resolution time format
			time.Sleep(10 * time.Millisecond)
			request(method, url, nil, handler)

			// make sure things have changed
			resp = request("GET", url, nil, handler)
			assert.Equal(http.StatusOK, resp.Code)
			assert.NotEqual(last.Body.String(), resp.Body.String(), "Should have changed")
			assert.NotEqual(last.Header().Get("X-Last-Modified"), resp.Header().Get("X-Last-Modified"))

			last = resp
		}
	}

	{ // test that different uids get different values
		resp1a := request("GET", syncurl("123", "info/collections"), nil, handler)
		resp1b := request("GET", syncurl("123", "info/collections"), nil, handler)

		// sleep so the second uid gets a different timestamp
		time.Sleep(10 * time.Millisecond)

		resp2a := request("GET", syncurl("456", "info/collections"), nil, handler)
		resp2b := request("GET", syncurl("456", "info/collections"), nil, handler)

		assert.Equal(http.StatusOK, resp1a.Code)
		assert.Equal(http.StatusOK, resp1b.Code)
		assert.Equal(http.StatusOK, resp2a.Code)
		assert.Equal(http.StatusOK, resp2b.Code)

		// caching works
		assert.Equal(resp1a.Body.String(), resp1b.Body.String())
		assert.Equal(resp2a.Body.String(), resp2b.Body.String())

		// no conflicts between uids
		assert.NotEqual(resp1a.Body.String(), resp2a.Body.String())
	}

	{ // test with a real SyncUserHandler
		uid := "123456"
		db, _ := syncstorage.NewDB(":memory:", nil)
		userHandler := NewSyncUserHandler(uid, db, nil)
		handler := NewCacheHandler(userHandler, DefaultCacheHandlerConfig)
		collections := []string{
			"clients", "crypto", "forms", "history", "keys", "meta",
			"bookmarks", "prefs", "tabs", "passwords", "addons",
		}

		for _, cName := range collections {
			cId, _ := db.GetCollectionId(cName)
			db.TouchCollection(cId, syncstorage.Now()+cId) // offset each collection's timestamp by its id
		}

		resp := request("GET", syncurl(uid, "info/collections"), nil, handler)
		resp2 := request("GET", syncurl(uid, "info/collections"), nil, handler)
		assert.Equal(resp.Body.String(), resp2.Body.String())
		assert.Equal(resp.Header().Get("X-Last-Modified"), resp2.Header().Get("X-Last-Modified"))
	}
}
"strings" "testing" "time" "github.com/mozilla-services/go-syncstorage/syncstorage" "github.com/mozilla-services/go-syncstorage/token" "github.com/stretchr/testify/assert" ) // generates a unique response each time so caching can be tested var cacheMockHandler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { resp := struct { Type string Value string }{"OK", syncstorage.ModifiedToString(syncstorage.Now())} w.Header().Set("X-Last-Modified", resp.Value) if infoCollectionsRoute.MatchString(req.URL.Path) { resp.Type = "info/collections" JSON(w, req, http.StatusOK, resp) } else if infoConfigurationRoute.MatchString(req.URL.Path) { resp.Type = "info/configuration" JSON(w, req, http.StatusOK, resp) } else { JSON(w, req, http.StatusOK, resp) } }) func TestCacheHandlerInfoCollections(t *testing.T) {