func BenchmarkCacheHandler(b *testing.B) {
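	// measures ServeHTTP for info/collections; after the first iteration the
	// response should be served from the CacheHandler's cache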
	uid := "123456"
	db, _ := syncstorage.NewDB(":memory:", nil)
	userHandler := NewSyncUserHandler(uid, db, nil)
	handler := NewCacheHandler(userHandler, DefaultCacheHandlerConfig)
	collections := []string{
		"clients", "crypto", "forms", "history", "keys", "meta",
		"bookmarks", "prefs", "tabs", "passwords", "addons",
	}
	for _, cName := range collections {
		cId, _ := db.GetCollectionId(cName)
		db.TouchCollection(cId, cId*1000) // use cId*1000 (cId seconds in ms) as a distinct modified time
	}

	req, err := http.NewRequest("GET", "/storage/12345/info/collections", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Accept", "application/json")
	session := &Session{Token: token.TokenPayload{Uid: 12345}}
	reqCtx := req.WithContext(NewSessionContext(req.Context(), session))

	b.ResetTimer() // exclude setup cost from the measurement
	for i := 0; i < b.N; i++ {
		w := httptest.NewRecorder()
		handler.ServeHTTP(w, reqCtx)
	}
}
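// TestSyncUserHandlerPUT covers the basic PUT create/update flow for a single
// BSO and the per-record payload size limit.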
func TestSyncUserHandlerPUT(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	uid := "123456"

	db, _ := syncstorage.NewDB(":memory:", nil)
	handler := NewSyncUserHandler(uid, db, nil)
	url := syncurl(uid, "storage/bookmarks/bso0")
	header := make(http.Header)
	header.Add("Content-Type", "application/json")

	{ // test basic create / update flow
		body := bytes.NewBufferString(`{"payload": "1234"}`)
		resp := requestheaders("PUT", url, body, header, handler)
		if !assert.Equal(http.StatusOK, resp.Code) {
			return
		}

		colId, _ := db.GetCollectionId("bookmarks")

		bsoNew, _ := db.GetBSO(colId, "bso0")
		assert.Equal("1234", bsoNew.Payload)

		body2 := bytes.NewBufferString(`{"sortindex": 9, "ttl":1000}`)
		resp2 := requestheaders("PUT", url, body2, header, handler)
		if !assert.Equal(http.StatusOK, resp2.Code) {
			fmt.Println(resp2.Body.String())
			return
		}

		bsoUpdated, _ := db.GetBSO(colId, "bso0")
		assert.Equal("1234", bsoUpdated.Payload)
		assert.Equal(9, bsoUpdated.SortIndex)
		assert.NotEqual(bsoNew.Modified, bsoUpdated.Modified)
		assert.NotEqual(bsoNew.TTL, bsoUpdated.TTL)
	}

	{ // test payload too large w/ a very small limit
		conf := NewDefaultSyncUserHandlerConfig()
		conf.MaxRecordPayloadBytes = 3
		handler := NewSyncUserHandler(uid, db, conf)

		body := bytes.NewBufferString(`{"payload": "1234"}`)
		resp := requestheaders("PUT", url, body, header, handler)
		if !assert.Equal(http.StatusRequestEntityTooLarge, resp.Code) {
			return
		}
	}

}
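// TestSyncUserHandlerStopPurgeClose verifies that a handler stopped with
// StopHTTP responds with 503 Service Unavailable and a Retry-After header.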
func TestSyncUserHandlerStopPurgeClose(t *testing.T) {
	assert := assert.New(t)
	uid := uniqueUID()
	db, _ := syncstorage.NewDB(":memory:", nil)
	handler := NewSyncUserHandler(uid, db, nil)

	handler.StopHTTP()

	// assert 503 with Retry-After header
	url := syncurl(uid, "info/collections")
	resp := request("GET", url, nil, handler)
	assert.Equal(http.StatusServiceUnavailable, resp.Code)
	assert.NotEqual("", resp.Header().Get("Retry-After"))
}
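// TestSyncUserHandlerInfoConfiguration verifies that the limits set in
// SyncUserHandlerConfig are reported by the info/configuration endpoint.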
func TestSyncUserHandlerInfoConfiguration(t *testing.T) {

	assert := assert.New(t)
	uid := uniqueUID()
	db, _ := syncstorage.NewDB(":memory:", nil)

	// make sure values propagate from the configuration
	config := &SyncUserHandlerConfig{
		MaxPOSTBytes:          1,
		MaxPOSTRecords:        2,
		MaxTotalBytes:         3,
		MaxTotalRecords:       4,
		MaxRequestBytes:       5,
		MaxRecordPayloadBytes: 6,
	}

	handler := NewSyncUserHandler(uid, db, config)
	resp := request("GET", syncurl(uid, "info/configuration"), nil, handler)

	assert.Equal(http.StatusOK, resp.Code)
	assert.Equal("application/json", resp.Header().Get("Content-Type"))

	jdata := make(map[string]int)
	if err := json.Unmarshal(resp.Body.Bytes(), &jdata); assert.NoError(err) {
		if val, ok := jdata["max_post_bytes"]; assert.True(ok, "max_post_bytes") {
			assert.Equal(val, config.MaxPOSTBytes)
		}
		if val, ok := jdata["max_post_records"]; assert.True(ok, "max_post_records") {
			assert.Equal(val, config.MaxPOSTRecords)
		}
		if val, ok := jdata["max_total_bytes"]; assert.True(ok, "max_total_bytes") {
			assert.Equal(val, config.MaxTotalBytes)
		}
		if val, ok := jdata["max_total_records"]; assert.True(ok, "max_total_records") {
			assert.Equal(val, config.MaxTotalRecords)
		}
		if val, ok := jdata["max_request_bytes"]; assert.True(ok, "max_request_bytes") {
			assert.Equal(val, config.MaxRequestBytes)
		}
		if val, ok := jdata["max_record_payload_bytes"]; assert.True(ok, "max_record_payload_bytes") {
			assert.Equal(val, config.MaxRecordPayloadBytes)
		}
	}
}
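// TestSyncUserHandlerInfoCollections verifies that info/collections reports
// each collection's last modified time in the sync timestamp format.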
func TestSyncUserHandlerInfoCollections(t *testing.T) {
	assert := assert.New(t)

	uid := "123456"

	db, _ := syncstorage.NewDB(":memory:", nil)
	handler := NewSyncUserHandler(uid, db, nil)
	collections := []string{
		"clients",
		"crypto",
		"forms",
		"history",
		"keys",
		"meta",
		"bookmarks",
		"prefs",
		"tabs",
		"passwords",
		"addons",
	}
	for _, cName := range collections {
		cId, _ := db.GetCollectionId(cName)
		db.TouchCollection(cId, cId*1000) // use cId*1000 (cId seconds in ms) as a distinct modified time
	}

	resp := request("GET", syncurl(uid, "info/collections"), nil, handler)
	assert.Equal(http.StatusOK, resp.Code)
	results := make(map[string]float32)
	if err := json.Unmarshal(resp.Body.Bytes(), &results); !assert.NoError(err) {
		return
	}

	for cName, modified := range results {
		cId, err := db.GetCollectionId(cName)
		if !assert.NoError(err) {
			return
		}

		// the modified time was set to cId*1000 ms, which is cId seconds in the sync modified format
		assert.Equal(float32(cId), modified)
	}

}
// getElement returns the poolElement for uid and whether a new one had to be
// created to fulfill the request.
func (p *handlerPool) getElement(uid string) (*poolElement, bool, error) {
	var (
		element *poolElement
		ok      bool
		dbFile  string
	)

	p.Lock()
	defer p.Unlock()

	elementCreated := false

	if element, ok = p.elements[uid]; !ok {
		if len(p.base) == 1 && p.base[0] == ":memory:" {
			dbFile = ":memory:"
		} else {
			storageDir, filename := p.PathAndFile(uid)

			// create the sub-directory tree if required
			if _, err := os.Stat(storageDir); os.IsNotExist(err) {
				if err := os.MkdirAll(storageDir, 0755); err != nil {
					return nil, false, errors.Wrap(err, "Could not create datadir")
				}
			}

			// TODO clean the UID of any weird characters, e.g. os.PathSeparator
			dbFile = storageDir + string(os.PathSeparator) + filename
		}

		if p.lru.Len() > p.maxPoolSize {
			// p.cleanupHandlers acquires the same lock, so temporarily release
			// it here to avoid a deadlock
			p.Unlock()
			p.cleanupHandlers(1 + p.maxPoolSize/10) // clean up ~10% of the pool
			p.Lock()
		}

		db, err := syncstorage.NewDB(dbFile, p.dbConfig)
		if err != nil {
			return nil, false, errors.Wrap(err, "Could not create DB")
		}

		element = &poolElement{
			uid:     uid,
			handler: NewSyncUserHandler(uid, db, nil),
		}

		elementCreated = true

		p.elements[uid] = element

		listElement := p.lru.PushFront(element)
		p.lrumap[uid] = listElement
	} else {
		if element.handler.IsStopped() {
			return nil, false, errElementStopped
		}

		p.lru.MoveToFront(p.lrumap[uid])
	}

	return element, elementCreated, nil
}
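// TestCacheHandlerInfoCollections verifies that info/collections responses are
// cached, that POST/PUT/DELETE requests invalidate the cache, and that cache
// entries are kept separate per uid.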
func TestCacheHandlerInfoCollections(t *testing.T) {
	assert := assert.New(t)
	handler := NewCacheHandler(cacheMockHandler, DefaultCacheHandlerConfig)
	url := syncurl(uniqueUID(), "info/collections?version=1.5") // query strings sometimes appear in real requests

	{
		// basic test to make sure data is what's expected and caching works
		resp := request("GET", url, nil, handler)
		assert.Equal(http.StatusOK, resp.Code)
		assert.True(strings.Contains(resp.Body.String(), `"Type":"info/collections"`))
		assert.Equal("application/json", resp.Header().Get("Content-Type"))

		resp2 := request("GET", url, nil, handler)
		assert.Equal(http.StatusOK, resp2.Code)
		assert.Equal(resp.Body.String(), resp2.Body.String(), "Expected cached value")
		assert.Equal(resp.Header().Get("X-Last-Modified"), resp2.Header().Get("X-Last-Modified"))
	}

	// test POST, PUT, DELETE invalidates the cache
	{
		var last *httptest.ResponseRecorder
		for _, method := range []string{"POST", "PUT", "DELETE"} {

			if last == nil {
				last = request("GET", url, nil, handler)
			}

			// ensures things stay the same
			resp := request("GET", url, nil, handler)
			assert.Equal(http.StatusOK, resp.Code)
			assert.Equal(last.Body.String(), resp.Body.String())
			assert.Equal(last.Header().Get("X-Last-Modified"), resp.Header().Get("X-Last-Modified"))

			// swap it
			last = resp

			// sleep past sync's 10ms timestamp resolution so the write below
			// produces a new X-Last-Modified value
			time.Sleep(10 * time.Millisecond)

			// this write should invalidate the cache
			request(method, url, nil, handler)

			// make sure things have changed
			resp = request("GET", url, nil, handler)
			assert.Equal(http.StatusOK, resp.Code)
			assert.NotEqual(last.Body.String(), resp.Body.String(), "Should have changed")
			assert.NotEqual(last.Header().Get("X-Last-Modified"), resp.Header().Get("X-Last-Modified"))

			last = resp
		}
	}

	// test that different uids get different values
	{
		resp1a := request("GET", syncurl("123", "info/collections"), nil, handler)
		resp1b := request("GET", syncurl("123", "info/collections"), nil, handler)

		time.Sleep(10 * time.Millisecond) // sleep past sync's 10ms timestamp resolution so the two uids get different modified times

		resp2b := request("GET", syncurl("456", "info/collections"), nil, handler)
		resp2a := request("GET", syncurl("456", "info/collections"), nil, handler)

		assert.Equal(http.StatusOK, resp1a.Code)
		assert.Equal(http.StatusOK, resp1b.Code)
		assert.Equal(http.StatusOK, resp2a.Code)
		assert.Equal(http.StatusOK, resp2b.Code)

		// caching works
		assert.Equal(resp1a.Body.String(), resp1b.Body.String())
		assert.Equal(resp2a.Body.String(), resp2b.Body.String())

		// no conflicts
		assert.NotEqual(resp1a.Body.String(), resp2a.Body.String())
	}

	{ // test with a real SyncUserHandler
		uid := "123456"
		db, _ := syncstorage.NewDB(":memory:", nil)
		userHandler := NewSyncUserHandler(uid, db, nil)
		handler := NewCacheHandler(userHandler, DefaultCacheHandlerConfig)
		collections := []string{
			"clients", "crypto", "forms", "history", "keys", "meta",
			"bookmarks", "prefs", "tabs", "passwords", "addons",
		}
		for _, cName := range collections {
			cId, _ := db.GetCollectionId(cName)
			db.TouchCollection(cId, syncstorage.Now()+cId) // offset each collection's modified time by its id so they differ
		}

		resp := request("GET", syncurl(uid, "info/collections"), nil, handler)
		resp2 := request("GET", syncurl(uid, "info/collections"), nil, handler)

		assert.Equal(resp.Body.String(), resp2.Body.String())
		assert.Equal(resp.Header().Get("X-Last-Modified"), resp2.Header().Get("X-Last-Modified"))
	}
}
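// TestSyncUserHandlerTidyUp verifies that TidyUp skips work until the minimum
// purge interval has elapsed, then purges expired BSOs and batches and vacuums
// the database.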
func TestSyncUserHandlerTidyUp(t *testing.T) {
	assert := assert.New(t)

	{ // test that purge skipping works
		db, _ := syncstorage.NewDB(":memory:", nil)
		config := NewDefaultSyncUserHandlerConfig()
		config.MaxBatchTTL = 1
		handler := NewSyncUserHandler(uniqueUID(), db, config)

		// with no last-purge value in the db the first TidyUp always skips;
		// there is nothing to purge in a fresh db
		minPurge := 10 * time.Millisecond
		skipped, _, err := handler.TidyUp(minPurge, minPurge, 1)
		if !assert.NoError(err) || !assert.True(skipped, "Expected to skip") {
			return
		}

		time.Sleep(5 * time.Millisecond)
		// minPurge hasn't elapsed yet, so it should skip again
		skipped, _, err = handler.TidyUp(minPurge, minPurge, 1)
		if !assert.NoError(err) || !assert.True(skipped) {
			return
		}

		// enough time has passed, the purge should happen
		time.Sleep(10 * time.Millisecond)
		skipped, _, err = handler.TidyUp(minPurge, minPurge, 1)
		if !assert.NoError(err) || !assert.False(skipped) {
			return
		}
	}

	{ // test purging works
		db, _ := syncstorage.NewDB(":memory:", nil)
		config := NewDefaultSyncUserHandlerConfig()
		config.MaxBatchTTL = 1
		handler := NewSyncUserHandler(uniqueUID(), db, config)
		cId := 1
		bId := "bso0"

		payload := "hi"
		ttl := 1

		// write the NEXT_PURGE value
		handler.TidyUp(time.Nanosecond, time.Nanosecond, 1)

		// remember the size of a fresh db
		usageOrig, _ := db.Usage()

		_, err := db.PutBSO(cId, bId, &payload, nil, &ttl)
		if !assert.NoError(err) {
			return
		}

		// put in some large data to make sure the vacuum threshold triggers
		batchId, err := db.BatchCreate(cId, strings.Repeat("1234567890", 5*4*1024))
		if !assert.NoError(err) {
			return
		}

		time.Sleep(time.Millisecond * 10)

		// What's actually being tested: TidyUp should purge the expired BSO,
		// remove the batch, and vacuum the database (1KB vacuum threshold)
		_, _, err = handler.TidyUp(time.Nanosecond, time.Nanosecond, 1)

		if !assert.NoError(err) {
			return
		}

		// make sure the BSO was purged
		_, err = db.GetBSO(cId, bId)
		if !assert.Equal(syncstorage.ErrNotFound, err) {
			return
		}

		// make sure the batch was purged
		exists, err := db.BatchExists(batchId, cId)
		if !assert.NoError(err) || !assert.False(exists) {
			return
		}

		usage, err := db.Usage()
		if !assert.NoError(err) {
			return
		}

		assert.Equal(usageOrig.Total, usage.Total, "Expected size to be back to original")
		assert.Equal(0, usage.Free)
	}
}
// TestSyncUserHandlerPOSTBatch tests that a batch can be created, appended to and committed
func TestSyncUserHandlerPOSTBatch(t *testing.T) {

	assert := assert.New(t)

	bodyCreate := bytes.NewBufferString(`[
		{"id":"bso0", "payload": "bso0"},
		{"id":"bso1", "payload": "bso1"}
	]`)
	bodyAppend := bytes.NewBufferString(`[
		{"id":"bso2", "payload": "bso2"},
		{"id":"bso3", "payload": "bso3"}
	]`)
	bodyCommit := bytes.NewBufferString(`[
		{"id":"bso4", "payload": "bso4"},
		{"id":"bso5", "payload": "bso5"}
	]`)

	uid := "123456"
	collection := "testcol"
	url := syncurl(uid, "storage/"+collection)
	header := make(http.Header)
	header.Add("Content-Type", "application/json")

	{ // test common flow
		db, _ := syncstorage.NewDB(":memory:", nil)
		handler := NewSyncUserHandler(uid, db, nil)

		respCreate := requestheaders("POST", url+"?batch=true", bodyCreate, header, handler)
		if !assert.Equal(http.StatusAccepted, respCreate.Code, respCreate.Body.String()) {
			return
		}
		var createResults PostResults
		if err := json.Unmarshal(respCreate.Body.Bytes(), &createResults); !assert.NoError(err) {
			return
		}

		assert.Equal(batchIdString(1), createResults.Batch) // clean db, always gets 1
		batchId := createResults.Batch // avoid shadowing the batchIdString helper

		respAppend := requestheaders("POST", url+"?batch="+batchId, bodyAppend, header, handler)
		assert.Equal(http.StatusAccepted, respAppend.Code, respAppend.Body.String())

		respCommit := requestheaders("POST", url+"?commit=1&batch="+batchId, bodyCommit, header, handler)
		assert.Equal(http.StatusOK, respCommit.Code, respCommit.Body.String())

		cId, _ := db.GetCollectionId(collection)
		for bIdNum := 0; bIdNum <= 5; bIdNum++ {
			bId := "bso" + strconv.Itoa(bIdNum)
			_, err := db.GetBSO(cId, bId)
			assert.NoError(err, "Could not find bso: %s", bId)
		}

		// make sure the batch is no longer in the db
		batchNumericId, _ := batchIdInt(createResults.Batch) // avoid shadowing the batchIdInt helper
		_, errMissing := db.BatchLoad(batchNumericId, cId)
		assert.Equal(syncstorage.ErrBatchNotFound, errMissing)
	}

	{ // test commit=1&batch=true
		db, _ := syncstorage.NewDB(":memory:", nil)
		handler := NewSyncUserHandler(uid, db, nil)

		bodyCreate := bytes.NewBufferString(`[
			{"id":"bso0", "payload": "bso0"},
			{"id":"bso1", "payload": "bso1"},
			{"id":"bso2", "payload": "bso2"}
		]`)

		respCreate := requestheaders("POST", url+"?batch=true&commit=1", bodyCreate, header, handler)
		if !assert.Equal(http.StatusOK, respCreate.Code, respCreate.Body.String()) {
			return
		}
	}

	{ // test batch=true and an empty commit
		db, _ := syncstorage.NewDB(":memory:", nil)
		handler := NewSyncUserHandler(uid, db, nil)

		bodyCreate := bytes.NewBufferString(`[
			{"id":"bso0", "payload": "bso0"},
			{"id":"bso1", "payload": "bso1"},
			{"id":"bso2", "payload": "bso2"}
		]`)

		respCreate := requestheaders("POST", url+"?batch=true", bodyCreate, header, handler)
		if !assert.Equal(http.StatusAccepted, respCreate.Code, respCreate.Body.String()) {
			return
		}

		var createResults PostResults
		if err := json.Unmarshal(respCreate.Body.Bytes(), &createResults); !assert.NoError(err) {
			return
		}

		assert.Equal(batchIdString(1), createResults.Batch) // clean db, always gets 1
		batchId := createResults.Batch
		body := bytes.NewReader([]byte("[]"))
		respCommit := requestheaders("POST", url+"?commit=1&batch="+batchId, body, header, handler)
		if !assert.Equal(http.StatusOK, respCommit.Code, respCommit.Body.String()) {
			return
		}

	}
}
// TestSyncUserHandlerPOST tests POSTing new and updated BSOs, Content-Type
// handling, and the per-record payload size limit
func TestSyncUserHandlerPOST(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	uid := "123456"

	db, _ := syncstorage.NewDB(":memory:", nil)
	handler := NewSyncUserHandler(uid, db, nil)
	url := syncurl(uid, "storage/bookmarks")

	{
		header := make(http.Header)
		header.Add("Content-Type", "application/octet-stream")

		resp := requestheaders("POST", url, nil, header, handler)
		if !assert.Equal(http.StatusUnsupportedMediaType, resp.Code) {
			return
		}
	}

	// Make sure INSERT works first
	body := bytes.NewBufferString(`[
		{"id":"bso1", "payload": "initial payload", "sortindex": 1, "ttl": 2100000},
		{"id":"bso2", "payload": "initial payload", "sortindex": 1, "ttl": 2100000},
		{"id":"bso3", "payload": "initial payload", "sortindex": 1, "ttl": 2100000}
	]`)

	// POST new data
	header := make(http.Header)
	header.Add("Content-Type", "application/json")
	cId, _ := db.GetCollectionId("bookmarks")

	{
		resp := requestheaders("POST", url, body, header, handler)

		assert.Equal(http.StatusOK, resp.Code)

		var results PostResults
		jsbody := resp.Body.Bytes()
		err := json.Unmarshal(jsbody, &results)
		if !assert.NoError(err) {
			return
		}

		assert.Len(results.Success, 3)
		assert.Len(results.Failed, 0)

		// verify it made it into the db ok
		for _, bId := range []string{"bso1", "bso2", "bso3"} {
			bso, _ := db.GetBSO(cId, bId)
			assert.Equal("initial payload", bso.Payload)
			assert.Equal(1, bso.SortIndex)
		}
	}

	{
		// Test that updates work
		body = bytes.NewBufferString(`[
			{"id":"bso1", "sortindex": 2},
			{"id":"bso2", "payload": "updated payload"},
			{"id":"bso3", "payload": "updated payload", "sortindex":3}
		]`)

		resp := requestheaders("POST", url, body, header, handler)
		assert.Equal(http.StatusOK, resp.Code)

		bso, _ := db.GetBSO(cId, "bso1")
		assert.Equal("initial payload", bso.Payload) // stayed the same
		assert.Equal(2, bso.SortIndex)               // it updated

		bso, _ = db.GetBSO(cId, "bso2")
		assert.Equal("updated payload", bso.Payload) // updated
		assert.Equal(1, bso.SortIndex)               // same

		bso, _ = db.GetBSO(cId, "bso3")
		assert.Equal(bso.Payload, "updated payload") // updated
		assert.Equal(3, bso.SortIndex)               // updated
	}

	{ // ref: issue #108 - handling of Content-Type like application/json;charset=utf-8
		body := bytes.NewBufferString(`[
			{"id":"bsoa", "payload": "initial payload", "sortindex": 1, "ttl": 2100000},
			{"id":"bsob", "payload": "initial payload", "sortindex": 1, "ttl": 2100000},
			{"id":"bsoc", "payload": "initial payload", "sortindex": 1, "ttl": 2100000}
		]`)

		// POST new data
		header := make(http.Header)
		header.Add("Content-Type", "application/json;charset=utf-8")
		resp := requestheaders("POST", url, body, header, handler)
		assert.Equal(http.StatusOK, resp.Code)
	}

	{ // test error when payload is too large
		body := bytes.NewBufferString(`[
			{"id":"bsoA", "payload": "1234567890"},
			{"id":"bsoB", "payload": "12345"}
		]`)

		// make a small acceptable payload
		config := NewDefaultSyncUserHandlerConfig()
		config.MaxRecordPayloadBytes = 5

		handler := NewSyncUserHandler(uid, db, config)
		url := syncurl(uid, "storage/bookmarks")

		resp := requestheaders("POST", url, body, header, handler)
		assert.Equal(http.StatusOK, resp.Code)

		var results PostResults
		err := json.Unmarshal(resp.Body.Bytes(), &results)
		if !assert.NoError(err) {
			return
		}

		assert.Len(results.Success, 1)
		if assert.Len(results.Failed, 1) {
			assert.Equal("Payload too large", results.Failed["bsoA"][0])
		}

	}
}