func TestShadowerPush(t *testing.T) {
	//base.LogKeys["Shadow"] = true
	bucket := makeExternalBucket()
	defer bucket.Close()

	db := setupTestDB(t)
	defer tearDownTestDB(t, db)
	var err error
	db.Shadower, err = NewShadower(db.DatabaseContext, bucket, nil)
	assertNoError(t, err, "NewShadower")

	key1rev1, err := db.Put("key1", Body{"aaa": "bbb"})
	assertNoError(t, err, "Put")
	_, err = db.Put("key2", Body{"ccc": "ddd"})
	assertNoError(t, err, "Put")

	base.Log("Waiting for shadower to catch up...")
	var doc1, doc2 Body
	waitFor(t, func() bool {
		return bucket.Get("key1", &doc1) == nil && bucket.Get("key2", &doc2) == nil
	})
	assert.DeepEquals(t, doc1, Body{"aaa": "bbb"})
	assert.DeepEquals(t, doc2, Body{"ccc": "ddd"})

	base.Log("Deleting local doc")
	db.DeleteDoc("key1", key1rev1)
	waitFor(t, func() bool {
		err = bucket.Get("key1", &doc1)
		return err != nil
	})
	assert.True(t, base.IsDocNotFoundError(err))
}
// Looks up a User by email address.
func (auth *Authenticator) GetUserByEmail(email string) (User, error) {
	var info userByEmailInfo
	err := auth.bucket.Get(docIDForUserEmail(email), &info)
	if base.IsDocNotFoundError(err) {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	return auth.GetUser(info.Username)
}
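// Usage sketch (not from the original source): callers must treat the (nil, nil)
// return as "no such user" rather than an error, since IsDocNotFoundError is
// swallowed inside GetUserByEmail. exampleLookupByEmail is a hypothetical helper
// assumed to live in the same package as Authenticator.
func exampleLookupByEmail(auth *Authenticator, email string) (User, error) {
	user, err := auth.GetUserByEmail(email)
	if err != nil {
		return nil, err // a real storage error, not just a missing doc
	}
	if user == nil {
		// No account registered under that email; nil/nil is the only signal.
		return nil, nil
	}
	return user, nil
}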
// Gets the body of a revision's nearest ancestor, as raw JSON (without _id or _rev.)
// If no ancestor has any JSON, returns nil but no error.
func (db *Database) getAncestorJSON(doc *document, revid string) ([]byte, error) {
	for {
		if revid = doc.History.getParent(revid); revid == "" {
			return nil, nil
		} else if body, err := db.getRevisionJSON(doc, revid); body != nil {
			return body, nil
		} else if !base.IsDocNotFoundError(err) {
			return nil, err
		}
	}
}
// Looks up the information for a user.
// If the username is "" it will return the default (guest) User object, not nil.
// By default the guest User has access to everything, i.e. Admin Party! This can
// be changed by altering its list of channels and saving the changes via SetUser.
func (auth *Authenticator) GetUser(username string) (*User, error) {
	var user *User
	err := auth.bucket.Get(docIDForUser(username), &user)
	if err != nil && !base.IsDocNotFoundError(err) {
		return nil, err
	}
	if user == nil && username == "" {
		user = &User{Name: username, Channels: []string{"*"}}
	}
	return user, nil
}
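// Usage sketch (not from the original source): the guest user is materialized on
// demand when no doc exists for the empty username. exampleIsAdminParty is a
// hypothetical helper showing how to detect the default all-access state.
func exampleIsAdminParty(auth *Authenticator) (bool, error) {
	guest, err := auth.GetUser("")
	if err != nil {
		return false, err
	}
	// Fresh databases return a guest with Channels == ["*"], i.e. Admin Party;
	// narrowing guest.Channels and persisting via SetUser turns it off.
	for _, ch := range guest.Channels {
		if ch == "*" {
			return true, nil
		}
	}
	return false, nil
}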
func (db *Database) getOldRevisionJSON(docid string, revid string) ([]byte, error) {
	data, err := db.Bucket.GetRaw(oldRevisionKey(docid, revid))
	if base.IsDocNotFoundError(err) {
		base.LogTo("CRUD+", "No old revision %q / %q", docid, revid)
		err = nil
	}
	if data != nil {
		base.LogTo("CRUD+", "Got old revision %q / %q --> %d bytes", docid, revid, len(data))
	}
	return data, err
}
// Calls the JS sync function to assign the doc to channels, grant users
// access to channels, and reject invalid documents.
func (db *Database) getChannelsAndAccess(doc *document, body Body, parentRevID string) (result base.Set, access channels.AccessMap, roles channels.AccessMap, err error) {
	base.LogTo("CRUD+", "Invoking sync on doc %q rev %s", doc.ID, body["_rev"])

	// Get the parent revision, to pass to the sync function:
	var oldJson string
	if parentRevID != "" {
		var oldJsonBytes []byte
		oldJsonBytes, err = db.getRevisionJSON(doc, parentRevID)
		if err != nil {
			if base.IsDocNotFoundError(err) {
				err = nil
			}
			return
		}
		oldJson = string(oldJsonBytes)
	}

	if db.ChannelMapper != nil {
		// Call the ChannelMapper:
		var output *channels.ChannelMapperOutput
		output, err = db.ChannelMapper.MapToChannelsAndAccess(body, oldJson, makeUserCtx(db.user))
		if err == nil {
			result = output.Channels
			access = output.Access
			roles = output.Roles
			err = output.Rejection
			if err != nil {
				base.Log("Sync fn rejected: new=%+v old=%s --> %s", body, oldJson, err)
			} else if !validateAccessMap(access) || !validateRoleAccessMap(roles) {
				err = base.HTTPErrorf(500, "Error in JS sync function")
			}
		} else {
			base.Warn("Sync fn exception: %+v; doc = %s", err, body)
			err = base.HTTPErrorf(500, "Exception in JS sync function")
		}
	} else {
		// No ChannelMapper so by default use the "channels" property:
		value, _ := body["channels"].([]interface{})
		if value != nil {
			array := make([]string, 0, len(value))
			for _, channel := range value {
				channelStr, ok := channel.(string)
				if ok && len(channelStr) > 0 {
					array = append(array, channelStr)
				}
			}
			result, err = channels.SetFromArray(array, channels.KeepStar)
		}
	}
	return
}
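// Sketch (not from the original source) of the default mapping taken when no
// ChannelMapper is configured: non-string and empty entries in the doc's
// "channels" property are silently dropped before channels.SetFromArray.
// exampleDefaultChannels is a hypothetical illustration.
func exampleDefaultChannels() {
	body := Body{"channels": []interface{}{"public", "", 42, "beta"}}
	value, _ := body["channels"].([]interface{})
	array := make([]string, 0, len(value))
	for _, channel := range value {
		if s, ok := channel.(string); ok && len(s) > 0 {
			array = append(array, s)
		}
	}
	// array == ["public", "beta"]; "" and 42 were skipped without error.
	_, _ = channels.SetFromArray(array, channels.KeepStar)
}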
// HTTP handler for a PUT of an attachment
func (h *handler) handlePutAttachment() error {
	docid := h.PathVar("docid")
	attachmentName := h.PathVar("attach")
	attachmentContentType := h.rq.Header.Get("Content-Type")
	if attachmentContentType == "" {
		attachmentContentType = "application/octet-stream"
	}
	revid := h.getQuery("rev")
	if revid == "" {
		revid = h.rq.Header.Get("If-Match")
	}
	attachmentData, err := h.readBody()
	if err != nil {
		return err
	}

	body, err := h.db.GetRev(docid, revid, false, nil)
	if err != nil && base.IsDocNotFoundError(err) {
		// CouchDB creates an empty body on attachment PUT
		// for a non-existent doc id:
		body = db.Body{}
		body["_rev"] = revid
	} else if err != nil {
		return err
	} else if body != nil {
		body["_rev"] = revid
	}

	// Find the attachment (if it existed):
	attachments := db.BodyAttachments(body)
	if attachments == nil {
		attachments = make(map[string]interface{})
	}

	// Create the new attachment:
	attachment := make(map[string]interface{})
	attachment["data"] = attachmentData
	attachment["content_type"] = attachmentContentType

	// Attach it:
	attachments[attachmentName] = attachment
	body["_attachments"] = attachments

	newRev, err := h.db.Put(docid, body)
	if err != nil {
		return err
	}
	h.setHeader("Etag", newRev)

	h.writeJSONStatus(http.StatusCreated, db.Body{"ok": true, "id": docid, "rev": newRev})
	return nil
}
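// Request/response sketch (not from the original source): the handler above
// accepts the revision either as a ?rev= query parameter or an If-Match header.
// The doc ID, attachment name, and rev values below are hypothetical:
//
//	PUT /db/doc1/photo.jpg?rev=1-abc
//	Content-Type: image/jpeg
//	<binary body>
//
//	HTTP/1.1 201 Created
//	Etag: 2-def
//	{"ok": true, "id": "doc1", "rev": "2-def"}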
// Loads a channel's log from the database and returns it.
func (db *Database) GetChangeLog(channelName string, afterSeq uint64) (*channels.ChangeLog, error) {
	if raw, err := db.Bucket.GetRaw(channelLogDocID(channelName)); err == nil {
		log, err := decodeChannelLog(raw)
		if err == nil {
			log.FilterAfter(afterSeq)
		}
		return log, err
	} else {
		if base.IsDocNotFoundError(err) {
			err = nil
		}
		return nil, err
	}
}
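// Usage sketch (not from the original source): a nil log with a nil error means
// the channel simply has no persisted log yet, because doc-not-found is
// converted to (nil, nil) above. exampleReadChannelLog and the channel name
// are hypothetical.
func exampleReadChannelLog(db *Database) error {
	log, err := db.GetChangeLog("public", 0) // afterSeq=0: don't filter anything out
	if err != nil {
		return err
	}
	if log == nil {
		return nil // no log doc yet; treat as empty
	}
	_ = log.Entries // process entries remaining after FilterAfter
	return nil
}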
// Given a document ID and a set of revision IDs, looks up which ones are not known.
func (db *Database) RevDiff(docid string, revids []string) (missing, possible []string) {
	if strings.HasPrefix(docid, "_design/") && db.user != nil {
		return // Users can't upload design docs, so ignore them
	}
	doc, err := db.GetDoc(docid)
	if err != nil {
		if !base.IsDocNotFoundError(err) {
			base.Warn("RevDiff(%q) --> %T %v", docid, err, err)
			// If something goes wrong getting the doc, treat it as though it's nonexistent.
		}
		missing = revids
		return
	}
	revmap := doc.History
	found := make(map[string]bool)
	maxMissingGen := 0
	for _, revid := range revids {
		if revmap.contains(revid) {
			found[revid] = true
		} else {
			if missing == nil {
				missing = make([]string, 0, 5)
			}
			gen, _ := parseRevID(revid)
			if gen > 0 {
				missing = append(missing, revid)
				if gen > maxMissingGen {
					maxMissingGen = gen
				}
			}
		}
	}
	if missing != nil {
		possible = make([]string, 0, 5)
		for revid := range revmap {
			gen, _ := parseRevID(revid)
			if !found[revid] && gen < maxMissingGen {
				possible = append(possible, revid)
			}
		}
		if len(possible) == 0 {
			possible = nil
		}
	}
	return
}
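// Usage sketch (not from the original source): "missing" lists revisions the
// database has never seen; "possible" lists known revisions of earlier
// generations that might serve as ancestors. The doc ID and rev IDs here are
// hypothetical.
func exampleRevsDiff(db *Database) {
	missing, possible := db.RevDiff("doc1", []string{"2-beta", "3-gamma"})
	_ = missing  // e.g. ["3-gamma"] if only generation 3 is unknown
	_ = possible // known revs with generation below the highest missing one
}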
func (auth *Authenticator) AuthenticateCookie(rq *http.Request) (*User, error) {
	cookie, _ := rq.Cookie(CookieName)
	if cookie == nil {
		return nil, nil
	}

	var session LoginSession
	err := auth.bucket.Get(docIDForSession(cookie.Value), &session)
	if err != nil {
		if base.IsDocNotFoundError(err) {
			err = nil
		}
		return nil, err
	}
	// Don't need to check session.Expiration, because Couchbase will have nuked the document.
	return auth.GetUser(session.Username)
}
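// Usage sketch (not from the original source): wiring AuthenticateCookie into a
// request path. A nil user with a nil error means "no valid session", which
// would typically fall through to another auth method rather than failing hard.
// exampleCookieAuth is a hypothetical helper.
func exampleCookieAuth(auth *Authenticator, rq *http.Request) (*User, error) {
	user, err := auth.AuthenticateCookie(rq)
	if err != nil {
		return nil, err // storage error reading the session doc
	}
	if user == nil {
		// No cookie, or the session doc expired (Couchbase TTL deleted it);
		// fall back to e.g. Basic auth here.
		return nil, nil
	}
	return user, nil
}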
// Sets the database context's sync function based on the JS code from config.
// If the function is different from the prior one, all documents are run through it again to
// update their channel assignments and the access privileges they assign to users and roles.
// If importExistingDocs is true, documents in the bucket that are not known to Sync Gateway will
// be imported (have _sync data added) and run through the sync function.
func (context *DatabaseContext) ApplySyncFun(syncFun string, importExistingDocs bool) error {
	var err error
	if syncFun == "" {
		context.ChannelMapper = nil
	} else if context.ChannelMapper != nil {
		_, err = context.ChannelMapper.SetFunction(syncFun)
	} else {
		context.ChannelMapper = channels.NewChannelMapper(syncFun)
	}
	if err != nil {
		base.Warn("Error setting sync function: %s", err)
		return err
	}

	// Check whether the sync function is different from the previous one:
	var syncData struct {
		Sync string
	}
	err = context.Bucket.Get(kSyncDataKey, &syncData)
	syncDataMissing := base.IsDocNotFoundError(err)
	if err != nil && !syncDataMissing {
		return err
	} else if syncFun == syncData.Sync {
		// Sync function hasn't changed. But if importing, scan imported docs anyway:
		if importExistingDocs {
			db := &Database{context, nil}
			return db.UpdateAllDocChannels(false, importExistingDocs)
		}
		return nil
	} else {
		if !syncDataMissing {
			// It's changed, so re-run it on all docs:
			db := &Database{context, nil}
			if err = db.UpdateAllDocChannels(true, importExistingDocs); err != nil {
				return err
			}
		}
		// Finally save the new function source:
		syncData.Sync = syncFun
		return context.Bucket.Set(kSyncDataKey, 0, syncData)
	}
}
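// Usage sketch (not from the original source): applying a sync function at
// startup. The JS source is a minimal function that routes each doc to the
// channels named in its "channels" property; exampleApplySync is hypothetical.
func exampleApplySync(context *DatabaseContext) error {
	syncFun := `function(doc, oldDoc) { channel(doc.channels); }`
	// true => also import bucket docs that lack _sync metadata:
	return context.ApplySyncFun(syncFun, true)
}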
// Loads a channel's log from the database and returns it.
func (c *changesWriter) getChangeLog(channelName string, afterSeq uint64) (*channels.ChangeLog, error) {
	raw, err := c.bucket.GetRaw(channelLogDocID(channelName))
	if err != nil {
		if base.IsDocNotFoundError(err) {
			err = nil
		}
		return nil, err
	}

	log := channels.DecodeChangeLog(bytes.NewReader(raw), afterSeq)
	if log == nil {
		// Log is corrupt, so delete it; caller will regenerate it.
		c.bucket.Delete(channelLogDocID(channelName))
		return nil, fmt.Errorf("Corrupt log")
	}
	base.LogTo("ChannelLog", "Read %q -- %d bytes, %d entries (since=%d) after #%d",
		channelName, len(raw), len(log.Entries), log.Since, afterSeq)
	return log, nil
}
// Loads a channel's log from the database and returns it.
func (c *channelLogWriter) getChangeLog(afterSeq uint64) (*channels.ChangeLog, error) {
	c.cacheMutex.RLock()
	cachedLog := c.cachedLog
	c.cacheMutex.RUnlock()

	// Read from cache if available:
	entries := cachedLog.EntriesAfter(afterSeq)
	if entries == nil && afterSeq > cachedLog.Since {
		entries = cachedLog.Entries
	}
	if entries != nil {
		log := &channels.ChangeLog{Since: afterSeq, Entries: entries}
		if log.HasEmptyEntries() {
			log = log.CopyRemovingEmptyEntries()
		}
		base.LogTo("Changes", "Using cached entries for afterSeq=%d (returning %d)", afterSeq, len(log.Entries))
		dbExpvars.Add("channelLogCacheHits", 1)
		return log, nil
	}

	raw, err := c.bucket.GetRaw(channelLogDocID(c.channelName))
	if err != nil {
		if base.IsDocNotFoundError(err) {
			err = nil
		}
		return nil, err
	}
	dbExpvars.Add("channelLogCacheMisses", 1)

	log := channels.DecodeChangeLog(bytes.NewReader(raw), afterSeq, nil)
	if log == nil {
		// Log is corrupt, so delete it; caller will regenerate it.
		c.bucket.Delete(channelLogDocID(c.channelName))
		return nil, fmt.Errorf("Corrupt log")
	}
	log = log.CopyRemovingEmptyEntries()
	base.LogTo("ChannelLog", "Read %q -- %d bytes, %d entries (since=%d) after #%d",
		c.channelName, len(raw), len(log.Entries), log.Since, afterSeq)
	return log, nil
}
func (auth *Authenticator) AuthenticateCookie(rq *http.Request, response http.ResponseWriter) (User, error) {
	cookie, _ := rq.Cookie(CookieName)
	if cookie == nil {
		return nil, nil
	}

	var session LoginSession
	err := auth.bucket.Get(docIDForSession(cookie.Value), &session)
	if err != nil {
		if base.IsDocNotFoundError(err) {
			err = nil
		}
		return nil, err
	}
	// Don't need to check session.Expiration, because Couchbase will have nuked the document.

	// Update the session Expiration if 10% or more of the current expiration time has elapsed.
	// If the session does not contain a Ttl (probably created prior to upgrading SG), use a
	// default value of 24 hours.
	if session.Ttl == 0 {
		session.Ttl = kDefaultSessionTTL
	}
	duration := session.Ttl

	sessionTimeElapsed := int((time.Now().Add(duration).Sub(session.Expiration)).Seconds())
	tenPercentOfTtl := int(duration.Seconds()) / 10
	if sessionTimeElapsed > tenPercentOfTtl {
		session.Expiration = time.Now().Add(duration)
		ttlSec := int(duration.Seconds())
		if err = auth.bucket.Set(docIDForSession(session.ID), ttlSec, session); err != nil {
			return nil, err
		}
		cookie.Expires = session.Expiration
		http.SetCookie(response, cookie)
	}

	user, err := auth.GetUser(session.Username)
	if user != nil && user.Disabled() {
		user = nil
	}
	return user, err
}
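// Worked sketch (not from the original source) of the refresh rule above: with
// a 24h Ttl the session doc is rewritten once more than 2.4h of the window has
// elapsed, since elapsed = now + Ttl - Expiration and the threshold is Ttl/10.
// exampleNeedsRefresh is a hypothetical extraction of that predicate.
func exampleNeedsRefresh(expiration time.Time, ttl time.Duration) bool {
	elapsed := int(time.Now().Add(ttl).Sub(expiration).Seconds())
	return elapsed > int(ttl.Seconds())/10
}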
// Writes new changes to my channel log document.
func (c *channelLogWriter) addToChangeLog_(entries []*changeEntry) {
	var err error
	dbExpvars.Add("channelLogAdds", 1)
	logDocID := channelLogDocID(c.channelName)

	// A fraction of the time we will do a full update and clean stuff out.
	fullUpdate := AlwaysCompactChangeLog || len(entries) > MaxChangeLogLength/2 ||
		rand.Intn(MaxChangeLogLength/len(entries)) == 0

	if !fullUpdate {
		// Non-full update; just append the new entries:
		w := bytes.NewBuffer(make([]byte, 0, 100*len(entries)))
		for _, entry := range entries {
			entry.logEntry.Encode(w, entry.parentRevID)
		}
		data := w.Bytes()
		err = c.bucket.Append(logDocID, data)
		if err == nil {
			base.LogTo("ChannelLog", "Appended %d sequence(s) to %q", len(entries), c.channelName)
			dbExpvars.Add("channelLogAppends", 1)
		} else if base.IsDocNotFoundError(err) {
			// Append failed due to doc not existing, so fall back to full update:
			err = nil
			fullUpdate = true
		} else {
			base.Warn("Error appending %d sequence(s) to %q -- %v", len(entries), c.channelName, err)
		}
	}

	if fullUpdate {
		// Full update: do a CAS-based read+write:
		fullUpdateAttempts := 0
		var oldChangeLogCount, newChangeLogCount int
		err = c.bucket.WriteUpdate(logDocID, 0, func(currentValue []byte) ([]byte, walrus.WriteOptions, error) {
			fullUpdateAttempts++
			numToKeep := MaxChangeLogLength - len(entries)
			if len(currentValue) == 0 || numToKeep <= 0 {
				// If the log was missing or empty, or will be entirely overwritten, create a new one:
				entriesToWrite := entries
				if numToKeep < 0 {
					entriesToWrite = entries[-numToKeep:]
				}
				channelLog := channels.ChangeLog{}
				for _, entry := range entriesToWrite {
					channelLog.Add(*entry.logEntry)
				}
				newChangeLogCount = len(entriesToWrite)
				oldChangeLogCount = newChangeLogCount
				return encodeChannelLog(&channelLog), walrus.Raw, nil
			} else {
				// Append to an already existing change log:
				var newValue bytes.Buffer
				var nRemoved int
				nRemoved, newChangeLogCount = channels.TruncateEncodedChangeLog(
					bytes.NewReader(currentValue), numToKeep, numToKeep/2, &newValue)
				for _, entry := range entries {
					entry.logEntry.Encode(&newValue, entry.parentRevID)
				}
				oldChangeLogCount = nRemoved + newChangeLogCount
				newChangeLogCount += len(entries)
				return newValue.Bytes(), walrus.Raw, nil
			}
		})
		if err == nil {
			dbExpvars.Add("channelLogRewrites", 1)
			dbExpvars.Add("channelLogRewriteCollisions", int64(fullUpdateAttempts-1))
			base.LogTo("ChannelLog", "Wrote %d sequences (was %d now %d) to %q in %d attempts",
				len(entries), oldChangeLogCount, newChangeLogCount, c.channelName, fullUpdateAttempts)
		} else {
			base.Warn("Error writing %d sequence(s) to %q -- %v", len(entries), c.channelName, err)
		}
	}
}