Example #1
// Removes the oldest entries to limit the log's length to `maxLength`.
// This is the same as ChangeLog.Truncate except it works directly on the encoded form, which is
// much faster than decoding+truncating+encoding.
func TruncateEncodedChangeLog(r *bytes.Reader, maxLength, minLength int, w io.Writer) (removed int, newLength int) {
	since := readSequence(r)
	// Find the starting position and sequence of each entry:
	entryPos := make([]int64, 0, 1000)
	entrySeq := make([]uint64, 0, 1000)
	for {
		pos, err := r.Seek(0, 1)
		if err != nil {
			panic("Seek??")
		}
		flags, err := r.ReadByte()
		if err != nil {
			if err == io.EOF {
				break // eof
			}
			panic("ReadByte failed")
		}
		seq := readSequence(r)
		skipString(r)
		skipString(r)
		skipString(r)
		if flags > kMaxFlag {
			panic(fmt.Sprintf("TruncateEncodedChangeLog: bad flags 0x%x, entry %d, offset %d",
				flags, len(entryPos), pos))
		}

		entryPos = append(entryPos, pos)
		entrySeq = append(entrySeq, seq)
	}

	// How many entries to remove?
	// * Leave no more than maxLength entries
	// * Every sequence value removed should be less than every sequence remaining.
	// * The new 'since' value should be the maximum sequence removed.
	oldLength := len(entryPos)
	removed = oldLength - maxLength
	if removed <= 0 {
		removed = 0
	} else {
		pivot, newSince := findPivot(entrySeq, removed-1)
		removed = pivot + 1
		if oldLength-removed >= minLength {
			since = newSince
		} else {
			removed = 0
			base.Warn("TruncateEncodedChangeLog: Couldn't find a safe place to truncate")
			//TODO: Possibly find a pivot earlier than desired?
		}
	}

	// Write the updated Since and the remaining entries:
	writeSequence(since, w)
	if _, err := r.Seek(entryPos[removed], 0); err != nil {
		panic("Seek back???")
	}
	if _, err := io.Copy(w, r); err != nil {
		panic("Copy???")
	}
	return removed, oldLength - removed
}
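
Supplementary sketch (not sync_gateway code, hedged illustration only): the same bookkeeping pattern in miniature, recording each entry's starting offset with Seek(0, io.SeekCurrent) and later seeking back to the first surviving entry so the tail can be streamed with io.Copy. The newline-delimited record format and truncateRecords are invented for the example.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// truncateRecords keeps only the last `keep` newline-delimited records, working
// directly on the encoded stream: it notes each record's byte offset, then seeks
// back to the first surviving record and copies the tail verbatim.
func truncateRecords(r *bytes.Reader, keep int, w io.Writer) (removed int, err error) {
	var offsets []int64
	for {
		pos, _ := r.Seek(0, io.SeekCurrent) // current position = start of next record
		b, err := r.ReadByte()
		if err == io.EOF {
			break
		} else if err != nil {
			return 0, err
		}
		offsets = append(offsets, pos)
		for b != '\n' { // skip the rest of this record
			if b, err = r.ReadByte(); err != nil {
				return 0, err
			}
		}
	}
	if removed = len(offsets) - keep; removed <= 0 {
		removed = 0
	}
	if removed == len(offsets) {
		return removed, nil // nothing survives; write nothing
	}
	if _, err = r.Seek(offsets[removed], io.SeekStart); err != nil {
		return 0, err
	}
	_, err = io.Copy(w, r)
	return removed, err
}

func main() {
	var out bytes.Buffer
	n, _ := truncateRecords(bytes.NewReader([]byte("a\nbb\nccc\ndddd\n")), 2, &out)
	fmt.Printf("removed %d, remaining %q\n", n, out.String()) // removed 2, remaining "ccc\ndddd\n"
}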
Example #2
// Writes an object to the response in JSON format.
// If status is nonzero, the header will be written with that status.
func (h *handler) writeJSONStatus(status int, value interface{}) {
	if !h.requestAccepts("application/json") {
		base.Warn("Client won't accept JSON, only %s", h.rq.Header.Get("Accept"))
		h.writeStatus(http.StatusNotAcceptable, "only application/json available")
		return
	}

	jsonOut, err := json.Marshal(value)
	if err != nil {
		base.Warn("Couldn't serialize JSON for %v", value)
		h.writeStatus(http.StatusInternalServerError, "JSON serialization failed")
		return
	}
	if PrettyPrint {
		var buffer bytes.Buffer
		json.Indent(&buffer, jsonOut, "", "  ")
		jsonOut = append(buffer.Bytes(), '\n')
	}
	h.setHeader("Content-Type", "application/json")
	if h.rq.Method != "HEAD" {
		h.setHeader("Content-Length", fmt.Sprintf("%d", len(jsonOut)))
		if status > 0 {
			h.response.WriteHeader(status)
			h.logStatus(status, "")
		}
		h.response.Write(jsonOut)
	} else if status > 0 {
		h.response.WriteHeader(status)
		h.logStatus(status, "")
	}
}
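
Supplementary sketch: the PrettyPrint branch above leans on encoding/json's Indent to re-format the already-marshaled bytes. A standalone illustration of just that step, with a made-up value:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	value := map[string]interface{}{"ok": true, "id": "doc1"}
	jsonOut, err := json.Marshal(value)
	if err != nil {
		panic(err)
	}
	// Re-indent the compact JSON and add a trailing newline, as the handler does
	// when PrettyPrint is enabled.
	var buffer bytes.Buffer
	if err := json.Indent(&buffer, jsonOut, "", "  "); err != nil {
		panic(err)
	}
	jsonOut = append(buffer.Bytes(), '\n')
	fmt.Print(string(jsonOut))
}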
Example #3
// Saves a new local revision to the external bucket.
func (s *Shadower) PushRevision(doc *document) {
	defer func() { atomic.AddUint64(&s.pushCount, 1) }()
	if !s.docIDMatches(doc.ID) {
		return
	} else if doc.newestRevID() == doc.UpstreamRev {
		return // This revision was pulled from the external bucket, so don't push it back!
	}

	var err error
	if doc.Flags&channels.Deleted != 0 {
		base.LogTo("Shadow", "Pushing %q, rev %q [deletion]", doc.ID, doc.CurrentRev)
		err = s.bucket.Delete(doc.ID)
	} else {
		base.LogTo("Shadow", "Pushing %q, rev %q", doc.ID, doc.CurrentRev)
		body := doc.getRevision(doc.CurrentRev)
		if body == nil {
			base.Warn("Can't get rev %q.%q to push to external bucket", doc.ID, doc.CurrentRev)
			return
		}
		err = s.bucket.Set(doc.ID, 0, body)
	}
	if err != nil {
		base.Warn("Error pushing rev of %q to external bucket: %v", doc.ID, err)
	}
}
Example #4
// Adds a document body and/or its conflicts to a ChangeEntry
func (db *Database) addDocToChangeEntry(entry *ChangeEntry, options ChangesOptions) {
	includeConflicts := options.Conflicts && entry.branched
	if !options.IncludeDocs && !includeConflicts {
		return
	}
	doc, err := db.GetDoc(entry.ID)
	if err != nil {
		base.Warn("Changes feed: error getting doc %q: %v", entry.ID, err)
		return
	}

	revID := entry.Changes[0]["rev"]
	if includeConflicts {
		doc.History.forEachLeaf(func(leaf *RevInfo) {
			if leaf.ID != revID {
				entry.Changes = append(entry.Changes, ChangeRev{"rev": leaf.ID})
				if !leaf.Deleted {
					entry.Deleted = false
				}
			}
		})
	}
	if options.IncludeDocs {
		var err error
		entry.Doc, err = db.getRevFromDoc(doc, revID, false)
		if err != nil {
			base.Warn("Changes feed: error getting doc %q/%q: %v", doc.ID, revID, err)
		}
	}
}
Example #5
// Iterates over all documents in the database, calling the callback function on each
func (db *Database) ForEachDocID(callback ForEachDocIDFunc) error {
	type viewRow struct {
		Key   string
		Value struct {
			RevID    string   `json:"r"`
			Sequence uint64   `json:"s"`
			Channels []string `json:"c"`
		}
	}
	var vres struct {
		Rows []viewRow
	}
	opts := Body{"stale": false, "reduce": false}
	err := db.Bucket.ViewCustom("sync_housekeeping", "all_docs", opts, &vres)
	if err != nil {
		base.Warn("all_docs got error: %v", err)
		return err
	}

	for _, row := range vres.Rows {
		err = callback(IDAndRev{row.Key, row.Value.RevID, row.Value.Sequence}, row.Value.Channels)
		if err != nil {
			return err
		}
	}
	return nil
}
Example #6
File: crud.go Project: racido/sync_gateway
// Moves a revision's ancestor's body out of the document object and into a separate db doc.
func (db *Database) backupAncestorRevs(doc *document, revid string) error {
	// Find an ancestor that still has JSON in the document:
	var json []byte
	for {
		if revid = doc.History.getParent(revid); revid == "" {
			return nil // No ancestors with JSON found
		} else if json = doc.getRevisionJSON(revid); json != nil {
			break
		}
	}

	// Store the JSON as a separate doc in the bucket:
	if err := db.setOldRevisionJSON(doc.ID, revid, json); err != nil {
		// This isn't fatal since we haven't lost any information; just warn about it.
		base.Warn("backupAncestorRevs failed: doc=%q rev=%q err=%v", doc.ID, revid, err)
		return err
	}

	// Nil out the rev's body in the document struct:
	if revid == doc.CurrentRev {
		doc.body = nil
	} else {
		doc.History.setRevisionBody(revid, nil)
	}
	base.LogTo("CRUD+", "Backed up obsolete rev %q/%q", doc.ID, revid)
	return nil
}
Example #7
// Parses a JSON MIME body, unmarshaling it into "into".
func ReadJSONFromMIME(headers http.Header, input io.Reader, into interface{}) error {
	contentType := headers.Get("Content-Type")
	if contentType != "" && !strings.HasPrefix(contentType, "application/json") {
		return base.HTTPErrorf(http.StatusUnsupportedMediaType, "Invalid content type %s", contentType)
	}

	switch headers.Get("Content-Encoding") {
	case "gzip":
		var err error
		if input, err = gzip.NewReader(input); err != nil {
			return err
		}
	case "":
		break
	default:
		return base.HTTPErrorf(http.StatusUnsupportedMediaType, "Unsupported Content-Encoding; use gzip")
	}

	decoder := json.NewDecoder(input)
	if err := decoder.Decode(into); err != nil {
		base.Warn("Couldn't parse JSON in HTTP request: %v", err)
		return base.HTTPErrorf(http.StatusBadRequest, "Bad JSON")
	}
	return nil
}
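
Supplementary sketch (stdlib only, made-up payload): a round trip through the gzip path that ReadJSONFromMIME handles. The producer compresses a JSON body and labels it Content-Encoding: gzip; the consumer wraps the reader in gzip.NewReader and stream-decodes it, mirroring the steps above.

package main

import (
	"bytes"
	"compress/gzip"
	"encoding/json"
	"fmt"
	"io"
)

func main() {
	// Client side: gzip-compress the JSON request body.
	var compressed bytes.Buffer
	zw := gzip.NewWriter(&compressed)
	if _, err := zw.Write([]byte(`{"assertion":"abc123"}`)); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// Server side: the same decoding steps as above, wrap the reader when the
	// body is gzipped, then stream-decode into the target struct.
	var input io.Reader = &compressed
	zr, err := gzip.NewReader(input)
	if err != nil {
		panic(err)
	}
	input = zr

	var into struct {
		Assertion string `json:"assertion"`
	}
	if err := json.NewDecoder(input).Decode(&into); err != nil {
		panic(err)
	}
	fmt.Println(into.Assertion) // abc123
}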
Example #8
// Main loop that pulls changes from the external bucket. (Runs in its own goroutine.)
func (s *Shadower) readTapFeed() {
	vbucketsFilling := 0
	for event := range s.tapFeed.Events() {
		switch event.Opcode {
		case walrus.TapBeginBackfill:
			if vbucketsFilling == 0 {
				base.LogTo("Shadow", "Reading history of external bucket")
			}
			vbucketsFilling++
			//base.LogTo("Shadow", "Reading history of external bucket")
		case walrus.TapMutation, walrus.TapDeletion:
			key := string(event.Key)
			// Ignore ephemeral documents or ones whose ID would conflict with our metadata
			if event.Expiry > 0 || !s.docIDMatches(key) {
				break
			}
			isDeletion := event.Opcode == walrus.TapDeletion
			err := s.pullDocument(key, event.Value, isDeletion, event.Sequence, event.Flags)
			if err != nil {
				base.Warn("Error applying change from external bucket: %v", err)
			}
		case walrus.TapEndBackfill:
			if vbucketsFilling--; vbucketsFilling == 0 {
				base.LogTo("Shadow", "Caught up with history of external bucket")
			}
		}
	}
	base.LogTo("Shadow", "End of tap feed(?)")
}
Example #9
func (s *sequenceAllocator) lastSequence() (uint64, error) {
	last, err := s.bucket.Incr("_sync:seq", 0, 0, 0)
	if err != nil {
		base.Warn("Error from Incr in lastSequence(): %v", err)
	}
	return last, err
}
Example #10
// Main loop that pulls changes from the external bucket. (Runs in its own goroutine.)
func (s *Shadower) readTapFeed() {
	vbucketsFilling := 0
	for event := range s.tapFeed.Events() {
		switch event.Opcode {
		case walrus.TapBeginBackfill:
			if vbucketsFilling == 0 {
				base.LogTo("Shadow", "Reading history of external bucket")
			}
			vbucketsFilling++
			//base.LogTo("Shadow", "Reading history of external bucket")
		case walrus.TapMutation, walrus.TapDeletion:
			key := string(event.Key)
			if !s.docIDMatches(key) {
				break
			}
			isDeletion := event.Opcode == walrus.TapDeletion
			if !isDeletion && event.Expiry > 0 {
				break // ignore ephemeral documents
			}
			err := s.pullDocument(key, event.Value, isDeletion, event.Sequence, event.Flags)
			if err != nil {
				base.Warn("Error applying change from external bucket: %v", err)
			}
			atomic.AddUint64(&s.pullCount, 1)
		case walrus.TapEndBackfill:
			if vbucketsFilling--; vbucketsFilling == 0 {
				base.LogTo("Shadow", "Caught up with history of external bucket")
			}
		}
	}
	base.LogTo("Shadow", "End of tap feed(?)")
}
Example #11
func (c *changeCache) processPrincipalDoc(docID string, docJSON []byte, isUser bool) {
	// Currently the cache isn't really doing much with user docs; mostly it needs to know about
	// them because they have sequence numbers, so without them the sequence of sequences would
	// have gaps in it, causing later sequences to get stuck in the queue.
	princ, err := c.context.Authenticator().UnmarshalPrincipal(docJSON, "", 0, isUser)
	if princ == nil {
		base.Warn("changeCache: Error unmarshaling doc %q: %v", docID, err)
		return
	}
	sequence := princ.Sequence()
	if sequence <= c.initialSequence {
		return // Tap is sending us an old value from before I started up; ignore it
	}

	// Now add the (somewhat fictitious) entry:
	change := &LogEntry{
		Sequence:     sequence,
		TimeReceived: time.Now(),
	}
	if isUser {
		change.DocID = "_user/" + princ.Name()
	} else {
		change.DocID = "_role/" + princ.Name()
	}

	base.LogTo("Cache", "Received #%d (%q)", change.Sequence, change.DocID)

	c.processEntry(change)
}
Example #12
// POST /_persona creates a browserID-based login session and sets its cookie.
// It's API-compatible with the CouchDB plugin: <https://github.com/iriscouch/browserid_couchdb/>
func (h *handler) handlePersonaPOST() error {
	var params struct {
		Assertion string `json:"assertion"`
	}
	err := db.ReadJSONFromMIME(h.rq.Header, h.rq.Body, &params)
	if err != nil {
		return err
	}

	origin := h.server.config.Persona.Origin
	if origin == "" {
		base.Warn("Can't accept Persona logins: Server URL not configured")
		return &base.HTTPError{http.StatusInternalServerError, "Server url not configured"}
	}

	// OK, now verify it:
	base.Log("Persona: Verifying assertion %q for %q", params.Assertion, origin)
	verifiedInfo, err := VerifyPersona(params.Assertion, origin)
	if err != nil {
		base.Log("Persona: Failed verify: %v", err)
		return err
	}
	base.Log("Persona: Logged in %q!", verifiedInfo.Email)

	createUserIfNeeded := h.server.config.Persona.Register
	return h.makeSessionFromEmail(verifiedInfo.Email, createUserIfNeeded)

}
Example #13
func (h *handler) readDocument() (db.Body, error) {
	contentType, attrs, _ := mime.ParseMediaType(h.rq.Header.Get("Content-Type"))
	switch contentType {
	case "", "application/json":
		return h.readJSON()
	case "multipart/related":
		if DebugMultipart {
			raw, err := ioutil.ReadAll(h.rq.Body)
			if err != nil {
				return nil, err
			}
			reader := multipart.NewReader(bytes.NewReader(raw), attrs["boundary"])
			body, err := db.ReadMultipartDocument(reader)
			if err != nil {
				ioutil.WriteFile("GatewayPUT.mime", raw, 0600)
				base.Warn("Error reading MIME data: copied to file GatewayPUT.mime")
			}
			return body, err
		} else {
			reader := multipart.NewReader(h.rq.Body, attrs["boundary"])
			return db.ReadMultipartDocument(reader)
		}
	}
	return nil, &base.HTTPError{http.StatusUnsupportedMediaType, "Invalid content type " + contentType}
}
Example #14
func (tree RevTree) UnmarshalJSON(inputjson []byte) (err error) {
	if tree == nil {
		base.Warn("No RevTree for input %q", inputjson)
		return nil
	}
	var rep revTreeList
	err = json.Unmarshal(inputjson, &rep)
	if err != nil {
		return
	}

	for i, revid := range rep.Revs {
		info := RevInfo{ID: revid}
		if rep.Bodies != nil && len(rep.Bodies[i]) > 0 {
			info.Body = []byte(rep.Bodies[i])
		}
		if rep.Channels != nil {
			info.Channels = rep.Channels[i]
		}
		parentIndex := rep.Parents[i]
		if parentIndex >= 0 {
			info.Parent = rep.Revs[parentIndex]
		}
		tree[revid] = &info
	}
	if rep.Deleted != nil {
		for _, i := range rep.Deleted {
			info := tree[rep.Revs[i]]
			info.Deleted = true //because tree[rep.Revs[i]].Deleted=true is a compile error
			tree[rep.Revs[i]] = info
		}
	}
	return
}
Example #15
func (db *Database) queryAllDocs(reduce bool) (walrus.ViewResult, error) {
	opts := Body{"stale": false, "reduce": reduce}
	vres, err := db.Bucket.View("sync_housekeeping", "all_docs", opts)
	if err != nil {
		base.Warn("all_docs got error: %v", err)
	}
	return vres, err
}
Example #16
func (h *handler) addJSON(value interface{}) {
	encoder := json.NewEncoder(h.response)
	err := encoder.Encode(value)
	if err != nil {
		base.Warn("Couldn't serialize JSON for %v", value)
		panic("JSON serialization failed")
	}
}
Example #17
func (h *handler) addJSON(value interface{}) {
	jsonOut, err := json.Marshal(value)
	if err != nil {
		base.Warn("Couldn't serialize JSON for %v", value)
		panic("JSON serialization failed")
	}
	h.response.Write(jsonOut)
}
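
Supplementary sketch: the two addJSON variants above differ in how bytes reach the writer. json.Encoder streams directly and appends a newline, while json.Marshal returns the bytes so the caller can set Content-Length or re-indent before writing. A small standalone comparison with a contrived value:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	value := map[string]int{"n": 1}

	// json.Encoder (first variant) streams straight to the writer and appends '\n'.
	var viaEncoder bytes.Buffer
	if err := json.NewEncoder(&viaEncoder).Encode(value); err != nil {
		panic(err)
	}

	// json.Marshal (second variant) hands back the bytes, so the caller can set
	// Content-Length (or re-indent them) before writing.
	viaMarshal, err := json.Marshal(value)
	if err != nil {
		panic(err)
	}

	fmt.Printf("%q\n%q\n", viaEncoder.String(), string(viaMarshal))
	// "{\"n\":1}\n"
	// "{\"n\":1}"
}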
Example #18
// Deletes a database (and all documents)
func (db *Database) Delete() error {
	opts := Body{"stale": false}
	vres, err := db.Bucket.View("sync_gateway", "all_bits", opts)
	if err != nil {
		base.Warn("all_bits view returned %v", err)
		return err
	}

	//FIX: Is there a way to do this in one operation?
	base.Log("Deleting %d documents of %q ...", len(vres.Rows), db.Name)
	for _, row := range vres.Rows {
		base.LogTo("CRUD", "\tDeleting %q", row.ID)
		if err := db.Bucket.Delete(row.ID); err != nil {
			base.Warn("Error deleting %q: %v", row.ID, err)
		}
	}
	return nil
}
Example #19
// Given a newly changed document (received from the tap feed), adds change entries to channels.
// The JSON must be the raw document from the bucket, with the metadata and all.
func (c *changeCache) DocChanged(docID string, docJSON []byte) {
	entryTime := time.Now()
	// ** This method does not directly access any state of c, so it doesn't lock.
	go func() {
		// Is this a user/role doc?
		if strings.HasPrefix(docID, auth.UserKeyPrefix) {
			c.processPrincipalDoc(docID, docJSON, true)
			return
		} else if strings.HasPrefix(docID, auth.RoleKeyPrefix) {
			c.processPrincipalDoc(docID, docJSON, false)
			return
		}

		// First unmarshal the doc (just its metadata, to save time/memory):
		doc, err := unmarshalDocumentSyncData(docJSON, false)
		if err != nil || !doc.hasValidSyncData() {
			base.Warn("changeCache: Error unmarshaling doc %q: %v", docID, err)
			return
		}

		if doc.Sequence <= c.initialSequence {
			return // Tap is sending us an old value from before I started up; ignore it
		}

		// Record a histogram of the Tap feed's lag:
		tapLag := time.Since(doc.TimeSaved) - time.Since(entryTime)
		lagMs := int(tapLag/(100*time.Millisecond)) * 100
		changeCacheExpvars.Add(fmt.Sprintf("lag-tap-%04dms", lagMs), 1)

		// If the doc update wasted any sequences due to conflicts, add empty entries for them:
		for _, seq := range doc.UnusedSequences {
			base.LogTo("Cache", "Received unused #%d for (%q / %q)", seq, docID, doc.CurrentRev)
			change := &LogEntry{
				Sequence:     seq,
				TimeReceived: time.Now(),
			}
			c.processEntry(change)
		}

		// Now add the entry for the new doc revision:
		change := &LogEntry{
			Sequence:     doc.Sequence,
			DocID:        docID,
			RevID:        doc.CurrentRev,
			Flags:        doc.Flags,
			TimeReceived: time.Now(),
			TimeSaved:    doc.TimeSaved,
			Channels:     doc.Channels,
		}
		base.LogTo("Cache", "Received #%d after %3dms (%q / %q)", change.Sequence, int(tapLag/time.Millisecond), change.DocID, change.RevID)

		changedChannels := c.processEntry(change)
		if c.onChange != nil && len(changedChannels) > 0 {
			c.onChange(changedChannels)
		}
	}()
}
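
Supplementary sketch: the lag histogram above rounds each lag down into a 100ms-wide bucket and uses that as an expvar counter key. A tiny standalone illustration of the arithmetic, with arbitrary sample durations:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Round each lag down to a 100ms bucket and build the expvar-style key,
	// mirroring the tapLag bookkeeping above.
	for _, tapLag := range []time.Duration{37 * time.Millisecond, 642 * time.Millisecond, 1230 * time.Millisecond} {
		lagMs := int(tapLag/(100*time.Millisecond)) * 100
		fmt.Printf("lag-tap-%04dms\n", lagMs)
	}
	// Output:
	// lag-tap-0000ms
	// lag-tap-0600ms
	// lag-tap-1200ms
}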
Example #20
// Decodes an encoded ChangeLog.
func DecodeChangeLog(r *bytes.Reader, afterSeq uint64) (log *ChangeLog) {
	defer func() {
		if panicMsg := recover(); panicMsg != nil {
			// decodeChangeLog panicked.
			base.Warn("Panic from DecodeChangeLog: %v", panicMsg)
		}
	}()
	return decodeChangeLog(r, afterSeq)
}
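
Supplementary sketch: the wrapper above uses defer/recover so a panic inside the low-level decoder degrades to a warning plus a nil result. A standalone illustration of that pattern; decodeOrNil and decode are hypothetical stand-ins:

package main

import "fmt"

// decodeOrNil wraps a decoder that may panic on corrupt input, converting the
// panic into a log line and a nil result, much like DecodeChangeLog above.
func decodeOrNil(input []byte) (result *string) {
	defer func() {
		if panicMsg := recover(); panicMsg != nil {
			fmt.Printf("WARNING: Panic from decode: %v\n", panicMsg)
		}
	}()
	return decode(input)
}

func decode(input []byte) *string {
	if len(input) == 0 {
		panic("unexpected end of input")
	}
	s := string(input)
	return &s
}

func main() {
	if s := decodeOrNil([]byte("hello")); s != nil {
		fmt.Println(*s) // hello
	}
	fmt.Println(decodeOrNil(nil) == nil) // prints the warning, then true
}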
Example #21
// Adds a database to the ServerContext given its configuration.
func (sc *ServerContext) AddDatabaseFromConfig(config *DbConfig) error {
	server := "http://localhost:8091"
	pool := "default"
	bucketName := config.name

	if config.Server != nil {
		server = *config.Server
	}
	if config.Pool != nil {
		pool = *config.Pool
	}
	if config.Bucket != nil {
		bucketName = *config.Bucket
	}
	dbName := config.name
	if dbName == "" {
		dbName = bucketName
	}
	base.Log("Opening db /%s as bucket %q, pool %q, server <%s>",
		dbName, bucketName, pool, server)

	if err := db.ValidateDatabaseName(dbName); err != nil {
		return err
	}

	// Connect to the bucket and add the database:
	bucket, err := db.ConnectToBucket(server, pool, bucketName)
	if err != nil {
		return err
	}
	dbcontext, err := db.NewDatabaseContext(dbName, bucket)
	if err != nil {
		return err
	}
	if config.Sync != nil {
		if err := dbcontext.ApplySyncFun(*config.Sync); err != nil {
			return err
		}
	}

	if dbcontext.ChannelMapper == nil {
		base.Warn("Database %q sync function undefined; using default", dbName)
	}

	// Create default users & roles:
	if err := sc.installPrincipals(dbcontext, config.Roles, "role"); err != nil {
		return err
	} else if err := sc.installPrincipals(dbcontext, config.Users, "user"); err != nil {
		return err
	}

	// Register it so HTTP handlers can find it:
	if err := sc.registerDatabase(dbcontext); err != nil {
		return err
	}
	return nil
}
Example #22
func (s *sequenceAllocator) _reserveSequences(numToReserve uint64) error {
	max, err := s.bucket.Incr("_sync:seq", numToReserve, numToReserve, 0)
	if err != nil {
		base.Warn("Error from Incr in _reserveSequences(%d): %v", numToReserve, err)
		return err
	}
	s.max = max
	s.last = max - numToReserve
	return nil
}
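
Supplementary sketch: the reservation is plain counter arithmetic. One atomic Incr by numToReserve returns the new maximum, and last = max - numToReserve marks the start of the freshly reserved block. A standalone illustration with a toy in-memory counter standing in for the bucket:

package main

import "fmt"

// counter is a toy stand-in for the bucket's atomic "_sync:seq" counter.
type counter struct{ value uint64 }

func (c *counter) Incr(amt uint64) uint64 {
	c.value += amt
	return c.value
}

func main() {
	c := &counter{}
	numToReserve := uint64(10)
	max := c.Incr(numToReserve)
	last := max - numToReserve
	// The allocator now owns sequences last+1 through max.
	fmt.Printf("reserved sequences %d..%d\n", last+1, max) // reserved sequences 1..10
}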
Example #23
// Given a CouchDB document body about to be stored in the database, goes through the _attachments
// dict, finds attachments with inline bodies, copies the bodies into the Couchbase db, and replaces
// the bodies with the 'digest' attributes which are the keys to retrieving them.
func (db *Database) storeAttachments(doc *document, body Body, generation int, parentRev string) error {
	var parentAttachments map[string]interface{}
	atts := BodyAttachments(body)
	if atts == nil && body["_attachments"] != nil {
		return base.HTTPErrorf(400, "Invalid _attachments")
	}
	for name, value := range atts {
		meta, ok := value.(map[string]interface{})
		if !ok {
			return base.HTTPErrorf(400, "Invalid _attachments")
		}
		data, exists := meta["data"]
		if exists {
			// Attachment contains data, so store it in the db:
			attachment, err := decodeAttachment(data)
			if err != nil {
				return err
			}
			key, err := db.setAttachment(attachment)
			if err != nil {
				return err
			}
			delete(meta, "data")
			meta["stub"] = true
			meta["digest"] = string(key)
			meta["revpos"] = generation
			if meta["encoding"] == nil {
				meta["length"] = len(attachment)
				delete(meta, "encoded_length")
			} else {
				meta["encoded_length"] = len(attachment)
			}
		} else {
			// No data given; look it up from the parent revision.
			if parentAttachments == nil {
				parent, err := db.getAvailableRev(doc, parentRev)
				if err != nil {
					base.Warn("storeAttachments: no such parent rev %q to find %v", parentRev, meta)
					return err
				}
				parentAttachments, exists = parent["_attachments"].(map[string]interface{})
				if !exists {
					return base.HTTPErrorf(400, "Unknown attachment %s", name)
				}
			}
			parentAttachment := parentAttachments[name]
			if parentAttachment == nil {
				return base.HTTPErrorf(400, "Unknown attachment %s", name)
			}
			atts[name] = parentAttachment
		}
	}
	return nil
}
Example #24
// Calls the JS sync function to assign the doc to channels, grant users
// access to channels, and reject invalid documents.
func (db *Database) getChannelsAndAccess(doc *document, body Body, parentRevID string) (result base.Set, access channels.AccessMap, roles channels.AccessMap, err error) {
	base.LogTo("CRUD+", "Invoking sync on doc %q rev %s", doc.ID, body["_rev"])

	// Get the parent revision, to pass to the sync function:
	var oldJson string
	if parentRevID != "" {
		var oldJsonBytes []byte
		oldJsonBytes, err = db.getRevisionJSON(doc, parentRevID)
		if err != nil {
			if base.IsDocNotFoundError(err) {
				err = nil
			}
			return
		}
		oldJson = string(oldJsonBytes)
	}

	if db.ChannelMapper != nil {
		// Call the ChannelMapper:
		var output *channels.ChannelMapperOutput
		output, err = db.ChannelMapper.MapToChannelsAndAccess(body, oldJson,
			makeUserCtx(db.user))
		if err == nil {
			result = output.Channels
			access = output.Access
			roles = output.Roles
			err = output.Rejection
			if err != nil {
				base.Log("Sync fn rejected: new=%+v  old=%s --> %s", body, oldJson, err)
			} else if !validateAccessMap(access) || !validateRoleAccessMap(roles) {
				err = base.HTTPErrorf(500, "Error in JS sync function")
			}

		} else {
			base.Warn("Sync fn exception: %+v; doc = %s", err, body)
			err = base.HTTPErrorf(500, "Exception in JS sync function")
		}

	} else {
		// No ChannelMapper so by default use the "channels" property:
		value, _ := body["channels"].([]interface{})
		if value != nil {
			array := make([]string, 0, len(value))
			for _, channel := range value {
				channelStr, ok := channel.(string)
				if ok && len(channelStr) > 0 {
					array = append(array, channelStr)
				}
			}
			result, err = channels.SetFromArray(array, channels.KeepStar)
		}
	}
	return
}
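
Supplementary sketch: when no sync function is configured, the fallback branch above simply filters the document's "channels" property down to non-empty strings. A standalone illustration of that filtering with a made-up body:

package main

import "fmt"

func main() {
	// Pull string channel names out of a document body's "channels" property,
	// skipping non-strings and empty strings, as the default branch above does.
	body := map[string]interface{}{
		"channels": []interface{}{"public", "", 42, "alice"},
	}
	value, _ := body["channels"].([]interface{})
	array := make([]string, 0, len(value))
	for _, channel := range value {
		if channelStr, ok := channel.(string); ok && len(channelStr) > 0 {
			array = append(array, channelStr)
		}
	}
	fmt.Println(array) // [public alice]
}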
Example #25
func setMaxFileDescriptors(maxP *uint64) {
	maxFDs := DefaultMaxFileDescriptors
	if maxP != nil {
		maxFDs = *maxP
	}
	actualMax, err := base.SetMaxFileDescriptors(maxFDs)
	if err != nil {
		base.Warn("Error setting MaxFileDescriptors to %d: %v", maxFDs, err)
	} else if maxP != nil {
		base.Log("Configured process to allow %d open file descriptors", actualMax)
	}
}
Example #26
File: crud.go Project: racido/sync_gateway
// Are the principal and role names in an AccessMap all valid?
func validateAccessMap(access channels.AccessMap) bool {
	for name, _ := range access {
		if strings.HasPrefix(name, "role:") {
			name = name[5:] // Roles are identified in access view by a "role:" prefix
		}
		if !auth.IsValidPrincipalName(name) {
			base.Warn("Invalid principal name %q in access() or role() call", name)
			return false
		}
	}
	return true
}
Example #27
// Deletes old revisions that have been moved to individual docs
func (db *Database) Compact() (int, error) {
	opts := Body{"stale": false, "reduce": false}
	vres, err := db.Bucket.View("sync_housekeeping", "old_revs", opts)
	if err != nil {
		base.Warn("old_revs view returned %v", err)
		return 0, err
	}

	//FIX: Is there a way to do this in one operation?
	base.Log("Deleting %d old revs of %q ...", len(vres.Rows), db.Name)
	count := 0
	for _, row := range vres.Rows {
		base.LogTo("CRUD", "\tDeleting %q", row.ID)
		if err := db.Bucket.Delete(row.ID); err != nil {
			base.Warn("Error deleting %q: %v", row.ID, err)
		} else {
			count++
		}
	}
	return count, nil
}
Example #28
func parseRevID(revid string) (int, string) {
	if revid == "" {
		return 0, ""
	}
	var generation int
	var id string
	n, _ := fmt.Sscanf(revid, "%d-%s", &generation, &id)
	if n < 1 || generation < 1 {
		base.Warn("parseRevID failed on %q", revid)
		return -1, ""
	}
	return generation, id
}
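
Supplementary sketch: the Sscanf format above splits a revision ID into its generation number and hash. A standalone illustration of how that scan behaves on well-formed and malformed input:

package main

import "fmt"

func main() {
	var generation int
	var id string

	// Well-formed ID: both items scan, as in parseRevID above.
	n, _ := fmt.Sscanf("3-abc123", "%d-%s", &generation, &id)
	fmt.Printf("n=%d generation=%d id=%q\n", n, generation, id) // n=2 generation=3 id="abc123"

	// Malformed ID: fewer than one item scans, which parseRevID reports as (-1, "").
	generation, id = 0, ""
	n, _ = fmt.Sscanf("notarev", "%d-%s", &generation, &id)
	fmt.Printf("n=%d generation=%d id=%q\n", n, generation, id) // n=0 generation=0 id=""
}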
Example #29
func (doc *document) UnmarshalJSON(data []byte) error {
	if doc.ID == "" {
		panic("Doc was unmarshaled without ID set")
	}
	root := documentRoot{SyncData: &syncData{History: make(RevTree)}}
	err := json.Unmarshal([]byte(data), &root)
	if err != nil {
		base.Warn("Error unmarshaling doc %q: %s", doc.ID, err)
		return err
	}
	if root.SyncData != nil {
		doc.syncData = *root.SyncData
	}

	err = json.Unmarshal([]byte(data), &doc.body)
	if err != nil {
		base.Warn("Error unmarshaling body of doc %q: %s", doc.ID, err)
		return err
	}
	delete(doc.body, "_sync")
	return nil
}
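
Supplementary sketch: the method above unmarshals the same bytes twice, once into a typed struct to capture the "_sync" metadata and once into a generic body map, then deletes the metadata key from the body. A standalone illustration of that double-pass technique with illustrative field names:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	data := []byte(`{"_sync":{"rev":"1-abc","sequence":42},"title":"hello"}`)

	// Pass 1: pull out just the metadata stored under "_sync".
	var root struct {
		SyncData *struct {
			Rev      string `json:"rev"`
			Sequence uint64 `json:"sequence"`
		} `json:"_sync"`
	}
	if err := json.Unmarshal(data, &root); err != nil {
		panic(err)
	}

	// Pass 2: unmarshal the whole document as a generic body, then drop "_sync"
	// so the metadata doesn't leak into the user-visible body.
	var body map[string]interface{}
	if err := json.Unmarshal(data, &body); err != nil {
		panic(err)
	}
	delete(body, "_sync")

	fmt.Println(root.SyncData.Rev, root.SyncData.Sequence, body) // 1-abc 42 map[title:hello]
}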
Example #30
// Sets the database context's channelMapper based on the JS code from config
func (context *DatabaseContext) ApplySyncFun(syncFun string) error {
	var err error
	if context.ChannelMapper != nil {
		_, err = context.ChannelMapper.SetFunction(syncFun)
	} else {
		context.ChannelMapper, err = channels.NewChannelMapper(syncFun)
	}
	if err != nil {
		base.Warn("Error setting sync function: %s", err)
		return err
	}
	return nil
}