Example #1
// Saves a new local revision to the external bucket.
func (s *Shadower) PushRevision(doc *document) {
	defer func() { atomic.AddUint64(&s.pushCount, 1) }()
	if !s.docIDMatches(doc.ID) {
		return
	} else if doc.newestRevID() == doc.UpstreamRev {
		return // This revision was pulled from the external bucket, so don't push it back!
	}

	var err error
	if doc.Flags&channels.Deleted != 0 {
		base.LogTo("Shadow", "Pushing %q, rev %q [deletion]", doc.ID, doc.CurrentRev)
		err = s.bucket.Delete(doc.ID)
	} else {
		base.LogTo("Shadow", "Pushing %q, rev %q", doc.ID, doc.CurrentRev)
		body := doc.getRevision(doc.CurrentRev)
		if body == nil {
			base.Warn("Can't get rev %q.%q to push to external bucket", doc.ID, doc.CurrentRev)
			return
		}
		err = s.bucket.Set(doc.ID, 0, body)
	}
	if err != nil {
		base.Warn("Error pushing rev of %q to external bucket: %v", doc.ID, err)
	}
}
Example #2
// Adds a document body and/or its conflicts to a ChangeEntry
func (db *Database) addDocToChangeEntry(entry *ChangeEntry, options ChangesOptions) {
	includeConflicts := options.Conflicts && entry.branched
	if !options.IncludeDocs && !includeConflicts {
		return
	}
	doc, err := db.GetDoc(entry.ID)
	if err != nil {
		base.Warn("Changes feed: error getting doc %q: %v", entry.ID, err)
		return
	}

	revID := entry.Changes[0]["rev"]
	if includeConflicts {
		doc.History.forEachLeaf(func(leaf *RevInfo) {
			if leaf.ID != revID {
				if !leaf.Deleted {
					entry.Deleted = false
				}
				if !(options.ActiveOnly && leaf.Deleted) {
					entry.Changes = append(entry.Changes, ChangeRev{"rev": leaf.ID})
				}
			}
		})
	}
	if options.IncludeDocs {
		var err error
		entry.Doc, err = db.getRevFromDoc(doc, revID, false)
		if err != nil {
			base.Warn("Changes feed: error getting doc %q/%q: %v", doc.ID, revID, err)
		}
	}
}
Example #3
func (k *kvChannelIndex) writeClockCas(updateClock base.SequenceClock) error {
	// Initial set, for the first cas update attempt
	k.clock.UpdateWithClock(updateClock)
	value, err := k.clock.Marshal()
	if err != nil {
		base.Warn("Error marshalling clock [%s] for update:%+v", base.PrintClock(k.clock), err)
		return err
	}
	casOut, err := base.WriteCasRaw(k.indexBucket, getChannelClockKey(k.channelName), value, k.clock.Cas(), 0, func(value []byte) (updatedValue []byte, err error) {
		// Note: The following is invoked upon cas failure - may be called multiple times
		writeErr := k.clock.Unmarshal(value)
		if writeErr != nil {
			base.Warn("Error unmarshalling clock during update: %v", writeErr)
			return nil, writeErr
		}
		k.clock.UpdateWithClock(updateClock)
		return k.clock.Marshal()
	})

	if err != nil {
		return err
	}

	k.clock.SetCas(casOut)
	return nil
}
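
The function above is a classic optimistic CAS write: attempt the set with the last-known CAS, and on failure re-read the latest value, re-merge the update, and retry. A minimal, self-contained sketch of that loop follows; casStore and its methods are hypothetical stand-ins for illustration, not the Sync Gateway bucket API.

package main

import (
	"fmt"
	"sync"
)

// casStore is a hypothetical in-memory key/value store with a CAS counter,
// standing in for the index bucket used by writeClockCas above.
type casStore struct {
	mu    sync.Mutex
	value []byte
	cas   uint64
}

func (s *casStore) get() ([]byte, uint64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.value, s.cas
}

// setIfCas writes value only if the caller's CAS matches the stored CAS.
func (s *casStore) setIfCas(value []byte, cas uint64) (uint64, bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if cas != s.cas {
		return s.cas, false
	}
	s.value = value
	s.cas++
	return s.cas, true
}

// writeWithCasRetry applies update to the current value and writes it back,
// re-reading and re-applying the update whenever the CAS check fails.
func writeWithCasRetry(s *casStore, update func(current []byte) []byte) uint64 {
	for {
		current, cas := s.get()
		if newCas, ok := s.setIfCas(update(current), cas); ok {
			return newCas
		}
		// CAS mismatch: another writer got in first; loop to reload and re-merge.
	}
}

func main() {
	store := &casStore{}
	cas := writeWithCasRetry(store, func(current []byte) []byte {
		return append(current, 'x')
	})
	fmt.Println("final cas:", cas)
}
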
func (k *kvChangeIndexReader) stableSequenceChanged() bool {

	k.readerStableSequenceLock.Lock()
	defer k.readerStableSequenceLock.Unlock()
	if k.readerStableSequence == nil {
		var err error
		k.readerStableSequence, err = k.loadStableSequence()
		if err != nil {
			base.Warn("Error initializing reader stable sequence: %v", err)
			return false
		}
		base.Warn("Successfully loaded stable sequence")
		return true
	}

	isChanged, err := k.readerStableSequence.Load()

	if err != nil {
		base.Warn("Error loading reader stable sequence: %v", err)
		return false
	}

	return isChanged
}
Example #5
// Writes an object to the response in JSON format.
// If status is nonzero, the header will be written with that status.
func (h *handler) writeJSONStatus(status int, value interface{}) {
	if !h.requestAccepts("application/json") {
		base.Warn("Client won't accept JSON, only %s", h.rq.Header.Get("Accept"))
		h.writeStatus(http.StatusNotAcceptable, "only application/json available")
		return
	}

	jsonOut, err := json.Marshal(value)
	if err != nil {
		base.Warn("Couldn't serialize JSON for %v : %s", value, err)
		h.writeStatus(http.StatusInternalServerError, "JSON serialization failed")
		return
	}
	if PrettyPrint {
		var buffer bytes.Buffer
		json.Indent(&buffer, jsonOut, "", "  ")
		jsonOut = append(buffer.Bytes(), '\n')
	}
	h.setHeader("Content-Type", "application/json")
	if h.rq.Method != "HEAD" {
		if len(jsonOut) < 1000 {
			h.disableResponseCompression()
		}
		h.setHeader("Content-Length", fmt.Sprintf("%d", len(jsonOut)))
		if status > 0 {
			h.response.WriteHeader(status)
			h.setStatus(status, "")
		}
		h.response.Write(jsonOut)
	} else if status > 0 {
		h.response.WriteHeader(status)
		h.setStatus(status, "")
	}
}
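
Outside the Sync Gateway handler type, the same flow (Accept check, marshal, optional pretty-printing, explicit Content-Length, no body for HEAD) can be written against plain net/http. A simplified sketch, assuming a package-level prettyPrint flag in place of PrettyPrint and omitting the compression handling:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
)

var prettyPrint = true // assumption: plays the role of the PrettyPrint flag above

// writeJSON mirrors the flow above: check Accept, marshal, optionally
// pretty-print, set explicit headers, and skip the body for HEAD requests.
func writeJSON(w http.ResponseWriter, r *http.Request, status int, value interface{}) {
	accept := r.Header.Get("Accept")
	if accept != "" && !strings.Contains(accept, "application/json") && !strings.Contains(accept, "*/*") {
		http.Error(w, "only application/json available", http.StatusNotAcceptable)
		return
	}

	jsonOut, err := json.Marshal(value)
	if err != nil {
		http.Error(w, "JSON serialization failed", http.StatusInternalServerError)
		return
	}
	if prettyPrint {
		var buffer bytes.Buffer
		if err := json.Indent(&buffer, jsonOut, "", "  "); err == nil {
			jsonOut = append(buffer.Bytes(), '\n')
		}
	}

	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Content-Length", fmt.Sprintf("%d", len(jsonOut)))
	if status > 0 {
		w.WriteHeader(status)
	}
	if r.Method != "HEAD" {
		w.Write(jsonOut)
	}
}

func main() {
	http.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
		writeJSON(w, r, http.StatusOK, map[string]string{"hello": "world"})
	})
	http.ListenAndServe(":8080", nil)
}
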
Example #6
// Bulk get the blocks from the index bucket, and unmarshal into the provided map.
func (b *BitFlagStorage) bulkLoadBlocks(loadedBlocks map[string]IndexBlock) {
	// Do bulk retrieval of blocks, and unmarshal into loadedBlocks
	var keySet []string
	for key := range loadedBlocks {
		keySet = append(keySet, key)
	}
	blocks, err := b.bucket.GetBulkRaw(keySet)
	if err != nil {
		// TODO FIX: if there's an error on a single block retrieval, differentiate between that
		//  and an empty/non-existent block.  Requires identification of expected blocks by the cache.
		base.Warn("Error doing bulk get:%v", err)
	}

	indexExpvars.Add("bulkGet_bulkLoadBlocks", 1)
	indexExpvars.Add("bulkGet_bulkLoadBlocksCount", int64(len(keySet)))

	// Unmarshal concurrently
	var wg sync.WaitGroup
	for key, blockBytes := range blocks {
		if len(blockBytes) > 0 {
			wg.Add(1)
			go func(key string, blockBytes []byte) {
				defer wg.Done()
				if err := loadedBlocks[key].Unmarshal(blockBytes); err != nil {
					base.Warn("Error unmarshalling block %q into map: %v", key, err)
				}
			}(key, blockBytes)
		}
	}
	wg.Wait()
}
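
bulkLoadBlocks fans the unmarshalling out across goroutines, one per block, and joins them with a sync.WaitGroup. The same shape, stripped of the bucket and IndexBlock types (block below is a made-up stand-in), looks roughly like this:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"sync"
)

// block is a stand-in for IndexBlock; the real type is defined by the index package.
type block struct {
	Entries []int `json:"entries"`
}

// unmarshalConcurrently decodes each raw blob into its pre-allocated target,
// one goroutine per key. The goroutines only read the map and write through
// the per-key pointers, so no additional locking is required.
func unmarshalConcurrently(raw map[string][]byte) map[string]*block {
	decoded := make(map[string]*block, len(raw))
	for key := range raw {
		decoded[key] = &block{} // allocate all targets before any goroutine starts
	}

	var wg sync.WaitGroup
	for key, blob := range raw {
		if len(blob) == 0 {
			continue
		}
		wg.Add(1)
		go func(key string, blob []byte) {
			defer wg.Done()
			if err := json.Unmarshal(blob, decoded[key]); err != nil {
				log.Printf("error unmarshalling block %q: %v", key, err)
			}
		}(key, blob)
	}
	wg.Wait()
	return decoded
}

func main() {
	raw := map[string][]byte{
		"block-1": []byte(`{"entries": [1, 2, 3]}`),
		"block-2": []byte(`{"entries": [4]}`),
	}
	for key, b := range unmarshalConcurrently(raw) {
		fmt.Println(key, b.Entries)
	}
}
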
Example #7
func collectAccessRelatedWarnings(config *DbConfig, context *db.DatabaseContext) []string {

	currentDb, err := db.GetDatabase(context, nil)
	if err != nil {
		base.Warn("Could not get database, skipping access related warnings")
		return []string{}
	}

	numUsersInDb := 0

	// If no users defined in config, and no users were returned from the view, add warning.
	// NOTE: currently ignoring the fact that the config could contain only disabled=true users.
	if len(config.Users) == 0 {

		// There are no users in the config, but there might be users in the db.  Find out
		// by querying the "view principals" view which will return users and roles.  We only want to
		// find out if there is at least one user (or role) defined, so set limit == 1 to minimize
		// performance hit of query.
		viewOptions := db.Body{
			"stale": false,
			"limit": 1,
		}
		vres, err := currentDb.Bucket.View(db.DesignDocSyncGateway, db.ViewPrincipals, viewOptions)
		if err != nil {
			base.Warn("Error trying to query ViewPrincipals: %v", err)
			return []string{}
		}

		numUsersInDb = len(vres.Rows)

		if len(vres.Rows) == 0 {
			noUsersWarning := fmt.Sprintf("No users have been defined in the '%v' database, which means that you will not be able to get useful data out of the sync gateway over the standard port.  FIX: define users in the configuration json or via the REST API on the admin port, and grant users to channels via the admin_channels parameter.", currentDb.Name)

			return []string{noUsersWarning}

		}

	}

	// If the GUEST user is the *only* user defined, but it is disabled or has no access to channels, add warning
	guestUser, ok := config.Users[base.GuestUsername]
	if ok {
		// Do we have any other users?  If so, we're done.
		if len(config.Users) > 1 || numUsersInDb > 1 {
			return []string{}
		}
		if guestUser.Disabled || len(guestUser.ExplicitChannels) == 0 {
			noGuestChannelsWarning := fmt.Sprintf("The GUEST user is the only user defined in the '%v' database, but is either disabled or has no access to any channels.  This means that you will not be able to get useful data out of the sync gateway over the standard port.  FIX: enable and/or grant access to the GUEST user to channels via the admin_channels parameter.", currentDb.Name)
			return []string{noGuestChannelsWarning}
		}
	}

	return []string{}

}
Example #8
func parseClockSequenceID(str string, sequenceHasher *sequenceHasher) (s SequenceID, err error) {

	if str == "" {
		return SequenceID{
			SeqType: ClockSequenceType,
			Clock:   base.NewSequenceClockImpl(),
		}, nil
	}

	s.SeqType = ClockSequenceType
	components := strings.Split(str, ":")
	if len(components) == 1 {
		// Convert simple zero to empty clock, to handle clients sending zero to mean 'no previous since'
		if components[0] == "0" {
			s.Clock = base.NewSequenceClockImpl()
		} else {
			// Standard clock hash
			if s.Clock, err = sequenceHasher.GetClock(components[0]); err != nil {
				return SequenceID{}, err
			}
		}
	} else if len(components) == 2 {
		// TriggeredBy Clock Hash, and vb.seq sequence
		if s.TriggeredByClock, err = sequenceHasher.GetClock(components[0]); err != nil {
			return SequenceID{}, err
		}
		sequenceComponents := strings.Split(components[1], ".")
		if len(sequenceComponents) != 2 {
			base.Warn("Unexpected sequence format - ignoring and relying on triggered by")
			return
		} else {
			if vb64, convErr := strconv.ParseUint(sequenceComponents[0], 10, 16); convErr != nil {
				base.Warn("Unable to convert vbucket %v to int.", sequenceComponents[0])
			} else {
				s.vbNo = uint16(vb64)
				s.Seq, err = strconv.ParseUint(sequenceComponents[1], 10, 64)
			}
		}

	} else if len(components) == 3 {
		// Low hash, and vb.seq sequence.  Use low hash as clock, ignore vb.seq
		if s.Clock, err = sequenceHasher.GetClock(components[0]); err != nil {
			return SequenceID{}, err
		}

	} else {
		err = base.HTTPErrorf(400, "Invalid sequence")
	}

	if err != nil {
		err = base.HTTPErrorf(400, "Invalid sequence")
	}
	return s, err
}
func (k *kvChangeIndexWriter) addSetToChannelIndex(channelName string, entries []*LogEntry) error {
	writer, err := k.getOrCreateWriter(channelName)
	if err != nil {
		base.Warn("Unable to obtain channel writer - partition map not defined?")
		return err
	}
	err = writer.AddSet(entries)
	if err != nil {
		base.Warn("Error writing %d entries for channel [%s]: %+v", len(entries), channelName, err)
		return err
	}
	return nil
}
Example #10
// Reads a single entry from the index
func (b *BitFlagStorage) readIndexEntryInto(vbNo uint16, sequence uint64, entry *LogEntry) error {
	key := getEntryKey(vbNo, sequence)
	value, _, err := b.bucket.GetRaw(key)
	if err != nil {
		base.Warn("Error retrieving entry from bucket for sequence %d, key %s", sequence, key)
		return err
	}
	err = json.Unmarshal(value, entry)
	if err != nil {
		base.Warn("Error unmarshalling entry for sequence %d, key %s", sequence, key)
		return err
	}
	return nil
}
Example #11
func (sc *ServerContext) startShadowing(dbcontext *db.DatabaseContext, shadow *ShadowConfig) error {

	base.Warn("Bucket Shadowing feature comes with a number of limitations and caveats. See https://github.com/couchbase/sync_gateway/issues/1363 for more details.")

	var pattern *regexp.Regexp
	if shadow.Doc_id_regex != nil {
		var err error
		pattern, err = regexp.Compile(*shadow.Doc_id_regex)
		if err != nil {
			base.Warn("Invalid shadow doc_id_regex: %s", *shadow.Doc_id_regex)
			return err
		}
	}

	spec := base.BucketSpec{
		Server:     *shadow.Server,
		PoolName:   "default",
		BucketName: *shadow.Bucket,
		FeedType:   shadow.FeedType,
	}
	if shadow.Pool != nil {
		spec.PoolName = *shadow.Pool
	}
	if shadow.Username != "" {
		spec.Auth = shadow
	}

	bucket, err := base.GetBucket(spec, nil)

	if err != nil {
		err = base.HTTPErrorf(http.StatusBadGateway,
			"Unable to connect to shadow bucket: %s", err)
		return err
	}
	shadower, err := db.NewShadower(dbcontext, bucket, pattern)
	if err != nil {
		bucket.Close()
		return err
	}
	dbcontext.Shadower = shadower

	//Remove credentials from server URL before logging
	url, err := couchbase.ParseURL(spec.Server)
	if err == nil {
		base.Logf("Database %q shadowing remote bucket %q, pool %q, server <%s:%s/%s>", dbcontext.Name, spec.BucketName, spec.PoolName, url.Scheme, url.Host, url.Path)
	}
	return nil
}
Example #12
// Moves a revision's ancestor's body out of the document object and into a separate db doc.
func (db *Database) backupAncestorRevs(doc *document, revid string) error {
	// Find an ancestor that still has JSON in the document:
	var json []byte
	for {
		if revid = doc.History.getParent(revid); revid == "" {
			return nil // No ancestors with JSON found
		} else if json = doc.getRevisionJSON(revid); json != nil {
			break
		}
	}

	// Store the JSON as a separate doc in the bucket:
	if err := db.setOldRevisionJSON(doc.ID, revid, json); err != nil {
		// This isn't fatal since we haven't lost any information; just warn about it.
		base.Warn("backupAncestorRevs failed: doc=%q rev=%q err=%v", doc.ID, revid, err)
		return err
	}

	// Nil out the rev's body in the document struct:
	if revid == doc.CurrentRev {
		doc.body = nil
	} else {
		doc.History.setRevisionBody(revid, nil)
	}
	base.LogTo("CRUD+", "Backed up obsolete rev %q/%q", doc.ID, revid)
	return nil
}
Example #13
// POST /_persona creates a browserID-based login session and sets its cookie.
// It's API-compatible with the CouchDB plugin: <https://github.com/iriscouch/browserid_couchdb/>
func (h *handler) handlePersonaPOST() error {
	if len(h.rq.Header["Origin"]) > 0 {
		// CORS not allowed for login #115
		return base.HTTPErrorf(http.StatusBadRequest, "No CORS")
	}
	var params struct {
		Assertion string `json:"assertion"`
	}
	err := h.readJSONInto(&params)
	if err != nil {
		return err
	}

	origin := h.server.config.Persona.Origin
	if origin == "" {
		base.Warn("Can't accept Persona logins: Server URL not configured")
		return base.HTTPErrorf(http.StatusInternalServerError, "Server url not configured")
	}

	// OK, now verify it:
	base.Logf("Persona: Verifying assertion %q for %q", params.Assertion, origin)
	verifiedInfo, err := VerifyPersona(params.Assertion, origin)
	if err != nil {
		base.Logf("Persona: Failed verify: %v", err)
		return err
	}
	base.Logf("Persona: Logged in %q!", verifiedInfo.Email)

	createUserIfNeeded := h.server.config.Persona.Register
	return h.makeSessionFromEmail(verifiedInfo.Email, createUserIfNeeded)

}
Example #14
// When CBGT is re-opening an existing PIndex (after a Sync Gw restart for example), it will call back this method.
func (sc *ServerContext) OpenSyncGatewayPIndexFactory(indexType, path string, restart func()) (cbgt.PIndexImpl, cbgt.Dest, error) {

	buf, err := ioutil.ReadFile(path +
		string(os.PathSeparator) + "SyncGatewayIndexParams.json")
	if err != nil {
		return nil, nil, err
	}

	indexParams := base.SyncGatewayIndexParams{}
	err = json.Unmarshal(buf, &indexParams)
	if err != nil {
		errWrap := fmt.Errorf("Could not unmarshal buf: %v err: %v", buf, err)
		log.Fatalf("%v", errWrap)
		return nil, nil, errWrap
	}

	pindexImpl, dest, err := sc.SyncGatewayPIndexFactoryCommon(indexParams)
	if err != nil {
		log.Fatalf("%v", err)
		return nil, nil, err
	}

	if pindexImpl == nil {
		base.Warn("PIndex for dest %v is nil", dest)
		return nil, nil, fmt.Errorf("PIndex for dest %v is nil.  This could be due to the pindex stored on disk not corresponding to any configured databases.", dest)
	}

	return pindexImpl, dest, nil

}
Example #15
// Determines whether the clock hash should be calculated for the entry. For non-continuous changes feeds, hash is only calculated for
// the last entry sent (for use in last_seq), and is done in the defer for the main VectorMultiChangesFeed.
// For continuous changes feeds, we want to calculate the hash for every nth entry, where n=kContinuousHashFrequency.  To ensure that
// clients can restart a new changes feed based on any sequence in the continuous feed, we set the last hash calculated as the LowHash
// value on the sequence.
func (db *Database) calculateHashWhenNeeded(options ChangesOptions, entry *ChangeEntry, cumulativeClock base.SequenceClock, hashedEntryCount *int, continuousLastHash string) string {

	if !options.Continuous {
		// For non-continuous, only need to calculate hash for lastSent entry.  Initialize empty clock and return
		entry.Seq.Clock = base.NewSyncSequenceClock()
	} else {
		// When hashedEntryCount == 0, recalculate hash
		if *hashedEntryCount == 0 {
			clockHash, err := db.SequenceHasher.GetHash(cumulativeClock)
			if err != nil {
				base.Warn("Error calculating hash for clock:%v", base.PrintClock(cumulativeClock))
				return continuousLastHash
			} else {
				entry.Seq.Clock = base.NewSyncSequenceClock()
				entry.Seq.Clock.SetHashedValue(clockHash)
				continuousLastHash = clockHash
			}
			*hashedEntryCount = kContinuousHashFrequency
		} else {
			entry.Seq.LowHash = continuousLastHash
			*hashedEntryCount--
		}
	}
	return continuousLastHash
}
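
For continuous feeds the logic above amounts to a countdown over entries: recompute the clock hash when the counter hits zero, reset the counter to kContinuousHashFrequency, and otherwise carry the last hash forward as the low value. A sketch with a stand-in hash function (the real one consults the database's SequenceHasher):

package main

import "fmt"

const continuousHashFrequency = 3 // stands in for kContinuousHashFrequency

type seqEntry struct {
	Hash    string // set when this entry carries a freshly computed hash
	LowHash string // otherwise, the most recent hash carried forward
}

// hashWhenNeeded mirrors the countdown above: compute a hash when the counter
// reaches zero and reset it, otherwise reuse the last hash as the low value.
func hashWhenNeeded(seqs []int, hash func(int) string) []seqEntry {
	out := make([]seqEntry, len(seqs))
	countdown := 0
	lastHash := ""
	for i, seq := range seqs {
		if countdown == 0 {
			lastHash = hash(seq)
			out[i].Hash = lastHash
			countdown = continuousHashFrequency
		} else {
			out[i].LowHash = lastHash
			countdown--
		}
	}
	return out
}

func main() {
	hash := func(seq int) string { return fmt.Sprintf("hash-%d", seq) } // stand-in hash function
	for i, e := range hashWhenNeeded([]int{1, 2, 3, 4, 5, 6, 7, 8}, hash) {
		fmt.Printf("entry %d: %+v\n", i+1, e)
	}
}
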
Example #16
func (auth *Authenticator) rebuildChannels(princ Principal) error {
	channels := princ.ExplicitChannels().Copy()

	// Changes for vbucket sequence management.  We can't determine relative ordering of sequences
	// across vbuckets.  To avoid redundant channel backfills during changes processing, we maintain
	// the previous vb/seq for a channel in PreviousChannels.  If that channel is still present during
	// this rebuild, we reuse the vb/seq from PreviousChannels (using UpdateIfPresent).  If PreviousChannels
	// is nil, reverts to normal sequence handling.

	previousChannels := princ.PreviousChannels().Copy()
	if previousChannels != nil {
		channels.UpdateIfPresent(previousChannels)
	}

	if auth.channelComputer != nil {
		viewChannels, err := auth.channelComputer.ComputeChannelsForPrincipal(princ)
		if err != nil {
			base.Warn("channelComputer.ComputeChannelsForPrincipal failed on %v: %v", princ, err)
			return err
		}
		if previousChannels != nil {
			viewChannels.UpdateIfPresent(previousChannels)
		}
		channels.Add(viewChannels)
	}
	// always grant access to the public document channel
	channels.AddChannel(ch.DocumentStarChannel, 1)

	base.LogTo("Access", "Computed channels for %q: %s", princ.Name(), channels)
	princ.SetPreviousChannels(nil)
	princ.setChannels(channels)

	return nil

}
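
The UpdateIfPresent step described in the comment above keeps the previously assigned vb/seq for channels that survive the rebuild, so an unchanged channel does not look newly granted and trigger a redundant backfill. Its effect, sketched over plain maps (the real types are timed channel sets keyed by channel name):

package main

import "fmt"

// updateIfPresent overwrites entries in current with the value from previous
// for every channel name that exists in both maps; channels that are new in
// current keep their freshly computed sequence.
func updateIfPresent(current, previous map[string]uint64) {
	for name, prevSeq := range previous {
		if _, ok := current[name]; ok {
			current[name] = prevSeq
		}
	}
}

func main() {
	current := map[string]uint64{"news": 500, "sports": 500} // sequences from this rebuild
	previous := map[string]uint64{"news": 12}                // sequence remembered from the last rebuild
	updateIfPresent(current, previous)
	fmt.Println(current) // "news" keeps 12, "sports" keeps 500
}
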
Example #17
// Main loop that pulls changes from the external bucket. (Runs in its own goroutine.)
func (s *Shadower) readTapFeed() {
	vbucketsFilling := 0
	for event := range s.tapFeed.Events() {
		switch event.Opcode {
		case sgbucket.TapBeginBackfill:
			if vbucketsFilling == 0 {
				base.LogTo("Shadow", "Reading history of external bucket")
			}
			vbucketsFilling++
			//base.LogTo("Shadow", "Reading history of external bucket")
		case sgbucket.TapMutation, sgbucket.TapDeletion:
			key := string(event.Key)
			if !s.docIDMatches(key) {
				break
			}
			isDeletion := event.Opcode == sgbucket.TapDeletion
			if !isDeletion && event.Expiry > 0 {
				break // ignore ephemeral documents
			}
			err := s.pullDocument(key, event.Value, isDeletion, event.Sequence, event.Flags)
			if err != nil {
				base.Warn("Error applying change from external bucket: %v", err)
			}
			atomic.AddUint64(&s.pullCount, 1)
		case sgbucket.TapEndBackfill:
			if vbucketsFilling--; vbucketsFilling == 0 {
				base.LogTo("Shadow", "Caught up with history of external bucket")
			}
		}
	}
	base.LogTo("Shadow", "End of tap feed(?)")
}
func (k *kvChangeIndexWriter) processPrincipalDoc(docID string, docJSON []byte, vbNo uint16, sequence uint64) (*LogEntry, error) {

	entryTime := time.Now()

	isUser := strings.HasPrefix(docID, auth.UserKeyPrefix)

	var err error
	if isUser {
		err = k.context.Authenticator().UpdateUserVbucketSequences(docID, sequence)
	} else {
		err = k.context.Authenticator().UpdateRoleVbucketSequences(docID, sequence)
	}

	if err != nil {
		base.Warn("Error updating principal doc %s: %v", docID, err)
		return nil, fmt.Errorf("kvChangeIndex: Error updating principal doc %q: %v", docID, err)
	}

	// Add to cache so that the stable sequence gets updated
	logEntry := &LogEntry{
		Sequence:     sequence,
		DocID:        docID,
		TimeReceived: time.Now(),
		VbNo:         vbNo,
		IsPrincipal:  true,
	}

	writeHistogram(changeCacheExpvars, entryTime, "lag-processPrincipalDoc")
	return logEntry, nil
}
Example #19
// Parses a JSON MIME body, unmarshaling it into "into".
func ReadJSONFromMIME(headers http.Header, input io.Reader, into interface{}) error {
	contentType := headers.Get("Content-Type")
	if contentType != "" && !strings.HasPrefix(contentType, "application/json") {
		return base.HTTPErrorf(http.StatusUnsupportedMediaType, "Invalid content type %s", contentType)
	}

	switch headers.Get("Content-Encoding") {
	case "gzip":
		var err error
		if input, err = gzip.NewReader(input); err != nil {
			return err
		}
	case "":
		break
	default:
		return base.HTTPErrorf(http.StatusUnsupportedMediaType, "Unsupported Content-Encoding; use gzip")
	}

	decoder := json.NewDecoder(input)
	if err := decoder.Decode(into); err != nil {
		base.Warn("Couldn't parse JSON in HTTP request: %v", err)
		return base.HTTPErrorf(http.StatusBadRequest, "Bad JSON")
	}
	return nil
}
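
The Content-Encoding handling above — wrap the body in a gzip reader when the client says gzip, reject anything else — uses only the standard library and is easy to reproduce in isolation. A minimal sketch without the base.HTTPErrorf helpers:

package main

import (
	"compress/gzip"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// readJSONBody decodes a JSON request body, transparently un-gzipping it
// when Content-Encoding: gzip is set, and rejecting other encodings.
func readJSONBody(headers http.Header, body io.Reader, into interface{}) error {
	if ct := headers.Get("Content-Type"); ct != "" && !strings.HasPrefix(ct, "application/json") {
		return fmt.Errorf("invalid content type %s", ct)
	}
	switch headers.Get("Content-Encoding") {
	case "gzip":
		gz, err := gzip.NewReader(body)
		if err != nil {
			return err
		}
		defer gz.Close()
		body = gz
	case "":
		// identity encoding; nothing to do
	default:
		return fmt.Errorf("unsupported Content-Encoding; use gzip")
	}
	return json.NewDecoder(body).Decode(into)
}

func main() {
	headers := http.Header{"Content-Type": []string{"application/json"}}
	var doc map[string]interface{}
	err := readJSONBody(headers, strings.NewReader(`{"ok": true}`), &doc)
	fmt.Println(doc, err)
}
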
Example #20
// When CBGT is opening a PIndex for the first time, it will call back this method.
func (sc *ServerContext) NewSyncGatewayPIndexFactory(indexType, indexParams, path string, restart func()) (cbgt.PIndexImpl, cbgt.Dest, error) {

	os.MkdirAll(path, 0700)

	indexParamsStruct := base.SyncGatewayIndexParams{}
	err := json.Unmarshal([]byte(indexParams), &indexParamsStruct)
	if err != nil {
		return nil, nil, fmt.Errorf("Error unmarshalling indexParams: %v.  indexParams: %v", err, indexParams)
	}

	pathMeta := path + string(os.PathSeparator) + "SyncGatewayIndexParams.json"
	err = ioutil.WriteFile(pathMeta, []byte(indexParams), 0600)
	if err != nil {
		errWrap := fmt.Errorf("Error writing %v to %v. Err: %v", indexParams, pathMeta, err)
		log.Fatalf("%v", errWrap)
		return nil, nil, errWrap
	}

	pindexImpl, dest, err := sc.SyncGatewayPIndexFactoryCommon(indexParamsStruct)
	if err != nil {
		log.Fatalf("%v", err)
		return nil, nil, err
	}

	if pindexImpl == nil {
		base.Warn("PIndex for dest %v is nil", dest)
		return nil, nil, fmt.Errorf("PIndex for dest %v is nil.  This could be due to the pindex stored on disk not corresponding to any configured databases.", dest)
	}

	return pindexImpl, dest, nil

}
func (k *kvChangeIndexReader) GetStableClock() (clock base.SequenceClock, err error) {

	// Validation partition map is available.
	_, err = k.indexPartitionsCallback()
	if err != nil {
		// Unable to load partitions.  Check whether the index has data (stable counter is non-zero)
		count, err := base.LoadClockCounter(base.KStableSequenceKey, k.indexReadBucket)
		// Index has data, but we can't get partition map.  Return error
		if err == nil && count > 0 {
			return nil, errors.New("Error: Unable to retrieve index partition map, but index counter exists")
		} else {
			// Index doesn't have data.  Return zero clock as stable clock
			return base.NewSequenceClockImpl(), nil
		}
	}

	stableShardedClock, err := k.loadStableSequence()
	if err != nil {
		base.Warn("Stable sequence and clock not found in index - returning err")
		return nil, err
	}

	return stableShardedClock.AsClock(), nil
}
Example #22
// Adds a set of log entries to the channel index, returning the updated clock.
func (b *BitFlagStorage) AddEntrySet(entries []*LogEntry) (clockUpdates base.SequenceClock, err error) {

	// Update the sequences in the appropriate cache block
	if len(entries) == 0 {
		return clockUpdates, nil
	}

	// The set of updates may be distributed over multiple partitions and blocks.
	// To support this, iterate over the set, and define groups of sequences by block
	// TODO: this approach feels like it's generating a lot of GC work.  Considered an iterative
	//       approach where a set update returned a list of entries that weren't targeted at the
	//       same block as the first entry in the list, but this would force sequential
	//       processing of the blocks.  Might be worth revisiting if we see high GC overhead.
	blockSets := make(BlockSet)
	clockUpdates = base.NewSequenceClockImpl()
	for _, entry := range entries {
		// Update the sequence in the appropriate cache block
		base.LogTo("DIndex+", "Add to channel index [%s], vbNo=%d, isRemoval:%v", b.channelName, entry.VbNo, entry.isRemoved())
		blockKey := GenerateBlockKey(b.channelName, entry.Sequence, b.partitions.VbMap[entry.VbNo])
		if _, ok := blockSets[blockKey]; !ok {
			blockSets[blockKey] = make([]*LogEntry, 0)
		}
		blockSets[blockKey] = append(blockSets[blockKey], entry)
		clockUpdates.SetMaxSequence(entry.VbNo, entry.Sequence)
	}

	err = b.writeBlockSetsWithCas(blockSets)
	if err != nil {
		base.Warn("Error writing blockSets with cas for block %s: %+v", blockSets, err)
	}

	return clockUpdates, err
}
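
The blockSets map above is a plain group-by: each entry is appended to the slice for its block key, so every block is fetched and rewritten once rather than once per entry. A sketch with a made-up key function (the real key is derived from channel name, sequence, and partition):

package main

import "fmt"

type logEntry struct {
	VbNo uint16
	Seq  uint64
}

// groupByBlock buckets entries by the block they land in, so that each block
// can be read and updated a single time for the whole set.
func groupByBlock(entries []logEntry, blockKey func(logEntry) string) map[string][]logEntry {
	blockSets := make(map[string][]logEntry)
	for _, e := range entries {
		key := blockKey(e)
		blockSets[key] = append(blockSets[key], e)
	}
	return blockSets
}

func main() {
	// Made-up key: one block per 5 sequences within a vbucket.
	blockKey := func(e logEntry) string { return fmt.Sprintf("vb%d-blk%d", e.VbNo, e.Seq/5) }
	entries := []logEntry{{VbNo: 1, Seq: 3}, {VbNo: 1, Seq: 4}, {VbNo: 1, Seq: 7}, {VbNo: 2, Seq: 3}}
	fmt.Println(groupByBlock(entries, blockKey))
}
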
func (k *kvChangeIndexReader) GetChanges(channelName string, options ChangesOptions) ([]*LogEntry, error) {

	var sinceClock base.SequenceClock
	if options.Since.Clock == nil {
		// If there's no since clock, we may be in backfill for another channel - revert to the triggered by clock.
		if options.Since.TriggeredByClock != nil {
			sinceClock = options.Since.TriggeredByClock
		} else {
			sinceClock = base.NewSequenceClockImpl()
		}
	} else {
		sinceClock = options.Since.Clock
	}

	reader, err := k.getOrCreateReader(channelName, options)
	if err != nil {
		base.Warn("Error obtaining channel reader (need partition index?) for channel %s", channelName)
		return nil, err
	}
	changes, err := reader.getChanges(sinceClock)
	if err != nil {
		base.LogTo("DIndex+", "No clock found for channel %s, assuming no entries in index", channelName)
		return nil, nil
	}

	// Limit handling
	if options.Limit > 0 && len(changes) > options.Limit {
		limitResult := make([]*LogEntry, options.Limit)
		copy(limitResult[0:], changes[0:])
		return limitResult, nil
	}

	return changes, nil
}
Example #24
func (c *changeCache) processPrincipalDoc(docID string, docJSON []byte, isUser bool) {
	// Currently the cache isn't really doing much with user docs; mostly it needs to know about
	// them because they have sequence numbers, so without them the sequence of sequences would
	// have gaps in it, causing later sequences to get stuck in the queue.
	princ, err := c.unmarshalPrincipal(docJSON, isUser)
	if princ == nil {
		base.Warn("changeCache: Error unmarshaling doc %q: %v", docID, err)
		return
	}
	sequence := princ.Sequence()
	c.lock.RLock()
	initialSequence := c.initialSequence
	c.lock.RUnlock()
	if sequence <= initialSequence {
		return // Tap is sending us an old value from before I started up; ignore it
	}

	// Now add the (somewhat fictitious) entry:
	change := &LogEntry{
		Sequence:     sequence,
		TimeReceived: time.Now(),
	}
	if isUser {
		change.DocID = "_user/" + princ.Name()
	} else {
		change.DocID = "_role/" + princ.Name()
	}

	base.LogTo("Cache", "Received #%d (%q)", change.Sequence, change.DocID)

	changedChannels := c.processEntry(change)
	if c.onChange != nil && len(changedChannels) > 0 {
		c.onChange(changedChannels)
	}
}
Example #25
// Reads & parses the request body, handling either JSON or multipart.
func (h *handler) readDocument() (db.Body, error) {
	contentType, attrs, _ := mime.ParseMediaType(h.rq.Header.Get("Content-Type"))
	switch contentType {
	case "", "application/json":
		return h.readJSON()
	case "multipart/related":
		if DebugMultipart {
			raw, err := h.readBody()
			if err != nil {
				return nil, err
			}
			reader := multipart.NewReader(bytes.NewReader(raw), attrs["boundary"])
			body, err := db.ReadMultipartDocument(reader)
			if err != nil {
				ioutil.WriteFile("GatewayPUT.mime", raw, 0600)
				base.Warn("Error reading MIME data: copied to file GatewayPUT.mime")
			}
			return body, err
		} else {
			reader := multipart.NewReader(h.requestBody, attrs["boundary"])
			return db.ReadMultipartDocument(reader)
		}
	default:
		return nil, base.HTTPErrorf(http.StatusUnsupportedMediaType, "Invalid content type %s", contentType)
	}
}
Example #26
// Bulk get the blocks from the index bucket, and unmarshal into the provided map.  Expects incoming keySet descending by vbucket and sequence.
// Returns entries ascending by vbucket and sequence
func (b *BitFlagStorage) bulkLoadEntries(keySet []string, blockEntries []*LogEntry) (results []*LogEntry) {
	// Do bulk retrieval of entries
	// TODO: do in batches if keySet is very large?

	entries, err := b.bucket.GetBulkRaw(keySet)
	if err != nil {
		base.Warn("Error doing bulk get:%v", err)
	}
	IndexExpvars.Add("bulkGet_bulkLoadEntries", 1)
	IndexExpvars.Add("bulkGet_bulkLoadEntriesCount", int64(len(keySet)))

	results = make([]*LogEntry, 0, len(blockEntries))
	// Unmarshal, deduplicate, and reorder to ascending by sequence within vbucket

	// TODO: unmarshalls and deduplicates sequentially on a single thread - consider unmarshalling in parallel, with a separate
	// iteration for deduplication - based on performance

	currentVb := uint16(0)                       // tracks current vb for deduplication, as we only need to deduplicate within vb
	currentVbDocIDs := make(map[string]struct{}) // set of doc IDs found in the current vb
	for _, entry := range blockEntries {
		// if vb has changed, reset the docID deduplication map
		if entry.VbNo != currentVb {
			currentVb = entry.VbNo
			currentVbDocIDs = make(map[string]struct{})
		}

		entryKey := getEntryKey(entry.VbNo, entry.Sequence)
		entryBytes, ok := entries[entryKey]
		if !ok || entryBytes == nil {
			base.Warn("Expected entry for %s in get bulk response - not found", entryKey)
			continue
		}
		removed := entry.IsRemoved()
		if err := json.Unmarshal(entryBytes, entry); err != nil {
			base.Warn("Error unmarshalling entry for key %s: %v", entryKey, err)
		}
		if _, exists := currentVbDocIDs[entry.DocID]; !exists {
			currentVbDocIDs[entry.DocID] = struct{}{}
			if removed {
				entry.SetRemoved()
			}
			// TODO: optimize performance for prepend?
			results = append([]*LogEntry{entry}, results...)
		}
	}
	return results
}
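
The deduplication above only has to be unique within a vbucket, so the seen-docID set is reset whenever the vbucket changes, and survivors are prepended to flip the newest-first input into ascending order. The same logic over a bare entry struct, assuming input grouped by vbucket and ordered newest-first within each group:

package main

import "fmt"

type entry struct {
	VbNo  uint16
	Seq   uint64
	DocID string
}

// dedupeByVbucket keeps the first (newest) entry per DocID within each vbucket
// and prepends survivors so the result ends up ascending by sequence.
func dedupeByVbucket(entries []entry) []entry {
	results := make([]entry, 0, len(entries))
	currentVb := uint16(0)
	seen := make(map[string]struct{})
	for _, e := range entries {
		if e.VbNo != currentVb {
			// New vbucket: docIDs only need to be unique within a vbucket.
			currentVb = e.VbNo
			seen = make(map[string]struct{})
		}
		if _, dup := seen[e.DocID]; dup {
			continue
		}
		seen[e.DocID] = struct{}{}
		results = append([]entry{e}, results...) // prepend: input is newest-first
	}
	return results
}

func main() {
	in := []entry{
		{VbNo: 1, Seq: 9, DocID: "doc1"},
		{VbNo: 1, Seq: 5, DocID: "doc1"}, // older duplicate, dropped
		{VbNo: 1, Seq: 3, DocID: "doc2"},
	}
	fmt.Println(dedupeByVbucket(in))
}
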
Example #27
// Cleanup function, invoked periodically.
// Removes skipped entries from skippedSeqs that have been waiting longer
// than MaxChannelLogMissingWaitTime from the queue.  Attempts view retrieval
// prior to removal
func (c *changeCache) CleanSkippedSequenceQueue() bool {

	foundEntries, pendingDeletes := func() ([]*LogEntry, []uint64) {
		c.skippedSeqLock.Lock()
		defer c.skippedSeqLock.Unlock()

		var found []*LogEntry
		var deletes []uint64
		for _, skippedSeq := range c.skippedSeqs {
			if time.Since(skippedSeq.timeAdded) > c.options.CacheSkippedSeqMaxWait {
				// Attempt to retrieve the sequence from the view before we remove
				options := ChangesOptions{Since: SequenceID{Seq: skippedSeq.seq}}
				entries, err := c.context.getChangesInChannelFromView("*", skippedSeq.seq, options)
				if err == nil && len(entries) > 0 {
					// Found it - store to send to the caches.
					found = append(found, entries[0])
				} else {
					base.Warn("Skipped Sequence %d didn't show up in MaxChannelLogMissingWaitTime, and isn't available from the * channel view - will be abandoned.", skippedSeq.seq)
					deletes = append(deletes, skippedSeq.seq)
				}
			} else {
				// skippedSeqs are ordered by arrival time, so can stop iterating once we find one
				// still inside the time window
				break
			}
		}
		return found, deletes
	}()

	// Add found entries
	for _, entry := range foundEntries {
		c.processEntry(entry)
	}

	// Purge pending deletes
	for _, sequence := range pendingDeletes {
		err := c.RemoveSkipped(sequence)
		if err != nil {
			base.Warn("Error purging skipped sequence %d from skipped sequence queue", sequence)
		} else {
			dbExpvars.Add("abandoned_seqs", 1)
		}
	}

	return true
}
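
CleanSkippedSequenceQueue snapshots the expired work inside the lock (via the inline closure) and does the slow view lookups and purges only after the lock is released. That shape, reduced to the time-window scan, looks roughly like the sketch below; skippedQueue is a made-up container, not the changeCache type.

package main

import (
	"fmt"
	"sync"
	"time"
)

type skippedEntry struct {
	seq       uint64
	timeAdded time.Time
}

type skippedQueue struct {
	mu      sync.Mutex
	entries []skippedEntry // ordered by arrival time, oldest first
	maxWait time.Duration
}

// expired returns the sequences that have waited longer than maxWait.
// Only the snapshot is taken under the lock; callers do any slow follow-up
// work (view lookups, purges) after it returns.
func (q *skippedQueue) expired() []uint64 {
	q.mu.Lock()
	defer q.mu.Unlock()
	var out []uint64
	for _, e := range q.entries {
		if time.Since(e.timeAdded) <= q.maxWait {
			break // arrival-ordered, so everything after this is still inside the window
		}
		out = append(out, e.seq)
	}
	return out
}

func main() {
	q := &skippedQueue{maxWait: time.Hour}
	q.entries = []skippedEntry{
		{seq: 10, timeAdded: time.Now().Add(-2 * time.Hour)}, // expired
		{seq: 11, timeAdded: time.Now()},                     // still waiting
	}
	fmt.Println("expired:", q.expired())
}
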
Example #28
func (h *handler) addJSON(value interface{}) {
	encoder := json.NewEncoder(h.response)
	err := encoder.Encode(value)
	if err != nil {
		base.Warn("Couldn't serialize JSON for %v : %s", value, err)
		panic("JSON serialization failed")
	}
}
Example #29
func (s *sequenceAllocator) lastSequence() (uint64, error) {
	dbExpvars.Add("sequence_gets", 1)
	last, err := s.incrWithRetry("_sync:seq", 0)
	if err != nil {
		base.Warn("Error from Incr in lastSequence(): %v", err)
	}
	return last, err
}
Example #30
func (s *sequenceAllocator) incrWithRetry(key string, numToReserve uint64) (uint64, error) {

	var err error
	var max uint64
	retries := 0
	for retries < kMaxIncrRetries {
		max, err = s.bucket.Incr(key, numToReserve, numToReserve, 0)
		if err != nil {
			retries++
			base.Warn("Error from Incr in sequence allocator (%d) - attempt (%d/%d): %v", numToReserve, retries, kMaxIncrRetries, err)
			time.Sleep(10 * time.Millisecond)
		} else {
			return max, err
		}
	}
	base.Warn("Too many failed Incr in sequence allocator - failing (%d): %v", numToReserve, err)
	return 0, err
}
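
The retry loop above generalizes to any flaky operation: attempt up to a fixed number of times, sleep briefly between attempts, and return the last error once the budget is spent. A self-contained sketch with a hypothetical operation standing in for the bucket's Incr:

package main

import (
	"errors"
	"fmt"
	"time"
)

const maxIncrRetries = 3 // stands in for kMaxIncrRetries above

// incrWithRetry retries op up to maxIncrRetries times, sleeping briefly between
// attempts, and returns the last error if every attempt fails.
func incrWithRetry(op func() (uint64, error)) (uint64, error) {
	var lastErr error
	for attempt := 1; attempt <= maxIncrRetries; attempt++ {
		value, err := op()
		if err == nil {
			return value, nil
		}
		lastErr = err
		fmt.Printf("incr attempt %d/%d failed: %v\n", attempt, maxIncrRetries, err)
		time.Sleep(10 * time.Millisecond)
	}
	return 0, fmt.Errorf("too many failed incr attempts: %w", lastErr)
}

func main() {
	calls := 0
	value, err := incrWithRetry(func() (uint64, error) {
		calls++
		if calls < 2 {
			return 0, errors.New("temporary failure") // simulated transient error
		}
		return 42, nil
	})
	fmt.Println(value, err)
}
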