// ADMIN API to turn Go CPU profiling on/off func (h *handler) handleCPUProfiling() error { var params struct { File string `json:"file"` } body, err := ioutil.ReadAll(h.rq.Body) if err != nil { return err } if len(body) > 0 { if err = json.Unmarshal(body, ¶ms); err != nil { return err } } if params.File != "" { base.Log("Profiling to %s ...", params.File) f, err := os.Create(params.File) if err != nil { return err } pprof.StartCPUProfile(f) } else { base.Log("...ending profile.") pprof.StopCPUProfile() } return nil }
// Starts and runs the server given its configuration. (This function never returns.) func RunServer(config *ServerConfig) { PrettyPrint = config.Pretty base.Log("==== %s ====", LongVersionString) if os.Getenv("GOMAXPROCS") == "" && runtime.GOMAXPROCS(0) == 1 { cpus := runtime.NumCPU() if cpus > 1 { runtime.GOMAXPROCS(cpus) base.Log("Configured Go to use all %d CPUs; setenv GOMAXPROCS to override this", cpus) } } setMaxFileDescriptors(config.MaxFileDescriptors) sc := NewServerContext(config) for _, dbConfig := range config.Databases { if _, err := sc.AddDatabaseFromConfig(dbConfig); err != nil { base.LogFatal("Error opening database: %v", err) } } if config.ProfileInterface != nil { //runtime.MemProfileRate = 10 * 1024 base.Log("Starting profile server on %s", *config.ProfileInterface) go func() { http.ListenAndServe(*config.ProfileInterface, nil) }() } base.Log("Starting admin server on %s", *config.AdminInterface) go config.serve(*config.AdminInterface, CreateAdminHandler(sc)) base.Log("Starting server on %s ...", *config.Interface) config.serve(*config.Interface, CreatePublicHandler(sc)) }
// TestShadowerPush verifies that documents created/deleted through the Sync
// Gateway database are pushed out to the external (shadowed) bucket.
func TestShadowerPush(t *testing.T) {
	//base.LogKeys["Shadow"] = true
	bucket := makeExternalBucket()
	defer bucket.Close()

	db := setupTestDB(t)
	defer tearDownTestDB(t, db)
	var err error
	// nil pattern: shadow every document ID.
	db.Shadower, err = NewShadower(db.DatabaseContext, bucket, nil)
	assertNoError(t, err, "NewShadower")

	key1rev1, err := db.Put("key1", Body{"aaa": "bbb"})
	assertNoError(t, err, "Put")
	_, err = db.Put("key2", Body{"ccc": "ddd"})
	assertNoError(t, err, "Put")

	base.Log("Waiting for shadower to catch up...")
	var doc1, doc2 Body
	// Poll until both docs have appeared in the external bucket.
	waitFor(t, func() bool {
		return bucket.Get("key1", &doc1) == nil && bucket.Get("key2", &doc2) == nil
	})
	assert.DeepEquals(t, doc1, Body{"aaa": "bbb"})
	assert.DeepEquals(t, doc2, Body{"ccc": "ddd"})

	base.Log("Deleting local doc")
	db.DeleteDoc("key1", key1rev1)
	// The deletion should propagate: Get on the external bucket starts failing.
	waitFor(t, func() bool {
		err = bucket.Get("key1", &doc1)
		return err != nil
	})
	assert.True(t, base.IsDocNotFoundError(err))
}
// Starts and runs the server given its configuration. (This function never returns.) func RunServer(config *ServerConfig) { PrettyPrint = config.Pretty if os.Getenv("GOMAXPROCS") == "" && runtime.GOMAXPROCS(0) == 1 { cpus := runtime.NumCPU() if cpus > 1 { runtime.GOMAXPROCS(cpus) base.Log("Configured Go to use all %d CPUs; setenv GOMAXPROCS to override this", cpus) } } sc := NewServerContext(config) for _, dbConfig := range config.Databases { if err := sc.AddDatabaseFromConfig(dbConfig); err != nil { base.LogFatal("Error opening database: %v", err) } } base.Log("Starting admin server on %s", *config.AdminInterface) go func() { if err := http.ListenAndServe(*config.AdminInterface, CreateAdminHandler(sc)); err != nil { base.LogFatal("HTTP server failed: %v", err) } }() base.Log("Starting server on %s ...", *config.Interface) if err := http.ListenAndServe(*config.Interface, CreatePublicHandler(sc)); err != nil { base.LogFatal("HTTP server failed: %v", err) } }
// POST /_persona creates a browserID-based login session and sets its cookie. // It's API-compatible with the CouchDB plugin: <https://github.com/iriscouch/browserid_couchdb/> func (h *handler) handlePersonaPOST() error { var params struct { Assertion string `json:"assertion"` } err := db.ReadJSONFromMIME(h.rq.Header, h.rq.Body, ¶ms) if err != nil { return err } origin := h.server.config.Persona.Origin if origin == "" { base.Warn("Can't accept Persona logins: Server URL not configured") return &base.HTTPError{http.StatusInternalServerError, "Server url not configured"} } // OK, now verify it: base.Log("Persona: Verifying assertion %q for %q", params.Assertion, origin) verifiedInfo, err := VerifyPersona(params.Assertion, origin) if err != nil { base.Log("Persona: Failed verify: %v", err) return err } base.Log("Persona: Logged in %q!", verifiedInfo.Email) createUserIfNeeded := h.server.config.Persona.Register return h.makeSessionFromEmail(verifiedInfo.Email, createUserIfNeeded) }
// installPrincipals creates the users or roles listed in the config spec.
// `what` is either "user" or "role" (used both for dispatch and in log/error
// messages). Existing principals are left untouched, except the GUEST user,
// which is always reset to match the config.
func (sc *serverContext) installPrincipals(context *context, spec map[string]json.RawMessage, what string) error {
	for name, data := range spec {
		isUsers := (what == "user")
		// The guest user is stored under the empty-string name internally.
		if name == "GUEST" && isUsers {
			name = ""
		}
		newPrincipal, err := context.auth.UnmarshalPrincipal(data, name, 1, isUsers)
		if err != nil {
			return fmt.Errorf("Invalid config for %s %q: %v", what, name, err)
		}
		oldPrincipal, err := context.auth.GetPrincipal(newPrincipal.Name(), isUsers)
		// Save only when the principal doesn't already exist, or it's the
		// guest user (name == ""), which always gets overwritten.
		// NOTE(review): if GetPrincipal itself fails, the error surfaces via
		// the "Couldn't create" path below rather than being reported as a
		// lookup failure — confirm that's the intended handling.
		if oldPrincipal == nil || name == "" {
			if err == nil {
				err = context.auth.Save(newPrincipal)
			}
			if err != nil {
				return fmt.Errorf("Couldn't create %s %q: %v", what, name, err)
			} else if name == "" {
				base.Log("Reset guest user to config")
			} else {
				base.Log("Created %s %q", what, name)
			}
		}
	}
	return nil
}
func TestShadowerPattern(t *testing.T) { bucket := makeExternalBucket() defer bucket.Close() bucket.Set("key1", 0, Body{"foo": 1}) bucket.Set("ignorekey", 0, Body{"bar": -1}) bucket.Set("key2", 0, Body{"bar": -1}) db := setupTestDB(t) defer tearDownTestDB(t, db) pattern, _ := regexp.Compile(`key\d+`) shadower, err := NewShadower(db.DatabaseContext, bucket, pattern) assertNoError(t, err, "NewShadower") defer shadower.Stop() base.Log("Waiting for shadower to catch up...") waitFor(t, func() bool { seq, _ := db.LastSequence() return seq >= 1 }) doc1, _ := db.GetDoc("key1") docI, _ := db.GetDoc("ignorekey") doc2, _ := db.GetDoc("key2") assert.DeepEquals(t, doc1.body, Body{"foo": float64(1)}) assert.True(t, docI == nil) assert.DeepEquals(t, doc2.body, Body{"bar": float64(-1)}) }
func (h *handler) checkAuth() error { h.user = nil if h.context == nil || h.context.auth == nil { return nil } // Check cookie first, then HTTP auth: var err error h.user, err = h.context.auth.AuthenticateCookie(h.rq) if err != nil { return err } var userName, password string if h.user == nil { userName, password = h.getBasicAuth() h.user = h.context.auth.AuthenticateUser(userName, password) } if h.user == nil && !h.admin { cookie, _ := h.rq.Cookie(auth.CookieName) base.Log("Auth failed for username=%q, cookie=%q", userName, cookie) h.response.Header().Set("WWW-Authenticate", `Basic realm="Couchbase Sync Gateway"`) return &base.HTTPError{http.StatusUnauthorized, "Invalid login"} } return nil }
func TestImport(t *testing.T) { db := setupTestDB(t) defer tearDownTestDB(t, db) // Add docs to the underlying bucket: for i := 1; i <= 20; i++ { db.Bucket.Add(fmt.Sprintf("alreadyHere%d", i), 0, Body{"key1": i, "key2": "hi"}) } // Make sure they aren't visible thru the gateway: doc, err := db.GetDoc("alreadyHere1") assert.Equals(t, doc, (*document)(nil)) assert.Equals(t, err.(*base.HTTPError).Status, 404) // Import them: count, err := db.UpdateAllDocChannels(false, true) assertNoError(t, err, "ApplySyncFun") assert.Equals(t, count, 20) // Now they're visible: doc, err = db.GetDoc("alreadyHere1") base.Log("doc = %+v", doc) assert.True(t, doc != nil) assertNoError(t, err, "can't get doc") }
func (sc *ServerContext) installPrincipals(context *db.DatabaseContext, spec map[string]*PrincipalConfig, what string) error { for name, princ := range spec { princ.Name = &name _, err := updatePrincipal(context, *princ, (what == "user"), (name == "GUEST")) if err != nil { // A conflict error just means updatePrincipal didn't overwrite an existing user. if status, _ := base.ErrorAsHTTPStatus(err); status != http.StatusConflict { return fmt.Errorf("Couldn't create %s %q: %v", what, name, err) } } else if name == "GUEST" { base.Log(" Reset guest user to config") } else { base.Log(" Created %s %q", what, name) } } return nil }
// AddDatabaseFromConfig adds a database to the ServerContext given its
// configuration: connects to the backing bucket, applies the sync function,
// installs configured roles/users, and registers the database for HTTP routing.
func (sc *ServerContext) AddDatabaseFromConfig(config *DbConfig) error {
	// Defaults, overridden by the config where present:
	server := "http://localhost:8091"
	pool := "default"
	bucketName := config.name
	if config.Server != nil {
		server = *config.Server
	}
	if config.Pool != nil {
		pool = *config.Pool
	}
	if config.Bucket != nil {
		bucketName = *config.Bucket
	}
	// The db name falls back to the bucket name when not given explicitly.
	dbName := config.name
	if dbName == "" {
		dbName = bucketName
	}
	base.Log("Opening db /%s as bucket %q, pool %q, server <%s>", dbName, bucketName, pool, server)

	if err := db.ValidateDatabaseName(dbName); err != nil {
		return err
	}

	// Connect to the bucket and add the database:
	bucket, err := db.ConnectToBucket(server, pool, bucketName)
	if err != nil {
		return err
	}
	dbcontext, err := db.NewDatabaseContext(dbName, bucket)
	if err != nil {
		return err
	}
	if config.Sync != nil {
		if err := dbcontext.ApplySyncFun(*config.Sync); err != nil {
			return err
		}
	}
	if dbcontext.ChannelMapper == nil {
		base.Warn("Database %q sync function undefined; using default", dbName)
	}

	// Create default users & roles:
	if err := sc.installPrincipals(dbcontext, config.Roles, "role"); err != nil {
		return err
	} else if err := sc.installPrincipals(dbcontext, config.Users, "user"); err != nil {
		return err
	}

	// Register it so HTTP handlers can find it:
	if err := sc.registerDatabase(dbcontext); err != nil {
		return err
	}
	return nil
}
func (sc *ServerContext) applySyncFunction(dbcontext *db.DatabaseContext, syncFn string) error { changed, err := dbcontext.UpdateSyncFun(syncFn) if err != nil || !changed { return err } // Sync function has changed: base.Log("**NOTE:** %q's sync function has changed. The new function may assign different channels to documents, or permissions to users. You may want to re-sync the database to update these.", dbcontext.Name) return nil }
// Helper function to open a Couchbase connection and return a specific bucket. func ConnectToBucket(couchbaseURL, poolName, bucketName string) (bucket base.Bucket, err error) { bucket, err = base.GetBucket(couchbaseURL, poolName, bucketName) if err != nil { return } base.Log("Connected to <%s>, pool %s, bucket %s", couchbaseURL, poolName, bucketName) err = installViews(bucket) return }
// Starts and runs the server given its configuration. (This function never returns.) func RunServer(config *ServerConfig) { PrettyPrint = config.Pretty sc := newServerContext(config) for _, dbConfig := range config.Databases { if err := sc.addDatabaseFromConfig(dbConfig); err != nil { base.LogFatal("Error opening database: %v", err) } } http.Handle("/", createHandler(sc)) base.Log("Starting auth server on %s", *config.AdminInterface) StartAuthListener(*config.AdminInterface, sc) base.Log("Starting server on %s ...", *config.Interface) if err := http.ListenAndServe(*config.Interface, nil); err != nil { base.LogFatal("Server failed: ", err.Error()) } }
// HTTP handler for a POST to _bulk_docs func (h *handler) handleBulkDocs() error { body, err := h.readJSON() if err != nil { return err } newEdits, ok := body["new_edits"].(bool) if !ok { newEdits = true } docs := body["docs"].([]interface{}) h.db.ReserveSequences(uint64(len(docs))) result := make([]db.Body, 0, len(docs)) for _, item := range docs { doc := item.(map[string]interface{}) docid, _ := doc["_id"].(string) var err error var revid string if newEdits { if docid != "" { revid, err = h.db.Put(docid, doc) } else { docid, revid, err = h.db.Post(doc) } } else { revisions := db.ParseRevisions(doc) if revisions == nil { err = &base.HTTPError{http.StatusBadRequest, "Bad _revisions"} } else { revid = revisions[0] err = h.db.PutExistingRev(docid, doc, revisions) } } status := db.Body{} if docid != "" { status["id"] = docid } if err != nil { code, msg := base.ErrorAsHTTPStatus(err) status["status"] = code status["error"] = base.CouchHTTPErrorName(code) status["reason"] = msg base.Log("\tBulkDocs: Doc %q --> %v", docid, err) err = nil // wrote it to output already; not going to return it } else { status["rev"] = revid } result = append(result, status) } h.writeJSONStatus(http.StatusCreated, result) return nil }
// POST /_persona creates a browserID-based login session and sets its cookie. // It's API-compatible with the CouchDB plugin: <https://github.com/iriscouch/browserid_couchdb/> func (h *handler) handlePersonaPOST() error { var params struct { Assertion string `json:"assertion"` } err := db.ReadJSONFromMIME(h.rq.Header, h.rq.Body, ¶ms) if err != nil { return err } origin := h.server.config.Persona.Origin if origin == "" { base.Warn("Can't accept Persona logins: Server URL not configured") return &base.HTTPError{http.StatusInternalServerError, "Server url not configured"} } // OK, now verify it: base.Log("Persona: Verifying assertion %q for %q", params.Assertion, origin) verifiedInfo, err := VerifyPersona(params.Assertion, origin) if err != nil { base.Log("Persona: Failed verify: %v", err) return err } base.Log("Persona: Logged in %q!", verifiedInfo.Email) // Email is verified. Look up the user and make a login session for her: user, err := h.db.Authenticator().GetUserByEmail(verifiedInfo.Email) if err != nil { return err } if user == nil { // The email address is authentic but we have no user account for it. if !h.server.config.Persona.Register { return &base.HTTPError{http.StatusUnauthorized, "No such user"} } // Create a User with the given email address as username and a random password. user, err = h.registerPersonaUser(verifiedInfo) if err != nil { return err } } return h.makeSession(user) }
// ADMIN API to turn Go CPU profiling on/off func (h *handler) handleProfiling() error { profileName := h.PathVar("name") var params struct { File string `json:"file"` } body, err := h.readBody() if err != nil { return err } if len(body) > 0 { if err = json.Unmarshal(body, ¶ms); err != nil { return err } } if params.File != "" { f, err := os.Create(params.File) if err != nil { return err } if profileName != "" { defer f.Close() if profile := pprof.Lookup(profileName); profile != nil { profile.WriteTo(f, 0) base.Log("Wrote %s profile to %s", profileName, params.File) } else { return base.HTTPErrorf(http.StatusNotFound, "No such profile %q", profileName) } } else { base.Log("Starting CPU profile to %s ...", params.File) pprof.StartCPUProfile(f) } } else { if profileName != "" { return base.HTTPErrorf(http.StatusBadRequest, "Missing JSON 'file' parameter") } else { base.Log("...ending CPU profile.") pprof.StopCPUProfile() } } return nil }
// getChannelsAndAccess calls the JS sync function to assign the doc to
// channels, grant users access to channels/roles, and reject invalid
// documents. Without a configured sync function it falls back to the doc's
// "channels" property.
// Returns the channel set, the user->channels access map, the user->roles
// map, and an error (non-nil means the update should be rejected).
func (db *Database) getChannelsAndAccess(doc *document, body Body, parentRevID string) (result base.Set, access channels.AccessMap, roles channels.AccessMap, err error) {
	base.LogTo("CRUD+", "Invoking sync on doc %q rev %s", doc.ID, body["_rev"])

	// Get the parent revision, to pass to the sync function:
	var oldJson string
	if parentRevID != "" {
		var oldJsonBytes []byte
		oldJsonBytes, err = db.getRevisionJSON(doc, parentRevID)
		if err != nil {
			// A missing parent body isn't fatal; the sync fn just gets "".
			if base.IsDocNotFoundError(err) {
				err = nil
			}
			return
		}
		oldJson = string(oldJsonBytes)
	}

	if db.ChannelMapper != nil {
		// Call the ChannelMapper:
		var output *channels.ChannelMapperOutput
		output, err = db.ChannelMapper.MapToChannelsAndAccess(body, oldJson, makeUserCtx(db.user))
		if err == nil {
			result = output.Channels
			access = output.Access
			roles = output.Roles
			// The sync fn's throw({forbidden:...}) surfaces as Rejection:
			err = output.Rejection
			if err != nil {
				base.Log("Sync fn rejected: new=%+v old=%s --> %s", body, oldJson, err)
			} else if !validateAccessMap(access) || !validateRoleAccessMap(roles) {
				err = base.HTTPErrorf(500, "Error in JS sync function")
			}
		} else {
			base.Warn("Sync fn exception: %+v; doc = %s", err, body)
			err = base.HTTPErrorf(500, "Exception in JS sync function")
		}
	} else {
		// No ChannelMapper so by default use the "channels" property:
		value, _ := body["channels"].([]interface{})
		if value != nil {
			array := make([]string, 0, len(value))
			for _, channel := range value {
				channelStr, ok := channel.(string)
				// Non-string and empty entries are silently skipped.
				if ok && len(channelStr) > 0 {
					array = append(array, channelStr)
				}
			}
			result, err = channels.SetFromArray(array, channels.KeepStar)
		}
	}
	return
}
func setMaxFileDescriptors(maxP *uint64) { maxFDs := DefaultMaxFileDescriptors if maxP != nil { maxFDs = *maxP } actualMax, err := base.SetMaxFileDescriptors(maxFDs) if err != nil { base.Warn("Error setting MaxFileDescriptors to %d: %v", maxFDs, err) } else if maxP != nil { base.Log("Configured process to allow %d open file descriptors", actualMax) } }
// UpdateAllDocChannels re-runs the channelMapper on every document in the
// database. To be used when the JavaScript channelmap function changes.
// Per-document failures are logged and skipped, not returned.
func (db *Database) UpdateAllDocChannels() error {
	base.Log("Recomputing document channels...")
	// Enumerate every doc via the all_docs view (fresh, unreduced):
	vres, err := db.Bucket.View("sync_gateway", "all_docs", Body{"stale": false, "reduce": false})
	if err != nil {
		return err
	}
	for _, row := range vres.Rows {
		docid := row.Key.(string)
		key := db.realDocID(docid)
		// CAS-style update; the callback recomputes channels/access from the
		// current stored value.
		err := db.Bucket.Update(key, 0, func(currentValue []byte) ([]byte, error) {
			// Be careful: this block can be invoked multiple times if there are races!
			if currentValue == nil {
				return nil, couchbase.UpdateCancel // someone deleted it?!
			}
			doc, err := unmarshalDocument(docid, currentValue)
			if err != nil {
				return nil, err
			}
			// Current revision's body, re-fed through the sync function:
			body, err := db.getRevFromDoc(doc, "", false)
			if err != nil {
				return nil, err
			}
			parentRevID := doc.History[doc.CurrentRev].Parent
			channels, access, err := db.getChannelsAndAccess(doc, body, parentRevID)
			if err != nil {
				// Probably the validator rejected the doc
				access = nil
				channels = nil
			}
			db.updateDocAccess(doc, access)
			db.updateDocChannels(doc, channels)
			base.Log("\tSaving updated channels and access grants of %q", docid)
			return json.Marshal(doc)
		})
		if err != nil && err != couchbase.UpdateCancel {
			base.Warn("Error updating doc %q: %v", docid, err)
		}
	}
	return nil
}
func (sc *ServerContext) RemoveDatabase(dbName string) bool { sc.lock.Lock() defer sc.lock.Unlock() context := sc.databases_[dbName] if context == nil { return false } base.Log("Closing db /%s (bucket %q)", context.Name, context.Bucket.GetName()) context.Close() delete(sc.databases_, dbName) return true }
// getChangesInChannelFromView queries the 'channels' view to get a range of
// sequences of a single channel as LogEntries. This is the cache-miss path
// for channel change feeds; slow queries (>200ms) are logged.
func (dbc *DatabaseContext) getChangesInChannelFromView(
	channelName string, endSeq uint64, options ChangesOptions) (LogEntries, error) {
	start := time.Now()
	// Query the view:
	optMap := changesViewOptions(channelName, endSeq, options)
	base.LogTo("Cache", " Querying 'channels' view for %q (start=#%d, end=#%d, limit=%d)", channelName, options.Since+1, endSeq, options.Limit)
	vres := channelsViewResult{}
	err := dbc.Bucket.ViewCustom("sync_gateway", "channels", optMap, &vres)
	if err != nil {
		base.Log("Error from 'channels' view: %v", err)
		return nil, err
	} else if len(vres.Rows) == 0 {
		base.LogTo("Cache", " Got no rows from view for %q", channelName)
		return nil, nil
	}

	// Convert the output to LogEntries:
	entries := make(LogEntries, 0, len(vres.Rows))
	for _, row := range vres.Rows {
		// row.Key is [channelName, sequence]; JSON numbers arrive as float64.
		entry := &LogEntry{
			Sequence:     uint64(row.Key[1].(float64)),
			DocID:        row.ID,
			RevID:        row.Value.Rev,
			Flags:        row.Value.Flags,
			TimeReceived: time.Now(),
		}
		// base.LogTo("Cache", " Got view sequence #%d (%q / %q)", entry.Sequence, entry.DocID, entry.RevID)
		entries = append(entries, entry)
	}
	base.LogTo("Cache", " Got %d rows from view for %q: #%d ... #%d", len(entries), channelName, entries[0].Sequence, entries[len(entries)-1].Sequence)
	// Surface slow view queries for ops visibility:
	if elapsed := time.Since(start); elapsed > 200*time.Millisecond {
		base.Log("changes_view: Query took %v to return %d rows, options = %#v", elapsed, len(entries), optMap)
	}
	changeCacheExpvars.Add("view_queries", 1)
	return entries, nil
}
// TestShadowerPull verifies that documents created/deleted in the external
// bucket are pulled into the gateway database, and that non-JSON values
// are ignored.
func TestShadowerPull(t *testing.T) {
	bucket := makeExternalBucket()
	defer bucket.Close()
	bucket.Set("key1", 0, Body{"foo": 1})
	bucket.Set("key2", 0, Body{"bar": -1})
	bucket.SetRaw("key3", 0, []byte("qwertyuiop")) //will be ignored

	db := setupTestDB(t)
	defer tearDownTestDB(t, db)
	shadower, err := NewShadower(db.DatabaseContext, bucket, nil)
	assertNoError(t, err, "NewShadower")
	defer shadower.Stop()

	base.Log("Waiting for shadower to catch up...")
	var doc1, doc2 *document
	// Two JSON docs should produce two sequences (key3 is skipped):
	waitFor(t, func() bool {
		seq, _ := db.LastSequence()
		return seq >= 2
	})
	doc1, _ = db.GetDoc("key1")
	doc2, _ = db.GetDoc("key2")
	assert.DeepEquals(t, doc1.body, Body{"foo": float64(1)})
	assert.DeepEquals(t, doc2.body, Body{"bar": float64(-1)})

	base.Log("Deleting remote doc")
	bucket.Delete("key1")
	// The remote deletion should arrive as a tombstone (sequence 3):
	waitFor(t, func() bool {
		seq, _ := db.LastSequence()
		return seq >= 3
	})
	doc1, _ = db.GetDoc("key1")
	assert.True(t, doc1.hasFlag(channels.Deleted))
	_, err = db.Get("key1")
	assert.DeepEquals(t, err, &base.HTTPError{Status: 404, Message: "deleted"})
}
// FOR TESTS ONLY: Blocks until the given sequence has been received. func (c *changeCache) waitForSequence(sequence uint64) { var i int for i = 0; i < 20; i++ { c.lock.Lock() nextSequence := c.nextSequence c.lock.Unlock() if nextSequence >= sequence+1 { base.Log("waitForSequence(%d) took %d ms", sequence, i*100) return } time.Sleep(100 * time.Millisecond) } panic(fmt.Sprintf("changeCache: Sequence %d never showed up!", sequence)) }
// POST a report of database statistics func (sc *ServerContext) reportStats() { if sc.config.DeploymentID == nil { panic("Can't reportStats without DeploymentID") } stats := sc.Stats() if stats == nil { return // No activity } base.Log("Reporting server stats to %s ...", kStatsReportURL) body, _ := json.Marshal(stats) bodyReader := bytes.NewReader(body) _, err := sc.HTTPClient.Post(kStatsReportURL, "application/json", bodyReader) if err != nil { base.Warn("Error posting stats: %v", err) } }
func (sc *ServerContext) startStatsReporter() { interval := kStatsReportInterval if sc.config.StatsReportInterval != nil { if *sc.config.StatsReportInterval <= 0 { return } interval = time.Duration(*sc.config.StatsReportInterval) * time.Second } sc.statsTicker = time.NewTicker(interval) go func() { for _ = range sc.statsTicker.C { sc.reportStats() } }() base.Log("Will report server stats for %q every %v", *sc.config.DeploymentID, interval) }
// Deletes a database (and all documents) func (db *Database) Delete() error { opts := Body{"stale": false} vres, err := db.Bucket.View("sync_gateway", "all_bits", opts) if err != nil { base.Warn("all_bits view returned %v", err) return err } //FIX: Is there a way to do this in one operation? base.Log("Deleting %d documents of %q ...", len(vres.Rows), db.Name) for _, row := range vres.Rows { base.LogTo("CRUD", "\tDeleting %q", row.ID) if err := db.Bucket.Delete(row.ID); err != nil { base.Warn("Error deleting %q: %v", row.ID, err) } } return nil }
// Calls the JS sync function to assign the doc to channels, grant users // access to channels, and reject invalid documents. func (db *Database) getChannelsAndAccess(doc *document, body Body, parentRevID string) (result channels.Set, access channels.AccessMap, err error) { base.LogTo("CRUD", "Invoking sync on doc %q rev %s", doc.ID, body["_rev"]) var oldJson string if parentRevID != "" { oldJson = string(doc.getRevisionJSON(parentRevID)) } if db.ChannelMapper != nil { var output *channels.ChannelMapperOutput output, err = db.ChannelMapper.MapToChannelsAndAccess(body, oldJson, makeUserCtx(db.user)) if err == nil { result = output.Channels access = output.Access err = output.Rejection if err != nil { base.Log("Sync fn rejected: new=%+v old=%s --> %s", body, oldJson, err) } else if !validateAccessMap(access) { err = &base.HTTPError{500, fmt.Sprintf("Error in JS sync function")} } } else { base.Warn("Sync fn exception: %+v; doc = %s", err, body) err = &base.HTTPError{500, "Exception in JS sync function"} } } else { // No ChannelMapper so by default use the "channels" property: value, _ := body["channels"].([]interface{}) if value != nil { array := make([]string, 0, len(value)) for _, channel := range value { channelStr, ok := channel.(string) if ok && len(channelStr) > 0 { array = append(array, channelStr) } } result, err = channels.SetFromArray(array, channels.KeepStar) } } return }
// startShadowing hooks a database up to an external "shadow" bucket so that
// changes sync between them. If shadow.Doc_id_regex is set, only matching
// doc IDs are shadowed.
func (sc *ServerContext) startShadowing(dbcontext *db.DatabaseContext, shadow *ShadowConfig) error {
	var pattern *regexp.Regexp
	if shadow.Doc_id_regex != nil {
		var err error
		pattern, err = regexp.Compile(*shadow.Doc_id_regex)
		if err != nil {
			base.Warn("Invalid shadow doc_id_regex: %s", *shadow.Doc_id_regex)
			return err
		}
	}

	spec := base.BucketSpec{
		Server:     shadow.Server,
		PoolName:   "default",
		BucketName: shadow.Bucket,
	}
	if shadow.Pool != nil {
		spec.PoolName = *shadow.Pool
	}
	if shadow.Username != "" {
		// The ShadowConfig itself carries the credentials, so it serves as
		// the spec's auth provider.
		spec.Auth = shadow
	}

	bucket, err := db.ConnectToBucket(spec)
	if err != nil {
		return err
	}
	shadower, err := db.NewShadower(dbcontext, bucket, pattern)
	if err != nil {
		// Don't leak the connection if the shadower can't start:
		bucket.Close()
		return err
	}
	dbcontext.Shadower = shadower

	//Remove credentials from server URL before logging
	url, err := couchbase.ParseURL(spec.Server)
	if err == nil {
		// Best-effort: if the URL doesn't parse, the log line is just skipped.
		base.Log("Database %q shadowing remote bucket %q, pool %q, server <%s:%s/%s>", dbcontext.Name, spec.BucketName, spec.PoolName, url.Scheme, url.Host, url.Path)
	}
	return nil
}
// checkAuth authenticates the request against the given database context and
// stores the result in h.user. Order of precedence: session cookie, then
// HTTP basic auth, then the guest ("") account. Returns a 401 HTTPError for
// bad credentials, or when guest access is disabled for a regular-privilege
// request.
func (h *handler) checkAuth(context *db.DatabaseContext) error {
	h.user = nil
	if context == nil {
		return nil
	}

	// Check cookie first:
	var err error
	h.user, err = context.Authenticator().AuthenticateCookie(h.rq)
	if err != nil {
		return err
	} else if h.user != nil {
		base.LogTo("HTTP+", "#%03d: Authenticated as %q via cookie", h.serialNumber, h.user.Name())
		return nil
	}

	// If no cookie, check HTTP auth:
	if userName, password := h.getBasicAuth(); userName != "" {
		h.user = context.Authenticator().AuthenticateUser(userName, password)
		if h.user == nil {
			base.Log("HTTP auth failed for username=%q", userName)
			h.response.Header().Set("WWW-Authenticate", `Basic realm="Couchbase Sync Gateway"`)
			return &base.HTTPError{http.StatusUnauthorized, "Invalid login"}
		}
		// Don't log the guest user's empty name:
		if h.user.Name() != "" {
			base.LogTo("HTTP+", "#%03d: Authenticated as %q", h.serialNumber, h.user.Name())
		}
		return nil
	}

	// No auth given -- check guest access
	if h.user, err = context.Authenticator().GetUser(""); err != nil {
		return err
	}
	// A disabled guest account means anonymous access is forbidden:
	if h.privs == regularPrivs && h.user.Disabled() {
		h.response.Header().Set("WWW-Authenticate", `Basic realm="Couchbase Sync Gateway"`)
		return &base.HTTPError{http.StatusUnauthorized, "Login required"}
	}
	return nil
}