func (view *viewIndexer) Refresh() errors.Error { // trigger refresh of this indexer logging.Infof("Refreshing Indexes in keyspace %s", view.keyspace.Name()) indexMap := make(map[string]datastore.Index) primaryIndexMap := make(map[string]datastore.PrimaryIndex) //TODO need mutex here view.indexes = indexMap view.primary = primaryIndexMap indexes, err := loadViewIndexes(view) if err != nil { logging.Errorf(" Error loading indexes for bucket %s", view.keyspace.Name()) return errors.NewCbViewIndexesLoadingError(err, view.keyspace.Name()) } if len(indexes) == 0 { logging.Infof("No indexes found for bucket %s", view.keyspace.Name()) return nil } for _, index := range indexes { logging.Infof("Found index %s on keyspace %s", (*index).Name(), view.keyspace.Name()) name := (*index).Name() indexMap[name] = *index if name == PRIMARY_INDEX { primaryIndexMap[name] = (*index).(datastore.PrimaryIndex) } } return nil }
func (p *namespace) refresh(changed bool) { // trigger refresh of this pool logging.Infof("Refreshing pool %s", p.name) newpool, err := p.site.client.GetPool(p.name) if err != nil { var client cb.Client logging.Errorf("Error updating pool name %s: Error %v", p.name, err) url := p.site.URL() /* transport := cbauth.WrapHTTPTransport(cb.HTTPTransport, nil) cb.HTTPClient.Transport = transport */ if p.site.CbAuthInit == true { client, err = cb.ConnectWithAuth(url, cbauth.NewAuthHandler(nil)) } else { client, err = cb.Connect(url) } if err != nil { logging.Errorf("Error connecting to URL %s", url) return } // check if the default pool exists newpool, err = client.GetPool(p.name) if err != nil { logging.Errorf("Retry Failed Error updating pool name %s: Error %v", p.name, err) return } p.site.client = client } p.lock.Lock() defer p.lock.Unlock() for name, ks := range p.keyspaceCache { logging.Infof(" Checking keyspace %s", name) _, err := newpool.GetBucket(name) if err != nil { changed = true ks.(*keyspace).deleted = true logging.Errorf(" Error retrieving bucket %s", name) delete(p.keyspaceCache, name) } // Not deleted. Check if GSI indexer is available if ks.(*keyspace).gsiIndexer == nil { ks.(*keyspace).refreshIndexer(p.site.URL(), p.Name()) } } if changed == true { p.setPool(newpool) } }
func newKeyspace(p *namespace, name string) (datastore.Keyspace, errors.Error) { cbNamespace := p.getPool() cbbucket, err := cbNamespace.GetBucket(name) if err != nil { logging.Infof(" keyspace %s not found %v", name, err) // go-couchbase caches the buckets // to be sure no such bucket exists right now // we trigger a refresh p.refresh(true) cbNamespace = p.getPool() // and then check one more time logging.Infof(" Retrying bucket %s", name) cbbucket, err = cbNamespace.GetBucket(name) if err != nil { // really no such bucket exists return nil, errors.NewCbKeyspaceNotFoundError(err, "keyspace "+name) } } if strings.EqualFold(cbbucket.Type, "memcached") { return nil, errors.NewCbBucketTypeNotSupportedError(nil, cbbucket.Type) } rv := &keyspace{ namespace: p, name: name, cbbucket: cbbucket, } // Initialize index providers rv.viewIndexer = newViewIndexer(rv) logging.Infof("Created New Bucket %s", name) //discover existing indexes if ierr := rv.loadIndexes(); ierr != nil { logging.Warnf("Error loading indexes for keyspace %s, Error %v", name, ierr) } var qerr errors.Error rv.gsiIndexer, qerr = gsi.NewGSIIndexer(p.site.URL(), p.Name(), name) if qerr != nil { logging.Warnf("Error loading GSI indexes for keyspace %s. Error %v", name, qerr) } return rv, nil }
func newViewIndex(name string, on datastore.IndexKey, where expression.Expression, view *viewIndexer) (*viewIndex, error) { doc, err := newDesignDoc(name, view.keyspace.Name(), on, where) if err != nil { return nil, err } inst := viewIndex{ name: name, using: datastore.VIEW, on: on, where: where, ddoc: doc, view: view, keyspace: view.keyspace, } logging.Infof("Created index %s on %s with key %v on where %v", name, view.keyspace.Name(), on, where) err = inst.putDesignDoc() if err != nil { return nil, err } err = inst.WaitForIndex() if err != nil { return nil, err } return &inst, nil }
func (view *viewIndexer) CreatePrimaryIndex(name string, with value.Value) (datastore.PrimaryIndex, errors.Error) { // if name is not provided then use default name #primary if name == "" { name = PRIMARY_INDEX } if _, exists := view.indexes[name]; exists { return nil, errors.NewCbViewExistsError(nil, name) } // if the name matches any of the unusable indexes, return an error for _, iname := range view.nonUsableIndexes { if name == iname { return nil, errors.NewCbViewExistsError(nil, "Non usuable index "+name) } } if with != nil { return nil, errors.NewCbViewsWithNotAllowedError(nil, "") } logging.Infof("Creating primary index %s", name) idx, err := newViewPrimaryIndex(view, name) if err != nil { return nil, errors.NewCbViewCreateError(err, name) } view.indexes[idx.Name()] = idx view.primary[idx.Name()] = idx return idx, nil }
func (view *viewIndexer) CreateIndex(name string, equalKey, rangeKey expression.Expressions, where expression.Expression, with value.Value) (datastore.Index, errors.Error) { if _, exists := view.indexes[name]; exists { return nil, errors.NewCbViewExistsError(nil, name) } // if the name matches any of the unusable indexes, return an error for _, iname := range view.nonUsableIndexes { if name == iname { return nil, errors.NewCbViewExistsError(nil, "Non usuable index "+name) } } if with != nil { return nil, errors.NewCbViewsWithNotAllowedError(nil, "") } logging.Infof("Creating index %s with equal key %v range key %v", name, equalKey, rangeKey) idx, err := newViewIndex(name, datastore.IndexKey(rangeKey), where, view) if err != nil { return nil, errors.NewCbViewCreateError(err, name) } view.indexes[idx.Name()] = idx return idx, nil }
func (view *viewIndexer) loadViewIndexes() errors.Error { // #alldocs implicitly exists // and recreate remaining from ddocs indexes, err := loadViewIndexes(view) if err != nil { return errors.NewCbLoadIndexesError(err, "Keyspace "+view.KeyspaceId()) } if len(indexes) == 0 { logging.Errorf("No view indexes found for bucket %s", view.keyspace.Name()) return errors.NewCbPrimaryIndexNotFoundError(nil, "Keyspace "+view.keyspace.Name()+". Create a primary index ") } for _, index := range indexes { logging.Infof("Found index on keyspace %s", (*index).KeyspaceId()) name := (*index).Name() view.indexes[name] = *index if name == PRIMARY_INDEX { view.primary[name] = (*index).(datastore.PrimaryIndex) } } return nil }
func doAuth(username, password, bucket string, requested datastore.Privilege) (bool, error) { logging.Infof(" Authenticating for bucket %s username %s password %s", bucket, username, password) creds, err := cbauth.Auth(username, password) if err != nil { return false, err } if requested == datastore.PRIV_DDL { authResult, err := creds.CanDDLBucket(bucket) if err != nil || authResult == false { return false, err } } else if requested == datastore.PRIV_WRITE { authResult, err := creds.CanAccessBucket(bucket) if err != nil || authResult == false { return false, err } } else if requested == datastore.PRIV_READ { authResult, err := creds.CanReadBucket(bucket) if err != nil || authResult == false { return false, err } } else { return false, fmt.Errorf("Invalid Privileges") } return true, nil }
func (b *keyspace) refreshIndexer(url string, poolName string) { var err error b.gsiIndexer, err = gsi.NewGSIIndexer(url, poolName, b.Name()) if err == nil { logging.Infof(" GSI Indexer loaded ") } }
// TestStub exercises the logging API end to end: every formatting entry
// point (Infof/Infop/Infom, Requestf/Requestp, Warnf/Debugp/Errorp) is
// driven both through a concrete logger instance and through the
// package-level default, across a level change and a formatter swap.
// Output is inspected manually; there are no assertions.
func TestStub(t *testing.T) {
	logger := NewLogger(os.Stdout, logging.Debug, false)
	logging.SetLogger(logger)

	// printf-style and pair/map-style messages at Info level.
	logger.Infof("This is a message from %s", "test")
	logging.Infof("This is a message from %s", "test")
	logger.Infop("This is a message from ", logging.Pair{"name", "test"}, logging.Pair{"Queue Size", 10}, logging.Pair{"Debug Mode", false})
	logging.Infop("This is a message from ", logging.Pair{"name", "test"})
	logger.Infom("This is a message from ", logging.Map{"name": "test", "Queue Size": 10, "Debug Mode": false})
	logging.Infom("This is a message from ", logging.Map{"name": "test"})

	// Request logging at assorted levels.
	logger.Requestf(logging.Warn, "This is a Request from %s", "test")
	logging.Requestf(logging.Info, "This is a Request from %s", "test")
	logger.Requestp(logging.Debug, "This is a Request from ", logging.Pair{"name", "test"})
	logging.Requestp(logging.Error, "This is a Request from ", logging.Pair{"name", "test"})

	// Raise the threshold to Warn; the sub-Warn calls below should be filtered.
	logger.SetLevel(logging.Warn)
	fmt.Printf("Log level is %s\n", logger.Level())

	logger.Requestf(logging.Warn, "This is a Request from %s", "test")
	logging.Requestf(logging.Info, "This is a Request from %s", "test")
	logger.Requestp(logging.Debug, "This is a Request from ", logging.Pair{"name", "test"})
	logging.Requestp(logging.Error, "This is a Request from ", logging.Pair{"name", "test"})

	logger.Warnf("This is a message from %s", "test")
	logging.Infof("This is a message from %s", "test")
	logger.Debugp("This is a message from ", logging.Pair{"name", "test"})
	logging.Errorp("This is a message from ", logging.Pair{"name", "test"})

	// Swap in the JSON formatter and drop back to Debug, then repeat the
	// Info/Request battery to see the same entries in JSON form.
	fmt.Printf("Changing to json formatter\n")
	logger.entryFormatter = &jsonFormatter{}
	logger.SetLevel(logging.Debug)

	logger.Infof("This is a message from %s", "test")
	logging.Infof("This is a message from %s", "test")
	logger.Infop("This is a message from ", logging.Pair{"name", "test"}, logging.Pair{"Queue Size", 10}, logging.Pair{"Debug Mode", false})
	logging.Infop("This is a message from ", logging.Pair{"name", "test"})
	logger.Infom("This is a message from ", logging.Map{"name": "test", "Queue Size": 10, "Debug Mode": false})
	logging.Infom("This is a message from ", logging.Map{"name": "test"})
	logger.Requestf(logging.Warn, "This is a Request from %s", "test")
	logging.Requestf(logging.Info, "This is a Request from %s", "test")
	logger.Requestp(logging.Debug, "This is a Request from ", logging.Pair{"name", "test"})
	logging.Requestp(logging.Error, "This is a Request from ", logging.Pair{"name", "test"})
}
func WalkViewInBatches(result chan cb.ViewRow, errs chan errors.Error, bucket *cb.Bucket, ddoc string, view string, options map[string]interface{}, batchSize int64, limit int64) { if limit != 0 && limit < batchSize { batchSize = limit } defer close(result) defer close(errs) defer func() { r := recover() if r != nil { logging.Errorf("View Walking Panic: %v\n%s", r, debug.Stack()) errs <- errors.NewCbViewsAccessError(nil, "Panic In walking view "+view) } }() options["limit"] = batchSize + 1 numRead := int64(0) ok := true for ok { logURL, err := bucket.ViewURL(ddoc, view, options) if err == nil { logging.Infof("Request View: %v", logURL) } vres, err := bucket.View(ddoc, view, options) if err != nil { errs <- errors.NewCbViewsAccessError(err, "View name "+view) return } for i, row := range vres.Rows { if int64(i) < batchSize { // dont process the last row, its just used to see if we // need to continue processing result <- row numRead += 1 } } if (int64(len(vres.Rows)) > batchSize) && (limit == 0 || (limit != 0 && numRead < limit)) { // prepare for next run skey := vres.Rows[batchSize].Key skeydocid := vres.Rows[batchSize].ID options["startkey"] = skey options["startkey_docid"] = cb.DocID(skeydocid) } else { // stop ok = false } } }
func (vi *viewIndex) Drop() errors.Error { err := vi.DropViewIndex() if err != nil { return errors.NewCbViewsDropIndexError(err, vi.Name()) } // TODO need mutex delete(vi.view.indexes, vi.name) if vi.Name() == PRIMARY_INDEX { logging.Infof(" Primary index being dropped ") delete(vi.view.primary, vi.name) } return nil }
func initCbAuth(url string) (*cb.Client, error) { transport := cbauth.WrapHTTPTransport(cb.HTTPTransport, nil) cb.HTTPClient.Transport = transport client, err := cb.ConnectWithAuth(url, cbauth.NewAuthHandler(nil)) if err != nil { return nil, err } logging.Infof(" Initialization of cbauth succeeded ") return &client, nil }
func ViewTotalRows(bucket *cb.Bucket, ddoc string, view string, options map[string]interface{}) (int64, errors.Error) { options["limit"] = 0 logURL, err := bucket.ViewURL(ddoc, view, options) if err == nil { logging.Infof("Request View: %v", logURL) } vres, err := bucket.View(ddoc, view, options) if err != nil { return 0, errors.NewCbViewsAccessError(err, "View Name"+view) } return int64(vres.TotalRows), nil }
func (b *keyspace) Delete(deletes []string) ([]string, errors.Error) { failedDeletes := make([]string, 0) actualDeletes := make([]string, 0) var err error for _, key := range deletes { if err = b.cbbucket.Delete(key); err != nil { if !isNotFoundError(err) { logging.Infof("Failed to delete key %s", key) failedDeletes = append(failedDeletes, key) } } else { actualDeletes = append(actualDeletes, key) } } if len(failedDeletes) > 0 { return actualDeletes, errors.NewCbDeleteFailedError(err, "Some keys were not deleted "+fmt.Sprintf("%v", failedDeletes)) } return actualDeletes, nil }
// Scan walks the underlying view in batches of 1000 rows (via
// WalkViewInBatches on a goroutine) and forwards each row as an IndexEntry
// on the connection's entry channel. On a walk error before any row was
// sent, it probes the bucket URI to detect a deleted bucket and, if gone,
// evicts the keyspace from the namespace cache and triggers a pool refresh.
func (vi *viewIndex) Scan(span *datastore.Span, distinct bool, limit int64, cons datastore.ScanConsistency,
	vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())

	// For primary indexes, bounds must always be strings, so we
	// can just enforce that directly

	viewOptions := map[string]interface{}{}
	viewOptions = generateViewOptions(cons, span) /*span.Range.Low, span.Range.High, span.Range.Inclusion) */
	viewRowChannel := make(chan cb.ViewRow)
	viewErrChannel := make(chan errors.Error)
	// Producer goroutine; it closes both channels when done.
	go WalkViewInBatches(viewRowChannel, viewErrChannel, vi.keyspace.cbbucket, vi.DDocName(), vi.ViewName(), viewOptions, 1000, limit)

	var viewRow cb.ViewRow
	var err errors.Error
	sentRows := false
	ok := true
	numRows := 0
	// Loop until the row channel closes (ok becomes false) or an error ends the scan.
	for ok {
		select {
		case viewRow, ok = <-viewRowChannel:
			if ok {
				entry := datastore.IndexEntry{PrimaryKey: viewRow.ID}

				// try to add the view row key as the entry key (unless this is _all_docs)
				if vi.DDocName() != "" /* FIXME && vi.IsPrimary() == false */ {
					// NOTE: this err deliberately shadows the outer errors.Error.
					lookupValue, err := convertCouchbaseViewKeyToLookupValue(viewRow.Key)
					if err == nil {
						entry.EntryKey = lookupValue
					} else {
						// Conversion failure is non-fatal: the entry is sent without a key.
						logging.Debugf("unable to convert index key to lookup value err:%v key %v", err, viewRow.Key)
					}
				}
				conn.EntryChannel() <- &entry
				sentRows = true
				numRows++
			}
		case err, ok = <-viewErrChannel:
			if err != nil {
				logging.Errorf("%v", err)
				// check to possibly detect a bucket that was already deleted
				if !sentRows {
					logging.Infof("Checking bucket URI: %v", vi.keyspace.cbbucket.URI)
					// NOTE(review): the response body from http.Get is never
					// closed on success — possible connection leak; confirm.
					_, err := http.Get(vi.keyspace.cbbucket.URI)
					if err != nil {
						logging.Errorf("%v", err)

						// remove this specific bucket from the pool cache
						vi.keyspace.namespace.lock.Lock()
						delete(vi.keyspace.namespace.keyspaceCache, vi.keyspace.Name())
						vi.keyspace.namespace.lock.Unlock()
						// close this bucket
						vi.keyspace.Release()
						// ask the pool to refresh
						vi.keyspace.namespace.refresh(true)
						// bucket doesnt exist any more
						conn.Error(errors.NewCbViewsAccessError(nil, "keyspace "+vi.keyspace.Name()))
						return
					}
				}
				conn.Error(err)
				return
			}
		}
	}

	logging.Infof("Number of entries fetched from the index %d", numRows)
}
func loadViewIndexes(v *viewIndexer) ([]*datastore.Index, error) { b := v.keyspace rows, err := b.cbbucket.GetDDocs() if err != nil { return nil, err } inames := make([]string, 0, len(rows.Rows)) nonUsableIndexes := make([]string, 0) for _, row := range rows.Rows { cdoc := row.DDoc id := cdoc.Meta["id"].(string) if strings.HasPrefix(id, "_design/ddl_") { iname := strings.TrimPrefix(id, "_design/ddl_") inames = append(inames, iname) } else if strings.HasPrefix(id, "_design/dev_") { // append this to the list of non-usuable indexes iname := strings.TrimPrefix(id, "_design/dev_") for _, name := range v.nonUsableIndexes { if iname == name { continue } } nonUsableIndexes = append(nonUsableIndexes, iname) } else if strings.HasPrefix(id, "_design/") { iname := strings.TrimPrefix(id, "_design/") for _, name := range v.nonUsableIndexes { if iname == name { continue } } nonUsableIndexes = append(nonUsableIndexes, iname) } } indexes := make([]*datastore.Index, 0, len(inames)) for _, iname := range inames { ddname := "ddl_" + iname jdoc, err := getDesignDoc(b, ddname) if err != nil { return nil, err } jview, ok := jdoc.Views[iname] if !ok { nonUsableIndexes = append(nonUsableIndexes, iname) logging.Errorf("Missing view for index %v ", iname) continue } exprlist := make([]expression.Expression, 0, len(jdoc.IndexOn)) for _, ser := range jdoc.IndexOn { if iname == PRIMARY_INDEX { doc := expression.NewIdentifier(b.Name()) meta := expression.NewMeta(doc) mdid := expression.NewField(meta, expression.NewFieldName("id")) exprlist = append(exprlist, mdid) } else { expr, err := parser.Parse(ser) if err != nil { nonUsableIndexes = append(nonUsableIndexes, iname) logging.Errorf("Cannot unmarshal expression for index %v", iname) continue } exprlist = append(exprlist, expr) } } if len(exprlist) != len(jdoc.IndexOn) { continue } ddoc := designdoc{ name: ddname, viewname: iname, mapfn: jview.Map, reducefn: jview.Reduce, } if ddoc.checksum() != jdoc.IndexChecksum { nonUsableIndexes = 
append(nonUsableIndexes, iname) logging.Errorf("Warning - checksum failed on index %v", iname) continue } var index datastore.Index logging.Infof("Found index name %v keyspace %v", iname, b.Name()) if iname == PRIMARY_INDEX { index = &viewIndex{ name: iname, keyspace: b, view: v, using: datastore.VIEW, ddoc: &ddoc, on: exprlist, } indexes = append(indexes, &index) } else { index = &viewIndex{ name: iname, keyspace: b, view: v, using: datastore.VIEW, ddoc: &ddoc, on: exprlist, } indexes = append(indexes, &index) } } v.nonUsableIndexes = nonUsableIndexes if len(indexes) == 0 { return nil, nil } return indexes, nil }
// Authorize checks every requested (keyspace, privilege) pair against the
// supplied credentials via cbauth. With no credentials, each keyspace is
// tried as its own username with an empty password. A "pool:keyspace" name
// is split, and anything in the #system pool is allowed through. Returns
// nil when all checks pass; otherwise a datastore authorization error.
func (s *site) Authorize(privileges datastore.Privileges, credentials datastore.Credentials) errors.Error {
	var authResult bool
	var err error

	if s.CbAuthInit == false {
		// cbauth is not initialized. No Authorization, access to SASL protected buckets will
		// not be allowed by couchbase server
		return nil
	}

	// if the authentication fails for any of the requested privileges return an error
	for keyspace, privilege := range privileges {
		if strings.Contains(keyspace, ":") {
			q := strings.Split(keyspace, ":")
			pool := q[0]
			keyspace = q[1]

			if strings.EqualFold(pool, "#system") {
				// trying auth on system keyspace
				return nil
			}
		}

		logging.Infof("Authenticating for keyspace %s", keyspace)

		if len(credentials) == 0 {
			// No credentials supplied: try the keyspace name with an empty password.
			authResult, err = doAuth(keyspace, "", keyspace, privilege)
			if authResult == false || err != nil {
				logging.Infof("Auth failed for keyspace %s", keyspace)
				return errors.NewDatastoreAuthorizationError(err, "Keyspace "+keyspace)
			}
		} else {
			//look for either the bucket name or the admin credentials
			// NOTE(review): map iteration order is nondeterministic, so which
			// credential is tried first varies between calls — confirm intended.
			for username, password := range credentials {
				userCreds := strings.Split(username, ":")
				if len(userCreds) > 1 && strings.EqualFold(userCreds[0], "admin") {
					authResult, err = doAuth(userCreds[1], password, keyspace, privilege)
				} else if len(userCreds) > 1 && userCreds[1] == keyspace {
					authResult, err = doAuth(userCreds[1], password, keyspace, privilege)
				} else {
					//try with empty password
					authResult, err = doAuth(keyspace, "", keyspace, privilege)
				}
				if err != nil {
					return errors.NewDatastoreAuthorizationError(err, "Keyspace "+keyspace)
				}

				// Auth succeeded
				if authResult == true {
					break
				}
				// NOTE(review): this continue is redundant (end of loop body).
				continue
			}
		}
	}

	// NOTE(review): if privileges is empty, authResult is still false here
	// and an authorization error is returned — confirm that is intended.
	if authResult == false {
		return errors.NewDatastoreAuthorizationError(err, "")
	}

	return nil
}
// performOp applies a mutation (INSERT, UPDATE, or UPSERT) for every pair
// in inserts and returns the pairs that succeeded. INSERT uses Add (fails
// if the key exists); UPDATE uses the CAS value carried in the value's
// "meta" attachment (falling back to Set when CAS is 0); UPSERT always
// uses Set. An error is returned only when no key at all succeeded.
func (b *keyspace) performOp(op int, inserts []datastore.Pair) ([]datastore.Pair, errors.Error) {

	if len(inserts) == 0 {
		return nil, errors.NewCbNoKeysInsertError(nil, ":(")
	}

	insertedKeys := make([]datastore.Pair, 0)
	var err error

	for _, kv := range inserts {
		key := kv.Key
		val := kv.Value.Actual()

		//mv := kv.Value.GetAttachment("meta")

		// TODO Need to also set meta
		switch op {

		case INSERT:
			var added bool
			// add the key to the backend
			added, err = b.cbbucket.Add(key, 0, val)
			if added == false {
				// Add returned false: the key already exists (or the call failed).
				err = errors.NewError(err, "For Key "+key)
			}

		case UPDATE:
			// check if the key exists and if so then use the cas value
			// to update the key
			var meta map[string]interface{}
			var cas float64

			// NOTE(review): these type assertions panic if the value is not
			// annotated or carries no "meta"/"cas" attachment — confirm the
			// caller guarantees this shape.
			an := kv.Value.(value.AnnotatedValue)
			meta = an.GetAttachment("meta").(map[string]interface{})

			cas = meta["cas"].(float64)
			logging.Infof("CAS Value (Update) for key %v is %v", key, float64(cas))
			if cas != 0 {
				err = b.cbbucket.Cas(key, 0, uint64(cas), val)
			} else {
				logging.Warnf("Warning: Cas value not found for key %v", key)
				err = b.cbbucket.Set(key, 0, val)
			}

		case UPSERT:
			err = b.cbbucket.Set(key, 0, val)
		}

		if err != nil {
			logging.Errorf("Failed to perform %s on key %s Error %v", opToString(op), key, err)
		} else {
			insertedKeys = append(insertedKeys, kv)
		}
	}

	// Partial success is not an error; only total failure is reported
	// (err then holds the last failure seen).
	if len(insertedKeys) == 0 {
		return nil, errors.NewCbDMLError(err, "Failed to perform "+opToString(op))
	}

	return insertedKeys, nil
}
// NewSite creates a new Couchbase site for the given url.
//
// NewDatastore connects to the cluster at u, preferring cbauth-based
// authentication: it first tries initCbAuth directly, then falls back to
// seeding cbauth from credentials embedded in the URL, and finally
// connects unauthenticated. The "default" pool is loaded into the
// namespace cache before the site is returned.
func NewDatastore(u string) (s datastore.Datastore, e errors.Error) {

	var client cb.Client
	var cbAuthInit bool

	// try and initialize cbauth
	c, err := initCbAuth(u)
	if err != nil {
		logging.Errorf(" Unable to initialize cbauth. Error %v", err)
		// NOTE(review): this `url, err :=` shadows both the net/url package
		// name and the outer err for the rest of this block — confirm intended.
		url, err := url.Parse(u)
		if err != nil {
			return nil, errors.NewCbUrlParseError(err, "url "+u)
		}

		if url.User != nil {
			password, _ := url.User.Password()
			if password == "" {
				logging.Errorf("No password found in url %s", u)
			}

			// intialize cb_auth variables manually
			logging.Infof(" Trying to init cbauth with credentials %s %s %s", url.Host, url.User.Username(), password)
			set, err := cbauth.InternalRetryDefaultInit(url.Host, url.User.Username(), password)
			if set == false || err != nil {
				logging.Errorf(" Unable to initialize cbauth variables. Error %v", err)
			} else {
				c, err = initCbAuth("http://" + url.Host)
				if err != nil {
					logging.Errorf("Unable to initliaze cbauth. Error %v", err)
				} else {
					client = *c
					cbAuthInit = true
				}
			}
		}
	} else {
		client = *c
		cbAuthInit = true
	}

	if cbAuthInit == false {
		// connect without auth
		cb.HTTPClient = &http.Client{}
		client, err = cb.Connect(u)
		if err != nil {
			return nil, errors.NewCbConnectionError(err, "url "+u)
		}
	}

	site := &site{
		client:         client,
		namespaceCache: make(map[string]*namespace),
		CbAuthInit:     cbAuthInit,
	}

	// initialize the default pool.
	// TODO can couchbase server contain more than one pool ?
	defaultPool, Err := loadNamespace(site, "default")
	if Err != nil {
		logging.Errorf("Cannot connect to default pool")
		return nil, Err
	}

	site.namespaceCache["default"] = defaultPool
	logging.Infof("New site created with url %s", u)

	return site, nil
}