func (p *namespace) refresh(changed bool) {
	// trigger refresh of this pool
	logging.Infof("Refreshing pool %s", p.name)

	newpool, err := p.site.client.GetPool(p.name)
	if err != nil {
		var client cb.Client

		logging.Errorf("Error updating pool name %s: Error %v", p.name, err)
		url := p.site.URL()

		/*
			transport := cbauth.WrapHTTPTransport(cb.HTTPTransport, nil)
			cb.HTTPClient.Transport = transport
		*/

		if p.site.CbAuthInit {
			client, err = cb.ConnectWithAuth(url, cbauth.NewAuthHandler(nil))
		} else {
			client, err = cb.Connect(url)
		}
		if err != nil {
			logging.Errorf("Error connecting to URL %s", url)
			return
		}
		// check if the default pool exists
		newpool, err = client.GetPool(p.name)
		if err != nil {
			logging.Errorf("Retry failed: error updating pool name %s: Error %v", p.name, err)
			return
		}
		p.site.client = client
	}

	p.lock.Lock()
	defer p.lock.Unlock()
	for name, ks := range p.keyspaceCache {
		logging.Infof("Checking keyspace %s", name)
		_, err := newpool.GetBucket(name)
		if err != nil {
			changed = true
			ks.(*keyspace).deleted = true
			logging.Errorf("Error retrieving bucket %s", name)
			delete(p.keyspaceCache, name)
			continue
		}
		// Not deleted. Check if the GSI indexer is available.
		if ks.(*keyspace).gsiIndexer == nil {
			ks.(*keyspace).refreshIndexer(p.site.URL(), p.Name())
		}
	}

	if changed {
		p.setPool(newpool)
	}
}
func (view *viewIndexer) Refresh() errors.Error {
	// trigger refresh of this indexer
	logging.Infof("Refreshing indexes in keyspace %s", view.keyspace.Name())

	indexMap := make(map[string]datastore.Index)
	primaryIndexMap := make(map[string]datastore.PrimaryIndex)

	// TODO need mutex here
	view.indexes = indexMap
	view.primary = primaryIndexMap

	indexes, err := loadViewIndexes(view)
	if err != nil {
		logging.Errorf("Error loading indexes for bucket %s", view.keyspace.Name())
		return errors.NewCbViewIndexesLoadingError(err, view.keyspace.Name())
	}

	if len(indexes) == 0 {
		logging.Infof("No indexes found for bucket %s", view.keyspace.Name())
		return nil
	}

	for _, index := range indexes {
		name := (*index).Name()
		logging.Infof("Found index %s on keyspace %s", name, view.keyspace.Name())
		indexMap[name] = *index
		if name == PRIMARY_INDEX {
			primaryIndexMap[name] = (*index).(datastore.PrimaryIndex)
		}
	}

	return nil
}
func (view *viewIndexer) loadViewIndexes() errors.Error {
	// The primary view (#alldocs) implicitly exists;
	// recreate the remaining indexes from design docs.
	indexes, err := loadViewIndexes(view)
	if err != nil {
		return errors.NewCbLoadIndexesError(err, "Keyspace "+view.KeyspaceId())
	}

	if len(indexes) == 0 {
		logging.Errorf("No view indexes found for bucket %s", view.keyspace.Name())
		return errors.NewCbPrimaryIndexNotFoundError(nil, "Keyspace "+view.keyspace.Name()+". Create a primary index")
	}

	for _, index := range indexes {
		name := (*index).Name()
		logging.Infof("Found index %s on keyspace %s", name, (*index).KeyspaceId())
		view.indexes[name] = *index
		if name == PRIMARY_INDEX {
			view.primary[name] = (*index).(datastore.PrimaryIndex)
		}
	}

	return nil
}
func WalkViewInBatches(result chan cb.ViewRow, errs chan errors.Error, bucket *cb.Bucket,
	ddoc string, view string, options map[string]interface{}, batchSize int64, limit int64) {

	if limit != 0 && limit < batchSize {
		batchSize = limit
	}

	defer close(result)
	defer close(errs)

	defer func() {
		if r := recover(); r != nil {
			logging.Errorf("View walking panic: %v\n%s", r, debug.Stack())
			errs <- errors.NewCbViewsAccessError(nil, "Panic in walking view "+view)
		}
	}()

	// fetch one extra row per batch; it tells us whether to keep going
	options["limit"] = batchSize + 1

	numRead := int64(0)
	ok := true
	for ok {
		logURL, err := bucket.ViewURL(ddoc, view, options)
		if err == nil {
			logging.Infof("Request view: %v", logURL)
		}
		vres, err := bucket.View(ddoc, view, options)
		if err != nil {
			errs <- errors.NewCbViewsAccessError(err, "View name "+view)
			return
		}

		for i, row := range vres.Rows {
			if int64(i) < batchSize {
				// don't process the last row; it is only used to see if we
				// need to continue processing
				result <- row
				numRead++
			}
		}

		if int64(len(vres.Rows)) > batchSize && (limit == 0 || numRead < limit) {
			// prepare for the next batch
			skey := vres.Rows[batchSize].Key
			skeydocid := vres.Rows[batchSize].ID
			options["startkey"] = skey
			options["startkey_docid"] = cb.DocID(skeydocid)
		} else {
			// stop
			ok = false
		}
	}
}
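// A minimal consumer sketch for WalkViewInBatches, mirroring the pattern used
// by viewIndex.Scan below: drain the row channel and watch the error channel
// until the walker closes both. The function name, bucket argument, and the
// ddoc/view names here are hypothetical placeholders, not part of this package.
func exampleWalkView(bucket *cb.Bucket) {
	rows := make(chan cb.ViewRow)
	errs := make(chan errors.Error)
	go WalkViewInBatches(rows, errs, bucket, "ddl_myindex", "myindex",
		map[string]interface{}{}, 1000, 0)

	for rows != nil || errs != nil {
		select {
		case row, ok := <-rows:
			if !ok {
				rows = nil // channel drained and closed
				continue
			}
			logging.Infof("Row ID %s", row.ID)
		case e, ok := <-errs:
			if !ok {
				errs = nil // channel closed
				continue
			}
			logging.Errorf("View walk error: %v", e)
		}
	}
}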
func (vi *viewIndex) Scan(span *datastore.Span, distinct bool, limit int64,
	cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())

	// For primary indexes, bounds must always be strings, so we
	// can just enforce that directly
	viewOptions := generateViewOptions(cons, span)

	viewRowChannel := make(chan cb.ViewRow)
	viewErrChannel := make(chan errors.Error)
	go WalkViewInBatches(viewRowChannel, viewErrChannel, vi.keyspace.cbbucket,
		vi.DDocName(), vi.ViewName(), viewOptions, 1000, limit)

	var viewRow cb.ViewRow
	var err errors.Error
	sentRows := false
	ok := true
	numRows := 0
	for ok {
		select {
		case viewRow, ok = <-viewRowChannel:
			if ok {
				entry := datastore.IndexEntry{PrimaryKey: viewRow.ID}

				// try to add the view row key as the entry key (unless this is _all_docs)
				if vi.DDocName() != "" /* FIXME && vi.IsPrimary() == false */ {
					lookupValue, err := convertCouchbaseViewKeyToLookupValue(viewRow.Key)
					if err == nil {
						entry.EntryKey = lookupValue
					} else {
						logging.Debugf("unable to convert index key to lookup value err: %v key: %v", err, viewRow.Key)
					}
				}

				conn.EntryChannel() <- &entry
				sentRows = true
				numRows++
			}
		case err, ok = <-viewErrChannel:
			if err != nil {
				logging.Errorf("%v", err)
				// check to possibly detect a bucket that was already deleted
				if !sentRows {
					logging.Infof("Checking bucket URI: %v", vi.keyspace.cbbucket.URI)
					_, httpErr := http.Get(vi.keyspace.cbbucket.URI)
					if httpErr != nil {
						logging.Errorf("%v", httpErr)

						// remove this specific bucket from the pool cache
						vi.keyspace.namespace.lock.Lock()
						delete(vi.keyspace.namespace.keyspaceCache, vi.keyspace.Name())
						vi.keyspace.namespace.lock.Unlock()

						// close this bucket
						vi.keyspace.Release()

						// ask the pool to refresh
						vi.keyspace.namespace.refresh(true)

						// the bucket doesn't exist any more
						conn.Error(errors.NewCbViewsAccessError(nil, "keyspace "+vi.keyspace.Name()))
						return
					}
				}
				conn.Error(err)
				return
			}
		}
	}

	logging.Infof("Number of entries fetched from the index: %d", numRows)
}
func loadViewIndexes(v *viewIndexer) ([]*datastore.Index, error) {
	b := v.keyspace
	rows, err := b.cbbucket.GetDDocs()
	if err != nil {
		return nil, err
	}

	inames := make([]string, 0, len(rows.Rows))
	nonUsableIndexes := make([]string, 0)

	for _, row := range rows.Rows {
		cdoc := row.DDoc
		id := cdoc.Meta["id"].(string)
		if strings.HasPrefix(id, "_design/ddl_") {
			iname := strings.TrimPrefix(id, "_design/ddl_")
			inames = append(inames, iname)
		} else if strings.HasPrefix(id, "_design/dev_") {
			// append this to the list of non-usable indexes, unless already known
			iname := strings.TrimPrefix(id, "_design/dev_")
			if !containsString(v.nonUsableIndexes, iname) {
				nonUsableIndexes = append(nonUsableIndexes, iname)
			}
		} else if strings.HasPrefix(id, "_design/") {
			iname := strings.TrimPrefix(id, "_design/")
			if !containsString(v.nonUsableIndexes, iname) {
				nonUsableIndexes = append(nonUsableIndexes, iname)
			}
		}
	}

	indexes := make([]*datastore.Index, 0, len(inames))
	for _, iname := range inames {
		ddname := "ddl_" + iname
		jdoc, err := getDesignDoc(b, ddname)
		if err != nil {
			return nil, err
		}
		jview, ok := jdoc.Views[iname]
		if !ok {
			nonUsableIndexes = append(nonUsableIndexes, iname)
			logging.Errorf("Missing view for index %v", iname)
			continue
		}

		exprlist := make([]expression.Expression, 0, len(jdoc.IndexOn))

		for _, ser := range jdoc.IndexOn {
			if iname == PRIMARY_INDEX {
				// the primary index is implicitly on META().id
				doc := expression.NewIdentifier(b.Name())
				meta := expression.NewMeta(doc)
				mdid := expression.NewField(meta, expression.NewFieldName("id"))
				exprlist = append(exprlist, mdid)
			} else {
				expr, err := parser.Parse(ser)
				if err != nil {
					nonUsableIndexes = append(nonUsableIndexes, iname)
					logging.Errorf("Cannot unmarshal expression for index %v", iname)
					continue
				}
				exprlist = append(exprlist, expr)
			}
		}
		if len(exprlist) != len(jdoc.IndexOn) {
			continue
		}

		ddoc := designdoc{
			name:     ddname,
			viewname: iname,
			mapfn:    jview.Map,
			reducefn: jview.Reduce,
		}
		if ddoc.checksum() != jdoc.IndexChecksum {
			nonUsableIndexes = append(nonUsableIndexes, iname)
			logging.Errorf("Checksum failed on index %v", iname)
			continue
		}

		logging.Infof("Found index name %v keyspace %v", iname, b.Name())
		var index datastore.Index = &viewIndex{
			name:     iname,
			keyspace: b,
			view:     v,
			using:    datastore.VIEW,
			ddoc:     &ddoc,
			on:       exprlist,
		}
		indexes = append(indexes, &index)
	}
	v.nonUsableIndexes = nonUsableIndexes

	if len(indexes) == 0 {
		return nil, nil
	}
	return indexes, nil
}
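// containsString reports whether target is already present in list. It is a
// small helper introduced here (not part of the original package) to back the
// duplicate checks in loadViewIndexes above.
func containsString(list []string, target string) bool {
	for _, s := range list {
		if s == target {
			return true
		}
	}
	return false
}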
func (b *keyspace) performOp(op int, inserts []datastore.Pair) ([]datastore.Pair, errors.Error) {
	if len(inserts) == 0 {
		return nil, errors.NewCbNoKeysInsertError(nil, ":(")
	}

	insertedKeys := make([]datastore.Pair, 0)
	var err error

	for _, kv := range inserts {
		key := kv.Key
		val := kv.Value.Actual()

		//mv := kv.Value.GetAttachment("meta")
		// TODO Need to also set meta
		switch op {
		case INSERT:
			var added bool
			// add the key to the backend
			added, err = b.cbbucket.Add(key, 0, val)
			if !added {
				err = errors.NewError(err, "For Key "+key)
			}
		case UPDATE:
			// check if the key exists, and if so use its CAS value
			// to update the key
			an := kv.Value.(value.AnnotatedValue)
			meta := an.GetAttachment("meta").(map[string]interface{})
			cas := meta["cas"].(float64)
			logging.Infof("CAS value (update) for key %v is %v", key, cas)
			if cas != 0 {
				err = b.cbbucket.Cas(key, 0, uint64(cas), val)
			} else {
				logging.Warnf("CAS value not found for key %v", key)
				err = b.cbbucket.Set(key, 0, val)
			}
		case UPSERT:
			err = b.cbbucket.Set(key, 0, val)
		}

		if err != nil {
			logging.Errorf("Failed to perform %s on key %s: Error %v", opToString(op), key, err)
		} else {
			insertedKeys = append(insertedKeys, kv)
		}
	}

	if len(insertedKeys) == 0 {
		return nil, errors.NewCbDMLError(err, "Failed to perform "+opToString(op))
	}

	return insertedKeys, nil
}
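// opToString is referenced above but defined elsewhere in the package; a
// minimal sketch consistent with the INSERT/UPDATE/UPSERT constants might
// look like the following. The exact strings returned are assumptions, not
// the original values.
func opToString(op int) string {
	switch op {
	case INSERT:
		return "insert"
	case UPDATE:
		return "update"
	case UPSERT:
		return "upsert"
	}
	return "unknown operation"
}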
// NewDatastore creates a new Couchbase datastore for the given url.
func NewDatastore(u string) (s datastore.Datastore, e errors.Error) {
	var client cb.Client
	var cbAuthInit bool

	// try to initialize cbauth
	c, err := initCbAuth(u)
	if err != nil {
		logging.Errorf("Unable to initialize cbauth. Error %v", err)
		url, err := url.Parse(u)
		if err != nil {
			return nil, errors.NewCbUrlParseError(err, "url "+u)
		}
		if url.User != nil {
			password, _ := url.User.Password()
			if password == "" {
				logging.Errorf("No password found in url %s", u)
			}

			// initialize cbauth variables manually
			logging.Infof("Trying to init cbauth with credentials %s %s %s", url.Host, url.User.Username(), password)
			set, err := cbauth.InternalRetryDefaultInit(url.Host, url.User.Username(), password)
			if !set || err != nil {
				logging.Errorf("Unable to initialize cbauth variables. Error %v", err)
			} else {
				c, err = initCbAuth("http://" + url.Host)
				if err != nil {
					logging.Errorf("Unable to initialize cbauth. Error %v", err)
				} else {
					client = *c
					cbAuthInit = true
				}
			}
		}
	} else {
		client = *c
		cbAuthInit = true
	}

	if !cbAuthInit {
		// connect without auth
		cb.HTTPClient = &http.Client{}
		client, err = cb.Connect(u)
		if err != nil {
			return nil, errors.NewCbConnectionError(err, "url "+u)
		}
	}

	site := &site{
		client:         client,
		namespaceCache: make(map[string]*namespace),
		CbAuthInit:     cbAuthInit,
	}

	// initialize the default pool.
	// TODO can couchbase server contain more than one pool ?
	defaultPool, er := loadNamespace(site, "default")
	if er != nil {
		logging.Errorf("Cannot connect to default pool")
		return nil, er
	}

	site.namespaceCache["default"] = defaultPool
	logging.Infof("New site created with url %s", u)

	return site, nil
}
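// A minimal usage sketch for NewDatastore: construct the datastore and look
// up the default namespace. The function name and URL are placeholders, and
// it assumes the datastore.Datastore interface exposes NamespaceByName, as
// in the query engine.
func exampleNewDatastore() {
	ds, err := NewDatastore("http://localhost:8091") // placeholder cluster address
	if err != nil {
		logging.Errorf("Cannot create datastore: %v", err)
		return
	}
	ns, err := ds.NamespaceByName("default")
	if err != nil {
		logging.Errorf("Cannot load default namespace: %v", err)
		return
	}
	logging.Infof("Datastore ready, namespace %s", ns.Name())
}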