func (this *JSConverter) Visit(expr expression.Expression) string { var buf bytes.Buffer s, err := expr.Accept(this) if err != nil { logging.Errorf("Unexpected error in JSConverter: %v", err) return "" } switch s := s.(type) { case string: buf.WriteString(s) for this.stack.Size() != 0 { funcExpr := this.stack.Pop().(*funcExpr) buf.WriteString(funcExpr.name) if funcExpr.operands.Front() != nil { buf.WriteString(writeOperands(funcExpr.operands)) } } case []byte: buf.WriteString(string(s)) for this.stack.Size() != 0 { funcExpr := this.stack.Pop().(*funcExpr) buf.WriteString(funcExpr.name) if funcExpr.operands.Front() != nil { buf.WriteString(writeOperands(funcExpr.operands)) } } default: buf.WriteString(s.(string)) } return buf.String() }
func doNotFound(endpoint *HttpEndpoint, w http.ResponseWriter, req *http.Request) (interface{}, errors.Error) { acctStore := endpoint.server.AccountingStore() reg := acctStore.MetricRegistry() if reg == nil { logging.Errorf("http.NotFoundHandler - nil metric registry") } else { reg.Counter(accounting.INVALID_REQUESTS).Inc(1) } return nil, nil }
func CpuTimes() (int64, int64) { ru := syscall.Rusage{} if err := syscall.Getrusage(syscall.RUSAGE_SELF, &ru); err != nil { logging.Errorf(err.Error()) return int64(0), int64(0) } newUtime := int64(ru.Utime.Nano()) newStime := int64(ru.Stime.Nano()) return newUtime, newStime }
func (pi *indexIndex) ScanEntries(requestId string, limit int64, cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) { defer close(conn.EntryChannel()) // eliminate duplicate keys keys := make(map[string]string, 64) actualStore := pi.keyspace.namespace.store.actualStore namespaceIds, err := actualStore.NamespaceIds() if err == nil { for _, namespaceId := range namespaceIds { namespace, err := actualStore.NamespaceById(namespaceId) if err == nil { keyspaceIds, err := namespace.KeyspaceIds() if err == nil { for _, keyspaceId := range keyspaceIds { keyspace, err := namespace.KeyspaceById(keyspaceId) if err == nil { indexers, err := keyspace.Indexers() if err == nil { for _, indexer := range indexers { err = indexer.Refresh() if err != nil { logging.Errorf("Refreshing indexes failed %v", err) conn.Error(errors.NewSystemDatastoreError(err, "")) // don't return here but continue processing, because other keyspaces may still be responsive. MB-15834 continue } indexIds, err := indexer.IndexIds() if err == nil { for _, indexId := range indexIds { key := fmt.Sprintf("%s/%s/%s", namespaceId, keyspaceId, indexId) keys[key] = key } } } } } } } } } } for k, _ := range keys { entry := datastore.IndexEntry{PrimaryKey: k} conn.EntryChannel() <- &entry } }
func doParse(lex *lexer) { defer func() { r := recover() if r != nil { lex.Error(fmt.Sprintf("Error while parsing: %v", r)) // Log this error buf := make([]byte, 2048) n := runtime.Stack(buf, false) logging.Errorf("Error while parsing: %v\n%s", r, string(buf[0:n])) } }() yyParse(lex) }
func (view *viewIndexer) indexesUpdated(a, b map[string]datastore.Index) bool { if len(a) != len(b) { return true } view.RLock() defer view.RUnlock() defer func() { if err := recover(); err != nil { logging.Errorf("Panic in compare", err) } }() // if the checksum of each index is the same for name, idx_a := range a { idx_b, ok := b[name] if !ok { return true } switch idx_a.(type) { case *primaryIndex: if idx_a.(*primaryIndex).signature() != idx_b.(*primaryIndex).signature() { return true } default: if idx_a.(*viewIndex).signature() != idx_b.(*viewIndex).signature() { return true } } } return false }
// WalkViewInBatches pages through the given view, sending rows on result
// until the view is exhausted, an error occurs, or stop is signalled.
// It fetches batchSize+1 rows per request: the extra row is only a probe
// for "more data" and also supplies the startkey/startkey_docid resume
// point for the next page. For non-primary views, rows whose primary key
// was already sent are deduplicated via keysSent. Both result and errs are
// closed on return (this function owns them). A panic while walking is
// recovered, logged, and reported on errs.
//
// NOTE(review): limit bounds the rows *read* (numRead), not the rows sent;
// the caller is expected to enforce its own sent-row limit.
func WalkViewInBatches(result chan cb.ViewRow, errs chan errors.Error, stop chan bool,
	bucket *cb.Bucket, ddoc string, view string, isPrimary bool,
	options map[string]interface{}, batchSize int64, limit int64) {

	// Never fetch more than the overall limit in one batch.
	if limit != 0 && limit < batchSize {
		batchSize = limit
	}

	defer close(result)
	defer close(errs)

	defer func() {
		r := recover()
		if r != nil {
			logging.Errorf("View Walking Panic: %v\n%s", r, debug.Stack())
			errs <- errors.NewCbViewsAccessError(nil, "Panic In walking view "+view)
		}
	}()

	// +1 probe row; see note above.
	options["limit"] = batchSize + 1

	numRead := int64(0)
	numSent := int64(0)
	keysSent := map[string]bool{}

	ok := true
	for ok {
		logURL, err := bucket.ViewURL(ddoc, view, options)
		if err == nil {
			logging.Debugf("Request View: %v", logURL)
		}
		vres, err := bucket.View(ddoc, view, options)
		if err != nil {
			errs <- errors.NewCbViewsAccessError(err, "View name "+view)
			return
		}

		for i, row := range vres.Rows {
			// dont process the last row, its just used to see if we
			// need to continue processing
			if int64(i) < batchSize {

				// Send the row if its primary key has not been sent
				if isPrimary || !keysSent[row.ID] {
					select {
					case result <- row:
						numSent += 1
					case <-stop:
						ok = false
						// NOTE(review): this break only exits the select,
						// not the row loop; remaining rows in this batch are
						// still scanned (but not re-sent) before ok=false
						// ends the outer loop.
						break
					}
				}

				// For non primary views, mark the row's primary key as sent
				if !isPrimary {
					keysSent[row.ID] = true
				}

				numRead += 1
			}
		}

		// Continue only if the probe row arrived (more data exists) and the
		// read limit has not been reached.
		if (int64(len(vres.Rows)) > batchSize) && (limit == 0 || (limit != 0 && numRead < limit)) {
			// prepare for next run: resume from the probe row.
			skey := vres.Rows[batchSize].Key
			skeydocid := vres.Rows[batchSize].ID

			options["startkey"] = skey
			options["startkey_docid"] = cb.DocID(skeydocid)
		} else {
			// stop
			ok = false
		}
	}
	logging.Debugf("WalkViewInBatches %s: %d rows fetched, %d rows sent", view, numRead, numSent)
}
func loadViewIndexes(v *viewIndexer) ([]*datastore.Index, error) { b := v.keyspace rows, err := b.cbbucket.GetDDocsWithRetry() if err != nil { return nil, err } inames := make([]string, 0, len(rows.Rows)) nonUsableIndexes := make([]string, 0) for _, row := range rows.Rows { cdoc := row.DDoc id := cdoc.Meta["id"].(string) if strings.HasPrefix(id, "_design/ddl_") { iname := strings.TrimPrefix(id, "_design/ddl_") inames = append(inames, iname) } else if strings.HasPrefix(id, "_design/dev_") { // append this to the list of non-usuable indexes iname := strings.TrimPrefix(id, "_design/dev_") for _, name := range v.nonUsableIndexes { if iname == name { continue } } nonUsableIndexes = append(nonUsableIndexes, iname) } else if strings.HasPrefix(id, "_design/") { iname := strings.TrimPrefix(id, "_design/") for _, name := range v.nonUsableIndexes { if iname == name { continue } } nonUsableIndexes = append(nonUsableIndexes, iname) } } indexes := make([]*datastore.Index, 0, len(inames)) for _, iname := range inames { ddname := "ddl_" + iname jdoc, err := getDesignDoc(b, ddname) if err != nil { return nil, err } jview, ok := jdoc.Views[iname] if !ok { nonUsableIndexes = append(nonUsableIndexes, iname) logging.Errorf("Missing view for index %v ", iname) continue } exprlist := make([]expression.Expression, 0, len(jdoc.IndexOn)) for _, ser := range jdoc.IndexOn { if jdoc.PrimaryIndex == true { doc := expression.NewIdentifier(b.Name()) meta := expression.NewMeta(doc) mdid := expression.NewField(meta, expression.NewFieldName("id", false)) exprlist = append(exprlist, mdid) } else { expr, err := parser.Parse(ser) if err != nil { nonUsableIndexes = append(nonUsableIndexes, iname) logging.Errorf("Cannot unmarshal expression for index %v", iname) continue } exprlist = append(exprlist, expr) } } if len(exprlist) != len(jdoc.IndexOn) { continue } var conditionExpr expression.Expression if jdoc.Condition != "" { conditionExpr, err = parser.Parse(jdoc.Condition) if err != nil { 
logging.Errorf("Unable to parse condition expression. Err %v", err) continue } } ddoc := designdoc{ name: ddname, viewname: iname, mapfn: jview.Map, reducefn: jview.Reduce, } if ddoc.checksum() != jdoc.IndexChecksum { nonUsableIndexes = append(nonUsableIndexes, iname) logging.Errorf("Warning - checksum failed on index %v", iname) continue } var index datastore.Index if jdoc.PrimaryIndex == true { index = &primaryIndex{ viewIndex{ name: iname, keyspace: b, view: v, using: datastore.VIEW, ddoc: &ddoc, on: exprlist, where: conditionExpr, isPrimary: jdoc.PrimaryIndex, }, } } else { index = &viewIndex{ name: iname, keyspace: b, view: v, using: datastore.VIEW, ddoc: &ddoc, on: exprlist, where: conditionExpr, isPrimary: jdoc.PrimaryIndex, } } indexes = append(indexes, &index) } v.nonUsableIndexes = nonUsableIndexes if len(indexes) == 0 { return nil, nil } return indexes, nil }
func (vi *viewIndex) Scan(requestId string, span *datastore.Span, distinct bool, limit int64, cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) { defer close(conn.EntryChannel()) // For primary indexes, bounds must always be strings, so we // can just enforce that directly viewOptions := map[string]interface{}{} viewOptions = generateViewOptions(cons, span) /*span.Range.Low, span.Range.High, span.Range.Inclusion) */ viewRowChannel := make(chan cb.ViewRow) viewErrChannel := make(chan errors.Error) doneChannel := make(chan bool) defer close(doneChannel) go WalkViewInBatches(viewRowChannel, viewErrChannel, doneChannel, vi.keyspace.cbbucket, vi.DDocName(), vi.ViewName(), vi.IsPrimary(), viewOptions, _BATCH_SIZE, limit) var viewRow cb.ViewRow var err errors.Error sentRows := false ok := true numRows := 0 errs := make([]error, 0, 10) for ok { select { case viewRow, ok = <-viewRowChannel: if ok { entry := datastore.IndexEntry{PrimaryKey: viewRow.ID} // try to add the view row key as the entry key (unless this is _all_docs) if vi.DDocName() != "" /* FIXME && vi.IsPrimary() == false */ { lookupValue, err := convertCouchbaseViewKeyToLookupValue(viewRow.Key) if err == nil { entry.EntryKey = lookupValue } else { errs = append(errs, fmt.Errorf("unable to convert index key to lookup value err:%v key %v", err, entry)) } } select { case conn.EntryChannel() <- &entry: sentRows = true numRows++ case <-conn.StopChannel(): logging.Debugf(" Asked to stop after sending %v rows", numRows) ok = false } } case err, ok = <-viewErrChannel: if err != nil { logging.Errorf("%v", err) // check to possibly detect a bucket that was already deleted if !sentRows { logging.Debugf("Checking bucket URI: %v", vi.keyspace.cbbucket.URI) _, err := http.Get(vi.keyspace.cbbucket.URI) if err != nil { logging.Errorf("%v", err) // remove this specific bucket from the pool cache vi.keyspace.namespace.lock.Lock() delete(vi.keyspace.namespace.keyspaceCache, 
vi.keyspace.Name()) vi.keyspace.namespace.lock.Unlock() // close this bucket vi.keyspace.Release() // ask the pool to refresh vi.keyspace.namespace.refresh(true) // bucket doesnt exist any more conn.Error(errors.NewCbViewsAccessError(nil, "keyspace "+vi.keyspace.Name()+" or view index missing")) return } } conn.Error(err) return } } } if errs != nil { logging.Debugf("Errors with converting lookup value to entry key. num errrs %v", len(errs)) } logging.Debugf("Number of entries fetched from the index %d", numRows) }
// performOp applies the given mutation (INSERT, UPDATE, or UPSERT) to each
// key/value pair against the keyspace's couchbase bucket. It returns the
// pairs that succeeded; per-key failures are logged and skipped. If no pair
// succeeds, a DML error wrapping the last per-key error is returned.
func (b *keyspace) performOp(op int, inserts []datastore.Pair) ([]datastore.Pair, errors.Error) {

	if len(inserts) == 0 {
		return nil, nil
	}

	insertedKeys := make([]datastore.Pair, 0, len(inserts))
	var err error

	for _, kv := range inserts {
		key := kv.Key
		val := kv.Value.Actual()

		//mv := kv.Value.GetAttachment("meta")

		// TODO Need to also set meta
		switch op {

		case INSERT:
			var added bool
			// add the key to the backend
			added, err = b.cbbucket.Add(key, 0, val)
			if added == false {
				// false & err == nil => given key already exists in the bucket
				if err != nil {
					err = errors.NewError(err, "Key "+key)
				} else {
					err = errors.NewError(nil, "Duplicate Key "+key)
				}
			}

		case UPDATE:
			// check if the key exists and if so then use the cas value
			// to update the key
			var meta map[string]interface{}
			var cas uint64
			var flags uint32

			// NOTE(review): assumes the value is an AnnotatedValue carrying
			// a "meta" attachment from a prior fetch — panics otherwise;
			// verify against callers.
			an := kv.Value.(value.AnnotatedValue)
			meta = an.GetAttachment("meta").(map[string]interface{})

			cas, flags, err = getMeta(key, meta)
			if err != nil {
				// Don't perform the update if the meta values are not found
				logging.Errorf("Failed to get meta values for key %v, error %v", key, err)
			} else {
				logging.Debugf("CAS Value (Update) for key %v is %v flags %v value %v", key, uint64(cas), flags, val)
				_, err = b.cbbucket.CasWithMeta(key, int(flags), 0, uint64(cas), val)
			}

		case UPSERT:
			// blind set: no CAS check
			err = b.cbbucket.Set(key, 0, val)
		}

		if err != nil {
			if isEExistError(err) {
				// CAS mismatch: someone else modified the key concurrently
				logging.Errorf("Failed to perform update on key %s. CAS mismatch due to concurrent modifications", key)
			} else {
				logging.Errorf("Failed to perform %s on key %s for Keyspace %s Error %v", opToString(op), key, b.Name(), err)
			}
		} else {
			insertedKeys = append(insertedKeys, kv)
		}
	}

	// All keys failed: surface the last error as a DML error.
	if len(insertedKeys) == 0 {
		return nil, errors.NewCbDMLError(err, "Failed to perform "+opToString(op))
	}

	return insertedKeys, nil
}
func (p *namespace) refresh(changed bool) { // trigger refresh of this pool logging.Debugf("Refreshing pool %s", p.name) newpool, err := p.site.client.GetPool(p.name) if err != nil { var client cb.Client logging.Errorf("Error updating pool name %s: Error %v", p.name, err) url := p.site.URL() /* transport := cbauth.WrapHTTPTransport(cb.HTTPTransport, nil) cb.HTTPClient.Transport = transport */ if p.site.CbAuthInit == true { client, err = cb.ConnectWithAuth(url, cbauth.NewAuthHandler(nil)) } else { client, err = cb.Connect(url) } if err != nil { logging.Errorf("Error connecting to URL %s", url) return } // check if the default pool exists newpool, err = client.GetPool(p.name) if err != nil { logging.Errorf("Retry Failed Error updating pool name %s: Error %v", p.name, err) return } p.site.client = client } p.lock.Lock() defer p.lock.Unlock() for name, ks := range p.keyspaceCache { logging.Debugf(" Checking keyspace %s", name) newbucket, err := newpool.GetBucket(name) if err != nil { changed = true ks.(*keyspace).deleted = true logging.Errorf(" Error retrieving bucket %s", name) delete(p.keyspaceCache, name) } else if ks.(*keyspace).cbbucket.UUID != newbucket.UUID { logging.Debugf(" UUid of keyspace %v uuid now %v", ks.(*keyspace).cbbucket.UUID, newbucket.UUID) // UUID has changed. Update the keyspace struct with the newbucket ks.(*keyspace).cbbucket = newbucket } // Not deleted. Check if GSI indexer is available if ks.(*keyspace).gsiIndexer == nil { ks.(*keyspace).refreshIndexer(p.site.URL(), p.Name()) } } if changed == true { p.setPool(newpool) } }
// NewSite creates a new Couchbase site for the given url. func NewDatastore(u string) (s datastore.Datastore, e errors.Error) { var client cb.Client var cbAuthInit bool // try and initialize cbauth c, err := initCbAuth(u) if err != nil { logging.Errorf(" Unable to initialize cbauth. Error %v", err) url, err := url.Parse(u) if err != nil { return nil, errors.NewCbUrlParseError(err, "url "+u) } if url.User != nil { password, _ := url.User.Password() if password == "" { logging.Errorf("No password found in url %s", u) } // intialize cb_auth variables manually logging.Infof(" Trying to init cbauth with credentials %s %s", url.Host, url.User.Username()) set, err := cbauth.InternalRetryDefaultInit(url.Host, url.User.Username(), password) if set == false || err != nil { logging.Errorf(" Unable to initialize cbauth variables. Error %v", err) } else { c, err = initCbAuth("http://" + url.Host) if err != nil { logging.Errorf("Unable to initliaze cbauth. Error %v", err) } else { client = *c cbAuthInit = true } } } } else { client = *c cbAuthInit = true } if cbAuthInit == false { // connect without auth logging.Warnf("Unable to intialize cbAuth, access to couchbase buckets may be restricted") cb.HTTPClient = &http.Client{} client, err = cb.Connect(u) if err != nil { return nil, errors.NewCbConnectionError(err, "url "+u) } } site := &site{ client: client, namespaceCache: make(map[string]*namespace), CbAuthInit: cbAuthInit, } // initialize the default pool. // TODO can couchbase server contain more than one pool ? defaultPool, Err := loadNamespace(site, "default") if Err != nil { logging.Errorf("Cannot connect to default pool") return nil, Err } site.namespaceCache["default"] = defaultPool logging.Infof("New site created with url %s", u) return site, nil }