func doAuth(username, password, bucket string, requested datastore.Privilege) (bool, error) {

	logging.Debugf(" Authenticating for bucket %s username %s password %s", bucket, username, password)
	creds, err := cbauth.Auth(username, password)
	if err != nil {
		return false, err
	}

	var authResult bool

	switch requested {
	case datastore.PRIV_DDL:
		authResult, err = creds.CanDDLBucket(bucket)
	case datastore.PRIV_WRITE:
		authResult, err = creds.CanAccessBucket(bucket)
	case datastore.PRIV_READ:
		authResult, err = creds.CanReadBucket(bucket)
	default:
		return false, fmt.Errorf("Invalid Privileges")
	}

	if err != nil || !authResult {
		return false, err
	}

	return true, nil
}
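// Illustrative usage sketch for doAuth above; the credentials and bucket name
// are hypothetical and not from the original source:
//
//	ok, err := doAuth("Administrator", "secret", "beer-sample", datastore.PRIV_READ)
//	if err != nil || !ok {
//		// read access to the bucket was denied
//	}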
func (view *viewIndexer) PrimaryIndexes() ([]datastore.PrimaryIndex, errors.Error) {

	view.RLock()
	defer view.RUnlock()

	logging.Debugf(" Number of primary indexes %v", len(view.primary))
	rv := make([]datastore.PrimaryIndex, 0, len(view.primary))
	for _, index := range view.primary {
		rv = append(rv, index)
	}
	return rv, nil
}
func ViewTotalRows(bucket *cb.Bucket, ddoc string, view string, options map[string]interface{}) (int64, errors.Error) {
	options["limit"] = 0

	logURL, err := bucket.ViewURL(ddoc, view, options)
	if err == nil {
		logging.Debugf("Request View: %v", logURL)
	}
	vres, err := bucket.View(ddoc, view, options)
	if err != nil {
		return 0, errors.NewCbViewsAccessError(err, "View name "+view)
	}

	return int64(vres.TotalRows), nil
}
func (b *keyspace) Fetch(keys []string) ([]datastore.AnnotatedPair, []errors.Error) {

	if len(keys) == 0 {
		return nil, nil
	}

	bulkResponse, err := b.cbbucket.GetBulk(keys)
	if err != nil {
		// Ignore "Not found" keys
		if !isNotFoundError(err) {
			return nil, []errors.Error{errors.NewCbBulkGetError(err, "")}
		}
	}

	i := 0
	rv := make([]datastore.AnnotatedPair, len(bulkResponse))
	for k, v := range bulkResponse {
		var doc datastore.AnnotatedPair
		doc.Key = k
		av := value.NewAnnotatedValue(value.NewValue(v.Body))

		// The flags are stored big-endian in the first four bytes of Extras
		metaFlags := binary.BigEndian.Uint32(v.Extras[0:4])
		metaType := "json"
		if av.Type() == value.BINARY {
			metaType = "base64"
		}
		av.SetAttachment("meta", map[string]interface{}{
			"id":    k,
			"cas":   v.Cas,
			"type":  metaType,
			"flags": metaFlags,
		})

		// Uncomment when needed
		//logging.Debugf("CAS Value for key %v is %v flags %v", k, uint64(v.Cas), metaFlags)

		doc.Value = av
		rv[i] = doc
		i++
	}

	logging.Debugf("Fetched %d keys ", i)

	return rv, nil
}
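// Illustrative sketch, not part of the original source: the "meta" attachment
// set by Fetch above has this shape for a hypothetical JSON document with key
// "doc-1" (the CAS and flags values are made up for illustration):
//
//	map[string]interface{}{
//		"id":    "doc-1",
//		"cas":   uint64(0x15d61f29c1e0000),
//		"type":  "json",              // "base64" when the value is binary
//		"flags": uint32(0x2000000),
//	}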
func (view *viewIndexer) CreatePrimaryIndex(requestId, name string, with value.Value) (
	datastore.PrimaryIndex, errors.Error) {

	// if the name is not provided, use the default name #primary
	if name == "" {
		name = PRIMARY_INDEX
	}

	view.Refresh()
	if _, exists := view.indexes[name]; exists {
		return nil, errors.NewCbViewExistsError(nil, name)
	}

	// if the name matches any of the unusable indexes, return an error
	for _, iname := range view.nonUsableIndexes {
		if name == iname {
			return nil, errors.NewCbViewExistsError(nil, "Non usable index "+name)
		}
	}

	if with != nil {
		return nil, errors.NewCbViewsWithNotAllowedError(nil, "")
	}

	logging.Debugf("Creating primary index %s", name)

	idx, err := newViewPrimaryIndex(view, name)
	if err != nil {
		return nil, errors.NewCbViewCreateError(err, name)
	}

	view.Lock()
	defer view.Unlock()

	view.indexes[idx.Name()] = idx
	view.primary[idx.Name()] = idx

	return idx, nil
}
func generateViewOptions(cons datastore.ScanConsistency, span *datastore.Span) map[string]interface{} {
	viewOptions := map[string]interface{}{}

	if span != nil {
		logging.Debugf("Scan range. %v", span)
		low := span.Range.Low
		high := span.Range.High
		inclusion := span.Range.Inclusion

		if low != nil {
			viewOptions["startkey"] = encodeValuesAsMapKey(low)
			// if the low bound is excluded, start after all doc ids at that key
			if inclusion == datastore.NEITHER || inclusion == datastore.HIGH {
				viewOptions["startkey_docid"] = MAX_ID
			}
		}

		if high != nil {
			viewOptions["endkey"] = encodeValuesAsMapKey(high)
			// if the high bound is excluded, stop before any doc id at that key
			if inclusion == datastore.NEITHER || inclusion == datastore.LOW {
				viewOptions["endkey_docid"] = MIN_ID
			}
		}

		if inclusion == datastore.BOTH || inclusion == datastore.HIGH {
			viewOptions["inclusive_end"] = true
		}
	}

	if cons == datastore.SCAN_PLUS || cons == datastore.AT_PLUS {
		viewOptions["stale"] = "false"
	} else if cons == datastore.UNBOUNDED {
		viewOptions["stale"] = "ok"
	}

	return viewOptions
}
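// Illustrative sketch, not part of the original source: for a hypothetical
// span with Low = 10, High = 20, Inclusion = datastore.LOW and
// cons = datastore.SCAN_PLUS, generateViewOptions above yields (with the
// bounds encoded by encodeValuesAsMapKey):
//
//	map[string]interface{}{
//		"startkey":     /* encoded 10 */,  // low bound, included
//		"endkey":       /* encoded 20 */,  // high bound, excluded
//		"endkey_docid": MIN_ID,            // stop before any doc at the high key
//		"stale":        "false",           // SCAN_PLUS requires an up-to-date view
//	}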
func (view *viewIndexer) CreateIndex(requestId, name string, seekKey, rangeKey expression.Expressions,
	where expression.Expression, with value.Value) (datastore.Index, errors.Error) {

	view.Refresh()
	if _, exists := view.indexes[name]; exists {
		return nil, errors.NewCbViewExistsError(nil, name)
	}

	// if the name matches any of the unusable indexes, return an error
	for _, iname := range view.nonUsableIndexes {
		if name == iname {
			return nil, errors.NewCbViewExistsError(nil, "Non usable index "+name)
		}
	}

	logging.Debugf("Creating index %s with seek key %v range key %v", name, seekKey, rangeKey)

	var idx datastore.Index
	var err error
	if with != nil {
		// the WITH clause is expected to name an existing view map (as a string)
		idx, err = newViewIndexFromExistingMap(name, with.Actual().(string), rangeKey, view)
	} else {
		idx, err = newViewIndex(name, rangeKey, where, view)
	}
	if err != nil {
		return nil, errors.NewCbViewCreateError(err, name)
	}

	view.Lock()
	defer view.Unlock()

	view.indexes[idx.Name()] = idx

	return idx, nil
}
func WalkViewInBatches(result chan cb.ViewRow, errs chan errors.Error, stop chan bool,
	bucket *cb.Bucket, ddoc string, view string, isPrimary bool,
	options map[string]interface{}, batchSize int64, limit int64) {

	if limit != 0 && limit < batchSize {
		batchSize = limit
	}

	defer close(result)
	defer close(errs)

	defer func() {
		if r := recover(); r != nil {
			logging.Errorf("View Walking Panic: %v\n%s", r, debug.Stack())
			errs <- errors.NewCbViewsAccessError(nil, "Panic in walking view "+view)
		}
	}()

	// fetch one extra row per batch to detect whether more rows remain
	options["limit"] = batchSize + 1

	numRead := int64(0)
	numSent := int64(0)
	keysSent := map[string]bool{}

	ok := true
	for ok {
		logURL, err := bucket.ViewURL(ddoc, view, options)
		if err == nil {
			logging.Debugf("Request View: %v", logURL)
		}
		vres, err := bucket.View(ddoc, view, options)
		if err != nil {
			errs <- errors.NewCbViewsAccessError(err, "View name "+view)
			return
		}

		for i, row := range vres.Rows {
			// don't process the last (extra) row; it is only used to see
			// if we need to continue processing
			if int64(i) >= batchSize {
				break
			}

			// Send the row if its primary key has not been sent
			if isPrimary || !keysSent[row.ID] {
				select {
				case result <- row:
					numSent++
				case <-stop:
					ok = false
				}
				if !ok {
					break
				}
			}

			// For non-primary views, mark the row's primary key as sent
			if !isPrimary {
				keysSent[row.ID] = true
			}
			numRead++
		}

		if ok && int64(len(vres.Rows)) > batchSize && (limit == 0 || numRead < limit) {
			// prepare for the next batch, restarting at the extra row
			options["startkey"] = vres.Rows[batchSize].Key
			options["startkey_docid"] = cb.DocID(vres.Rows[batchSize].ID)
		} else {
			// stop
			ok = false
		}
	}

	logging.Debugf("WalkViewInBatches %s: %d rows fetched, %d rows sent", view, numRead, numSent)
}
func (vi *viewIndex) Scan(requestId string, span *datastore.Span, distinct bool, limit int64,
	cons datastore.ScanConsistency, vector timestamp.Vector, conn *datastore.IndexConnection) {
	defer close(conn.EntryChannel())

	// For primary indexes, bounds must always be strings, so we
	// can just enforce that directly
	viewOptions := generateViewOptions(cons, span)

	viewRowChannel := make(chan cb.ViewRow)
	viewErrChannel := make(chan errors.Error)
	doneChannel := make(chan bool)
	defer close(doneChannel)

	go WalkViewInBatches(viewRowChannel, viewErrChannel, doneChannel, vi.keyspace.cbbucket,
		vi.DDocName(), vi.ViewName(), vi.IsPrimary(), viewOptions, _BATCH_SIZE, limit)

	var viewRow cb.ViewRow
	var err errors.Error
	sentRows := false
	ok := true
	numRows := 0
	errs := make([]error, 0, 10)

	for ok {
		select {
		case viewRow, ok = <-viewRowChannel:
			if ok {
				entry := datastore.IndexEntry{PrimaryKey: viewRow.ID}

				// try to add the view row key as the entry key (unless this is _all_docs)
				if vi.DDocName() != "" /* FIXME && vi.IsPrimary() == false */ {
					lookupValue, err := convertCouchbaseViewKeyToLookupValue(viewRow.Key)
					if err == nil {
						entry.EntryKey = lookupValue
					} else {
						errs = append(errs, fmt.Errorf("unable to convert index key to lookup value err:%v key %v", err, entry))
					}
				}

				select {
				case conn.EntryChannel() <- &entry:
					sentRows = true
					numRows++
				case <-conn.StopChannel():
					logging.Debugf(" Asked to stop after sending %v rows", numRows)
					ok = false
				}
			}
		case err, ok = <-viewErrChannel:
			if err != nil {
				logging.Errorf("%v", err)
				// check to possibly detect a bucket that was already deleted
				if !sentRows {
					logging.Debugf("Checking bucket URI: %v", vi.keyspace.cbbucket.URI)
					_, err := http.Get(vi.keyspace.cbbucket.URI)
					if err != nil {
						logging.Errorf("%v", err)

						// remove this specific bucket from the pool cache
						vi.keyspace.namespace.lock.Lock()
						delete(vi.keyspace.namespace.keyspaceCache, vi.keyspace.Name())
						vi.keyspace.namespace.lock.Unlock()
						// close this bucket
						vi.keyspace.Release()
						// ask the pool to refresh
						vi.keyspace.namespace.refresh(true)
						// the bucket doesn't exist any more
						conn.Error(errors.NewCbViewsAccessError(nil, "keyspace "+vi.keyspace.Name()+" or view index missing"))
						return
					}
				}
				conn.Error(err)
				return
			}
		}
	}

	if len(errs) > 0 {
		logging.Debugf("Errors converting lookup value to entry key. num errs %v", len(errs))
	}

	logging.Debugf("Number of entries fetched from the index %d", numRows)
}
func (b *keyspace) performOp(op int, inserts []datastore.Pair) ([]datastore.Pair, errors.Error) {

	if len(inserts) == 0 {
		return nil, nil
	}

	insertedKeys := make([]datastore.Pair, 0, len(inserts))
	var err error

	for _, kv := range inserts {
		key := kv.Key
		val := kv.Value.Actual()

		//mv := kv.Value.GetAttachment("meta")

		// TODO Need to also set meta
		switch op {

		case INSERT:
			var added bool
			// add the key to the backend
			added, err = b.cbbucket.Add(key, 0, val)
			if !added {
				// added == false && err == nil => the given key already exists in the bucket
				if err != nil {
					err = errors.NewError(err, "Key "+key)
				} else {
					err = errors.NewError(nil, "Duplicate Key "+key)
				}
			}

		case UPDATE:
			// check if the key exists and, if so, use the CAS value
			// to update the key
			var meta map[string]interface{}
			var cas uint64
			var flags uint32

			an := kv.Value.(value.AnnotatedValue)
			meta = an.GetAttachment("meta").(map[string]interface{})

			cas, flags, err = getMeta(key, meta)
			if err != nil {
				// don't perform the update if the meta values are not found
				logging.Errorf("Failed to get meta values for key %v, error %v", key, err)
			} else {
				logging.Debugf("CAS Value (Update) for key %v is %v flags %v value %v", key, cas, flags, val)
				_, err = b.cbbucket.CasWithMeta(key, int(flags), 0, cas, val)
			}

		case UPSERT:
			err = b.cbbucket.Set(key, 0, val)
		}

		if err != nil {
			if isEExistError(err) {
				logging.Errorf("Failed to perform update on key %s. CAS mismatch due to concurrent modifications", key)
			} else {
				logging.Errorf("Failed to perform %s on key %s for Keyspace %s Error %v", opToString(op), key, b.Name(), err)
			}
		} else {
			insertedKeys = append(insertedKeys, kv)
		}
	}

	if len(insertedKeys) == 0 {
		return nil, errors.NewCbDMLError(err, "Failed to perform "+opToString(op))
	}

	return insertedKeys, nil
}
func (p *namespace) refresh(changed bool) {
	// trigger refresh of this pool
	logging.Debugf("Refreshing pool %s", p.name)

	newpool, err := p.site.client.GetPool(p.name)
	if err != nil {
		var client cb.Client

		logging.Errorf("Error updating pool name %s: Error %v", p.name, err)
		url := p.site.URL()

		/*
			transport := cbauth.WrapHTTPTransport(cb.HTTPTransport, nil)
			cb.HTTPClient.Transport = transport
		*/

		if p.site.CbAuthInit {
			client, err = cb.ConnectWithAuth(url, cbauth.NewAuthHandler(nil))
		} else {
			client, err = cb.Connect(url)
		}
		if err != nil {
			logging.Errorf("Error connecting to URL %s", url)
			return
		}
		// check if the default pool exists
		newpool, err = client.GetPool(p.name)
		if err != nil {
			logging.Errorf("Retry failed: Error updating pool name %s: Error %v", p.name, err)
			return
		}
		p.site.client = client
	}

	p.lock.Lock()
	defer p.lock.Unlock()
	for name, ks := range p.keyspaceCache {
		logging.Debugf(" Checking keyspace %s", name)
		newbucket, err := newpool.GetBucket(name)
		if err != nil {
			changed = true
			ks.(*keyspace).deleted = true
			logging.Errorf(" Error retrieving bucket %s", name)
			delete(p.keyspaceCache, name)
		} else if ks.(*keyspace).cbbucket.UUID != newbucket.UUID {
			logging.Debugf(" UUID of keyspace %v changed to %v", ks.(*keyspace).cbbucket.UUID, newbucket.UUID)
			// UUID has changed. Update the keyspace struct with the new bucket
			ks.(*keyspace).cbbucket = newbucket
		}
		// Not deleted. Check if the GSI indexer is available
		if ks.(*keyspace).gsiIndexer == nil {
			ks.(*keyspace).refreshIndexer(p.site.URL(), p.Name())
		}
	}

	if changed {
		p.setPool(newpool)
	}
}
func (s *site) Authorize(privileges datastore.Privileges, credentials datastore.Credentials) errors.Error {
	var authResult bool
	var err error

	if !s.CbAuthInit {
		// cbauth is not initialized. Access to SASL protected buckets will be
		// denied by the couchbase server
		logging.Warnf("CbAuth not initialized")
		return nil
	}

	// if the authentication fails for any of the requested privileges, return an error
	for keyspace, privilege := range privileges {
		if strings.Contains(keyspace, ":") {
			q := strings.Split(keyspace, ":")
			pool := q[0]
			keyspace = q[1]

			if strings.EqualFold(pool, "#system") {
				// trying auth on the system keyspace
				return nil
			}
		}

		logging.Debugf("Authenticating for keyspace %s", keyspace)
		if len(credentials) == 0 {
			authResult, err = doAuth(keyspace, "", keyspace, privilege)
			if !authResult || err != nil {
				logging.Infof("Auth failed for keyspace %s", keyspace)
				return errors.NewDatastoreAuthorizationError(err, "Keyspace "+keyspace)
			}
		} else {
			// look for either the bucket name or the admin credentials
			for username, password := range credentials {
				var un string
				userCreds := strings.Split(username, ":")
				if len(userCreds) == 1 {
					un = userCreds[0]
				} else {
					un = userCreds[1]
				}

				logging.Debugf(" Credentials %v %v", un, userCreds)

				if strings.EqualFold(un, "Administrator") || strings.EqualFold(userCreds[0], "admin") {
					authResult, err = doAuth(un, password, keyspace, privilege)
				} else if un != "" && password != "" {
					authResult, err = doAuth(un, password, keyspace, privilege)
				} else {
					// try with an empty password
					authResult, err = doAuth(keyspace, "", keyspace, privilege)
				}

				if err != nil {
					return errors.NewDatastoreAuthorizationError(err, "Keyspace "+keyspace)
				}

				// auth succeeded
				if authResult {
					break
				}
			}
		}
	}

	if !authResult {
		return errors.NewDatastoreAuthorizationError(err, "")
	}

	return nil
}