// Add a single sentence to the database func (ns *Nonsentence) addSentence(tx *bolt.Tx, sentence string) error { wordsBucket := tx.Bucket([]byte("words")) startsBucket := tx.Bucket([]byte("starts")) if (wordsBucket == nil) || (startsBucket == nil) { return fmt.Errorf("Buckets not found") } // Split sentence on whitespace var words = strings.Fields(sentence) if len(words) < 3 { // log.Printf("Ignoring small sentence: %v", sentence) return nil } // Store words in wordsBucket for i := 2; i < len(words); i++ { if err := storeWords(wordsBucket, words[i-2], words[i-1], words[i]); err != nil { return err } } if err := storeWords(wordsBucket, words[len(words)-2], words[len(words)-1], ""); err != nil { return err } // Store starts in startsBucket key := []byte(words[0] + " " + words[1]) if err := startsBucket.Put(key, []byte{}); err != nil { return err } return nil }
// Retrieves a key from the database and verifies that it is what is expected. func simulateGetHandler(tx *bolt.Tx, qdb *QuickDB) { // Randomly retrieve an existing exist. keys := qdb.Rand() if len(keys) == 0 { return } // Retrieve root bucket. b := tx.Bucket(keys[0]) if b == nil { panic(fmt.Sprintf("bucket[0] expected: %08x\n", trunc(keys[0], 4))) } // Drill into nested buckets. for _, key := range keys[1 : len(keys)-1] { b = b.Bucket(key) if b == nil { panic(fmt.Sprintf("bucket[n] expected: %v -> %v\n", keys, key)) } } // Verify key/value on the final bucket. expected := qdb.Get(keys) actual := b.Get(keys[len(keys)-1]) if !bytes.Equal(actual, expected) { fmt.Println("=== EXPECTED ===") fmt.Println(expected) fmt.Println("=== ACTUAL ===") fmt.Println(actual) fmt.Println("=== END ===") panic("value mismatch") } }
//Del deletes one key-value pair. func Del(tx *bolt.Tx, bucket string, key []byte) error { b := tx.Bucket([]byte(bucket)) if b == nil { return errors.New("bucket not found " + bucket) } return b.Delete(key) }
func EntrySave(tx *bolt.Tx, entry DbEntry, key string) error { godbc.Require(tx != nil) godbc.Require(len(key) > 0) // Access bucket b := tx.Bucket([]byte(entry.BucketName())) if b == nil { err := errors.New("Unable to access db") logger.Err(err) return err } // Save device entry to db buffer, err := entry.Marshal() if err != nil { logger.Err(err) return err } // Save data using the id as the key err = b.Put([]byte(key), buffer) if err != nil { logger.Err(err) return err } return nil }
// saveRepository saves a repository in the store. func (s *Store) saveRepository(tx *bolt.Tx, r *internal.Repository) error { buf, err := proto.Marshal(r) if err != nil { return err } return tx.Bucket([]byte("repositories")).Put([]byte(r.GetID()), buf) }
// removeFileContract removes a file contract from the database.
//
// In debug builds, inconsistencies (contract not in the db, delete errors)
// panic; in release builds they are ignored.
func removeFileContract(tx *bolt.Tx, id types.FileContractID) {
	// Delete the file contract entry.
	fcBucket := tx.Bucket(FileContracts)
	fcBytes := fcBucket.Get(id[:])
	// Sanity check - should not be removing a file contract not in the db.
	if build.DEBUG && fcBytes == nil {
		panic("nil file contract")
	}
	err := fcBucket.Delete(id[:])
	if build.DEBUG && err != nil {
		panic(err)
	}

	// Delete the entry for the file contract's expiration. The portion of
	// 'fcBytes' used to determine the expiration bucket id is the
	// byte-representation of the file contract window end, which always
	// appears at bytes 48-56.
	expirationBucketID := append(prefixFCEX, fcBytes[48:56]...)
	expirationBucket := tx.Bucket(expirationBucketID)
	expirationBytes := expirationBucket.Get(id[:])
	// NOTE(review): unlike every other sanity check in this function, this
	// panic is NOT gated on build.DEBUG — confirm whether that asymmetry
	// is intentional.
	if expirationBytes == nil {
		panic(errNilItem)
	}
	err = expirationBucket.Delete(id[:])
	if build.DEBUG && err != nil {
		panic(err)
	}
}
func (m *Manager) persistProvider(tx *bolt.Tx, id string) error { // Note: This method does *not* include re-serializing per-volume state, // because we assume that hasn't changed unless the change request // for the volume came through us and was handled elsewhere already. provider, ok := m.providers[id] if !ok { return tx.Bucket([]byte("providers")).DeleteBucket([]byte(id)) } providersBucket, err := m.getProviderBucket(tx, id) if err != nil { return fmt.Errorf("could not persist provider info to boltdb: %s", err) } pspec := &volume.ProviderSpec{} pspec.Kind = provider.Kind() b, err := provider.MarshalGlobalState() if err != nil { return fmt.Errorf("failed to serialize provider info: %s", err) } pspec.Config = b b, err = json.Marshal(pspec) if err != nil { return fmt.Errorf("failed to serialize provider info: %s", err) } err = providersBucket.Put([]byte("global"), b) if err != nil { return fmt.Errorf("could not persist provider info to boltdb: %s", err) } return nil }
func getReview(tx *bolt.Tx, id int) (review.R, error) { var ( revisions []review.Revision review review.R ) metaData, err := getMetaData(tx) if err != nil { return review, err } root := tx.Bucket(rootKey) if root == nil { return review, ErrNoDB } reviews := root.Bucket(reviewsKey) if reviews == nil { return review, ErrNoDB } reviewBkt, err := getReviewBucket(tx, id) if err != nil { return review, err } review.Summary = metaData.Summaries[strconv.Itoa(id)] rev := reviewBkt.Get(revisionsKey) dec := gob.NewDecoder(bytes.NewReader(rev)) if err := dec.Decode(&revisions); err != nil { return review, err } review.Revisions = revisions return review, nil }
func SetCounterparty(tx *bolt.Tx, cpt *core.Counterparty) error { b, err := json.Marshal(cpt) if err != nil { return err } err = tx.Bucket([]byte("Counterparties")).Put([]byte(cpt.Pubkey), b) if err != nil { return err } // Relations b, err = json.Marshal(cpt.Judge) if err != nil { return err } err = tx.Bucket([]byte("Judges")).Put(cpt.Judge.Pubkey, b) if err != nil { return err } return nil }
// removeTriple removes a triple from the indices. If the triple
// contains any terms unique to that triple, they will also be removed.
func (db *DB) removeTriple(tx *bolt.Tx, s, p, o uint32) error {
	// TODO think about what to do if present in one index but
	// not in another: maybe panic? Cause It's a bug that should be fixed.

	// Each triple is stored in three permuted indices (SPO, OSP, POS).
	// Entries are keyed by the first two terms; the value is a roaring
	// bitmap of all third-position terms.
	indices := []struct {
		k1 uint32
		k2 uint32
		v  uint32
		bk []byte
	}{
		{s, p, o, bucketSPO},
		{o, s, p, bucketOSP},
		{p, o, s, bucketPOS},
	}
	// Reused 8-byte key buffer: 4 big bytes for k1 followed by 4 for k2.
	key := make([]byte, 8)
	for _, i := range indices {
		bkt := tx.Bucket(i.bk)
		copy(key, u32tob(i.k1))
		copy(key[4:], u32tob(i.k2))
		bo := bkt.Get(key)
		if bo == nil {
			// TODO should never happen, return bug error?
			return ErrNotFound
		}
		// Decode the bitmap of third-position terms for this key.
		bitmap := roaring.NewBitmap()
		_, err := bitmap.ReadFrom(bytes.NewReader(bo))
		if err != nil {
			return err
		}
		// CheckedRemove reports whether the value was actually present.
		hasTriple := bitmap.CheckedRemove(i.v)
		if !hasTriple {
			// TODO should never happen, return bug error?
			return ErrNotFound
		}
		// Remove from index if bitmap is empty
		if bitmap.GetCardinality() == 0 {
			err = bkt.Delete(key)
			if err != nil {
				return err
			}
		} else {
			// Otherwise write the shrunken bitmap back in place.
			var b bytes.Buffer
			_, err = bitmap.WriteTo(&b)
			if err != nil {
				return err
			}
			err = bkt.Put(key, b.Bytes())
			if err != nil {
				return err
			}
		}
	}
	//atomic.AddInt64(&db.numTr, -1)

	// Drop any terms that no longer appear in any triple.
	return db.removeOrphanedTerms(tx, s, p, o)
}
// nav renders the navigation header (the database file name as an <h1>)
// to w.
// NOTE(review): this function is generated from nav.ego by the ego template
// compiler (see the //line directives); edit the template, not this output.
//line nav.ego:1
func nav(w io.Writer, tx *bolt.Tx) error {
//line nav.ego:2
	if _, err := fmt.Fprintf(w, "\n\n"); err != nil {
		return err
	}
//line nav.ego:4
	if _, err := fmt.Fprintf(w, "\n"); err != nil {
		return err
	}
//line nav.ego:5
	if _, err := fmt.Fprintf(w, "\n\n"); err != nil {
		return err
	}
//line nav.ego:6
	if _, err := fmt.Fprintf(w, "<h1>"); err != nil {
		return err
	}
//line nav.ego:6
	if _, err := fmt.Fprintf(w, "%v", filepath.Base(tx.DB().Path())); err != nil {
		return err
	}
//line nav.ego:6
	if _, err := fmt.Fprintf(w, "</h1>\n"); err != nil {
		return err
	}
	return nil
}
// addTerm stores term in the terms bucket under a freshly allocated uint32
// id and indexes it (encoded term -> id) for reverse lookup. If the term is
// already present, its existing id is returned.
func (db *DB) addTerm(tx *bolt.Tx, term rdf.Term) (id uint32, err error) {
	bt := db.encode(term)
	if id, err = db.getIDb(tx, bt); err == nil {
		// Term is already in database
		return id, nil
	} else if err != ErrNotFound {
		// Some other IO error occurred
		return 0, err
	}

	// get a new ID from the bucket's monotonic sequence
	bkt := tx.Bucket(bucketTerms)
	n, err := bkt.NextSequence()
	if err != nil {
		return 0, err
	}
	// Refuse to grow past the id space the uint32-keyed indices rely on.
	if n > MaxTerms {
		return 0, ErrDBFull
	}
	id = uint32(n)
	idb := u32tob(uint32(n))

	// store term and index it
	err = bkt.Put(idb, bt)
	if err != nil {
		return 0, err
	}
	bkt = tx.Bucket(bucketIdxTerms)
	err = bkt.Put(bt, idb)
	return id, err
}
func depersistMessage(tx *bolt.Tx, id int64) error { content_bucket, err := tx.CreateBucketIfNotExists(MESSAGE_CONTENT_BUCKET) if err != nil { return err } return content_bucket.Delete(binaryId(id)) }
// validSiacoins checks that the siacoin inputs and outputs are valid in the
// context of the current consensus set.
func validSiacoins(tx *bolt.Tx, t types.Transaction) error {
	scoBucket := tx.Bucket(SiacoinOutputs)
	var inputSum types.Currency
	for _, sci := range t.SiacoinInputs {
		// Check that the input spends an existing output.
		scoBytes := scoBucket.Get(sci.ParentID[:])
		if scoBytes == nil {
			return errMissingSiacoinOutput
		}

		// Check that the unlock conditions match the required unlock hash.
		var sco types.SiacoinOutput
		err := encoding.Unmarshal(scoBytes, &sco)
		// A decode failure here would mean corrupt consensus data; it only
		// panics in debug builds.
		if build.DEBUG && err != nil {
			panic(err)
		}
		if sci.UnlockConditions.UnlockHash() != sco.UnlockHash {
			return errWrongUnlockConditions
		}

		// Accumulate the value of every spent output.
		inputSum = inputSum.Add(sco.Value)
	}
	// Inputs must exactly equal the transaction's total output sum.
	if inputSum.Cmp(t.SiacoinOutputSum()) != 0 {
		return errSiacoinInputOutputMismatch
	}
	return nil
}
// addBlockMap adds a processed block to the block map. func addBlockMap(tx *bolt.Tx, pb *processedBlock) { id := pb.Block.ID() err := tx.Bucket(BlockMap).Put(id[:], encoding.Marshal(*pb)) if build.DEBUG && err != nil { panic(err) } }
func SetAccount(tx *bolt.Tx, acct *core.Account) error { b, err := json.Marshal(acct) if err != nil { return err } err = tx.Bucket([]byte("Accounts")).Put([]byte(acct.Pubkey), b) if err != nil { return err } // Relations b, err = json.Marshal(acct.Judge) if err != nil { return err } err = tx.Bucket([]byte("Judges")).Put(acct.Judge.Pubkey, b) if err != nil { return err } return nil }
// addFileContract adds a file contract to the database and records an entry
// for its expiration height.
//
// NOTE(review): the previous doc comment claimed "An error is returned if
// the file contract is already in the database", but the function returns
// nothing — duplicates and write errors panic in debug builds only.
func addFileContract(tx *bolt.Tx, id types.FileContractID, fc types.FileContract) {
	// Add the file contract to the database.
	fcBucket := tx.Bucket(FileContracts)
	// Sanity check - should not be adding a zero-payout file contract.
	if build.DEBUG && fc.Payout.IsZero() {
		panic("adding zero-payout file contract")
	}
	// Sanity check - should not be adding a file contract already in the db.
	if build.DEBUG && fcBucket.Get(id[:]) != nil {
		panic("repeat file contract")
	}
	err := fcBucket.Put(id[:], encoding.Marshal(fc))
	if build.DEBUG && err != nil {
		panic(err)
	}

	// Add an entry for when the file contract expires. The bucket id is
	// the FCEX prefix followed by the encoded window end, so contracts
	// expiring at the same height share one bucket.
	expirationBucketID := append(prefixFCEX, encoding.Marshal(fc.WindowEnd)...)
	expirationBucket, err := tx.CreateBucketIfNotExists(expirationBucketID)
	if build.DEBUG && err != nil {
		panic(err)
	}
	err = expirationBucket.Put(id[:], []byte{})
	if build.DEBUG && err != nil {
		panic(err)
	}
}
func SetChannel(tx *bolt.Tx, ch *core.Channel) error { b, err := json.Marshal(ch) if err != nil { return err } err = tx.Bucket(Channels).Put([]byte(ch.ChannelId), b) if err != nil { return err } // Relations // Judge err = SetJudge(tx, ch.Judge) if err != nil { return err } // Accounts err = SetAccount(tx, ch.Accounts[0]) if err != nil { return err } err = SetAccount(tx, ch.Accounts[1]) if err != nil { return err } return nil }
// createDSCOBucket creates a bucket for the delayed siacoin outputs at the // input height. func createDSCOBucket(tx *bolt.Tx, bh types.BlockHeight) { bucketID := append(prefixDSCO, encoding.Marshal(bh)...) _, err := tx.CreateBucket(bucketID) if build.DEBUG && err != nil { panic(err) } }
func GetChannels(tx *bolt.Tx) ([]*core.Channel, error) { var err error chs := []*core.Channel{} err = tx.Bucket(Channels).ForEach(func(k, v []byte) error { ch := &core.Channel{} err = json.Unmarshal(v, ch) if err != nil { return err } err = PopulateChannel(tx, ch) if err != nil { return errors.New("error populating channel") } chs = append(chs, ch) return nil }) if err != nil { return nil, errors.New("database error") } return chs, nil }
func createBucket(name []byte, tx *bolt.Tx) error { _, err := tx.CreateBucketIfNotExists(name) if err != nil { return fmt.Errorf("create bucket: %s", err) } return nil }
func (bs *boltStorage) bucket(tx *bolt.Tx) (b *bolt.Bucket, err error) { b = tx.Bucket(dbBucket) if b == nil { b, err = tx.CreateBucketIfNotExists(dbBucket) } return b, err }
func (s *Store) putOfferDates(tx *bolt.Tx, hash string, ages []OfferAge) error { data, err := json.Marshal(&ages) if err != nil { return err } return tx.Bucket(offerDatesBucket).Put([]byte(hash), data) }
func (qs *QuadStore) UpdateValueKeyBy(name string, amount int64, tx *bolt.Tx) error { value := ValueData{name, amount} b := tx.Bucket(nodeBucket) b.FillPercent = localFillPercent key := qs.createValueKeyFor(name) data := b.Get(key) if data != nil { // Node exists in the database -- unmarshal and update. err := json.Unmarshal(data, &value) if err != nil { glog.Errorf("Error: couldn't reconstruct value: %v", err) return err } value.Size += amount } // Are we deleting something? if value.Size <= 0 { value.Size = 0 } // Repackage and rewrite. bytes, err := json.Marshal(&value) if err != nil { glog.Errorf("Couldn't write to buffer for value %s: %s", name, err) return err } err = b.Put(key, bytes) return err }
// upgrade is responsible for checking the version of the data store // and upgrading itself if necessary. func upgrade(tx *bolt.Tx) error { bVersion := tx.Bucket(miscBucket).Get(versionKey) if bVersion[0] != latestStoreVersion { return errors.New("Unrecognized version of data store.") } return nil }
func (qs *QuadStore) WriteHorizonAndSize(tx *bolt.Tx) error { buf := new(bytes.Buffer) err := binary.Write(buf, binary.LittleEndian, qs.size) if err != nil { glog.Errorf("Couldn't convert size!") return err } b := tx.Bucket(metaBucket) b.FillPercent = localFillPercent werr := b.Put([]byte("size"), buf.Bytes()) if werr != nil { glog.Error("Couldn't write size!") return werr } buf.Reset() err = binary.Write(buf, binary.LittleEndian, qs.horizon) if err != nil { glog.Errorf("Couldn't convert horizon!") } werr = b.Put([]byte("horizon"), buf.Bytes()) if werr != nil { glog.Error("Couldn't write horizon!") return werr } return err }
// Inserts a key into the database. func simulatePutHandler(tx *bolt.Tx, qdb *QuickDB) { var err error keys, value := randKeys(), randValue() // Retrieve root bucket. b := tx.Bucket(keys[0]) if b == nil { b, err = tx.CreateBucket(keys[0]) if err != nil { panic("create bucket: " + err.Error()) } } // Create nested buckets, if necessary. for _, key := range keys[1 : len(keys)-1] { child := b.Bucket(key) if child != nil { b = child } else { b, err = b.CreateBucket(key) if err != nil { panic("create bucket: " + err.Error()) } } } // Insert into database. if err := b.Put(keys[len(keys)-1], value); err != nil { panic("put: " + err.Error()) } // Insert into in-memory database. qdb.Put(keys, value) }
func (q *Queue) DepersistBoltTx(tx *bolt.Tx) error { bucket, err := tx.CreateBucketIfNotExists(QUEUE_BUCKET_NAME) if err != nil { return fmt.Errorf("create bucket: %s", err) } return persist.DepersistOneBoltTx(bucket, q.Name) }
// deleteRoll deletes the given AutoRollIssue from the database within the // given transaction. func deleteRoll(tx *bolt.Tx, a *autoroll.AutoRollIssue) error { rolls := tx.Bucket(BUCKET_ROLLS) rollsByDate := tx.Bucket(BUCKET_ROLLS_BY_DATE) // Don't trust the created time of the passed-in roll; use the one we already have in the DB. serialized := rolls.Get(rollKey(a)) if serialized == nil { return fmt.Errorf("The given issue (%d) does not exist in %s", a.Issue, string(BUCKET_ROLLS)) } var oldIssue autoroll.AutoRollIssue if err := json.Unmarshal(serialized, &oldIssue); err != nil { return err } oldByDate := rollsByDate.Get(timeKey(&oldIssue)) if oldByDate == nil { return fmt.Errorf("The given issue (%d) does not exist in %s", a.Issue, string(BUCKET_ROLLS_BY_DATE)) } if err := rollsByDate.Delete(timeKey(&oldIssue)); err != nil { return err } if err := rolls.Delete(rollKey(a)); err != nil { return err } return nil }
// fullSync is an internal version of the FullSync method which allows
// callers to sync the contents of an OpenChannel while re-using an existing
// database transaction.
func (c *OpenChannel) fullSync(tx *bolt.Tx) error {
	// TODO(roasbeef): add helper funcs to create scoped update

	// First fetch the top level bucket which stores all data related to
	// current, active channels.
	chanBucket, err := tx.CreateBucketIfNotExists(openChannelBucket)
	if err != nil {
		return err
	}

	// Within this top level bucket, fetch the bucket dedicated to storing
	// open channel data specific to the remote node.
	nodePub := c.IdentityPub.SerializeCompressed()
	nodeChanBucket, err := chanBucket.CreateBucketIfNotExists(nodePub)
	if err != nil {
		return err
	}

	// Add this channel ID to the node's active channel index if
	// it doesn't already exist.
	// NOTE: the local chanIDBucket deliberately shadows the package-level
	// bucket key of the same name from here on; the RHS reference below is
	// the key (evaluated before the shadowing declaration takes effect).
	chanIDBucket, err := nodeChanBucket.CreateBucketIfNotExists(chanIDBucket)
	if err != nil {
		return err
	}
	var b bytes.Buffer
	if err := writeOutpoint(&b, c.ChanID); err != nil {
		return err
	}
	// Only insert the index entry if it is not already present.
	if chanIDBucket.Get(b.Bytes()) == nil {
		if err := chanIDBucket.Put(b.Bytes(), nil); err != nil {
			return err
		}
	}

	return putOpenChannel(chanBucket, nodeChanBucket, c)
}