// Make sure that mail info in db and maildir are identical // TODO Synchronize messages removed in maildir to db func SynchronizeInfo(cdir CachedMaildir, db *bolt.DB) { cdir.dir.Unseen() keys, err := cdir.dir.Keys() if err != nil { log.Fatal(err) } tx, err := db.Begin(true) if err != nil { log.Fatal(err) } bucket := tx.Bucket([]byte(cdir.flagsBucket)) for _, key := range keys { ret := bucket.Get([]byte(key)) if ret == nil { // We don't know about this message flags, err := cdir.dir.Flags(key) if err != nil { println(err) } bucket.Put([]byte(key), []byte(flags)) } } tx.Commit() }
func (db *DB) Open(dbPath string) error { var ( err error boltDB *bolt.DB ) boltDB, err = bolt.Open(dbPath, 0600, nil) if err != nil { return err } var tx *bolt.Tx tx, err = boltDB.Begin(true) if err != nil { return err } bucketName := []byte(base64.StdEncoding.EncodeToString([]byte(dbPath))) _, err = tx.CreateBucketIfNotExists(bucketName) if err != nil { tx.Rollback() return err } if err = tx.Commit(); err != nil { return err } db.boltDB = boltDB db.dbPath = dbPath db.bucketName = bucketName return nil }
func createIncident(incident *Incident, db *bolt.DB) error { if incident.Time == 0 { incident.Time = time.Now().UnixNano() } key, err := time.Unix(0, incident.Time).MarshalBinary() if err != nil { return err } val, err := json.Marshal(incident) if err != nil { return err } tx, err := db.Begin(true) if err != nil { return err } b := tx.Bucket([]byte(incidentsBucket)) err = b.Put(key, val) if err != nil { tx.Rollback() return err } err = tx.Commit() if err != nil { return err } return nil }
func persist(db *bolt.DB, args []string) error { tx, err := db.Begin(true) if err != nil { return err } log.Debugln("start transaction.") for _, v := range args { log.Debugln(v) bucket := tx.Bucket([]byte(BUCKET_NAME)) if bucket == nil { bucket, err = tx.CreateBucket([]byte(BUCKET_NAME)) if err != nil { return err } count := btoi(bucket.Get([]byte(v))) count += 1 err = bucket.Put([]byte(v), itob(count)) if err != nil { tx.Rollback() return err } } } tx.Commit() return nil }
func getLastNTweets(DB *bolt.DB, TweetCnt int) ([]anaconda.Tweet, error) { var Result []anaconda.Tweet Tx, err := DB.Begin(false) if err != nil { return []anaconda.Tweet{}, err } Bucket := Tx.Bucket([]byte("tweets")) Cursor := Bucket.Cursor() k, v := Cursor.Last() for i := 0; i < TweetCnt; i++ { if k == nil { break } var tweet anaconda.Tweet if err := json.Unmarshal(v, &tweet); err != nil { return []anaconda.Tweet{}, err } Result = append(Result, tweet) k, v = Cursor.Prev() } if err := Tx.Rollback(); err != nil { return []anaconda.Tweet{}, err } return Result, nil }
func getTwitterData(DB *bolt.DB) { anaconda.SetConsumerKey("KmxA5PMS1WaVdSnJrYtq5XANb") anaconda.SetConsumerSecret("yt7ydv2qFt7BpyHrMK3UzIj7HXGGv7ezcVTnELxhgh2WMGj9IA") api := anaconda.NewTwitterApi( "268263175-deYL6a9YyDMy8YRDQI0p9NDBoKuZScRKG24Dpqkj", "PrFnSYOzsZjPYc5zhN9qeviyyHH0x1sKkiOYSSyPdWrnS") tweets, err := api.GetHomeTimeline(url.Values{ "count": {"10"}, }) if err != nil { // TODO -- Handle timeouts here panic(err) } Tx, err := DB.Begin(true) if err != nil { // TODO -- Handle this gracely panic(err) } Bucket := Tx.Bucket([]byte("tweets")) for _, t := range tweets { tweetText := t.Text if t.RetweetedStatus != nil { tweetText = t.RetweetedStatus.Text } tweetText = replaceURLS(tweetText, func(s string) string { fmt.Println("Replacing ", s) for retries := 0; retries < 3; retries++ { newS, err := getRedirectedURL(s) if err != nil { time.Sleep(time.Duration(1+retries) * time.Second) continue } return newS } return s }) if t.RetweetedStatus != nil { t.RetweetedStatus.Text = tweetText } else { t.Text = tweetText } data, err := json.Marshal(t) if err != nil { Tx.Rollback() DB.Sync() panic(err) } key := []byte(strconv.FormatInt(t.Id, 16)) if err = Bucket.Put(key, data); err != nil { Tx.Rollback() DB.Sync() panic(err) } } Tx.Commit() }
func (self *Progress) tx(db *bolt.DB) *bolt.Tx { tx, err := db.Begin(true) if err != nil { log.Println(err) return nil } return tx }
func upgrade2To3(db *bolt.DB) error { fmt.Println("Upgrading v2 to v3...") tx, err := db.Begin(true) if err != nil { return err } defer tx.Rollback() fmt.Println("Upgrading bucket", string(logBucket)) lb := tx.Bucket(logBucket) c := lb.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { var delta proto.LogDelta err := delta.Unmarshal(v) if err != nil { return err } delta.Quad.Upgrade() data, err := delta.Marshal() if err != nil { return err } lb.Put(k, data) } if err := tx.Commit(); err != nil { return err } tx, err = db.Begin(true) if err != nil { return err } defer tx.Rollback() fmt.Println("Upgrading bucket", string(nodeBucket)) nb := tx.Bucket(nodeBucket) c = nb.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { var vd proto.NodeData err := vd.Unmarshal(v) if err != nil { return err } vd.Upgrade() data, err := vd.Marshal() if err != nil { return err } nb.Put(k, data) } if err := tx.Commit(); err != nil { return err } return nil }
func prometheusLoad(db *bolt.DB) error { log.Printf("loading prometheus metrics...") tx, err := db.Begin(false) if err != nil { return err } defer tx.Rollback() b := tx.Bucket([]byte(prometheusBucket)) b.ForEach(func(k, v []byte) error { PrometheusQueryMap[string(k)] = string(v) log.Printf(" loaded prometheus metric %s=%s", string(k), string(v)) return nil }) return nil }
func prometheusAdd(db *bolt.DB, name, query string) error { log.Printf("add prometheus metrics %s=%s", name, query) tx, err := db.Begin(true) if err != nil { return err } b := tx.Bucket([]byte(prometheusBucket)) b.Put([]byte(name), []byte(query)) err = tx.Commit() if err != nil { PrometheusQueryMap[name] = query } return err }
func NewCachedMailDir(mdirPath string, db *bolt.DB) (dir CachedMaildir) { mdir := maildir.Dir(mdirPath) bucketName := path.Base(mdirPath) tx, err := db.Begin(true) if err != nil { log.Fatal(err) } _, err = tx.CreateBucketIfNotExists([]byte(bucketName + "_flags")) if err != nil { log.Fatal(err) } tx.Commit() dir = CachedMaildir{dir: &mdir, flagsBucket: bucketName + "_flags"} SynchronizeInfo(dir, db) return }
func boltSender(db *bolt.DB, tourl string) { pr, pw := io.Pipe() go func() { tx, err := db.Begin(false) if err != nil { panic(err) } _, err = tx.WriteTo(pw) if err != nil { panic(err) } pw.Close() }() fmt.Println("before post") _, err := http.Post(tourl, "plain/text", pr) fmt.Println("after post") if err != nil { panic(err) } }
// getIncidentsByDay returns the stored JSON payloads of all incidents whose
// timestamp falls within the given day, where day is a count of whole days
// since the Unix epoch.
func getIncidentsByDay(day int64, db *bolt.DB) ([]string, error) {
	// [since, until] is the 24h window for the requested day.
	since := time.Unix(0, 0).Add(time.Duration(day) * 24 * time.Hour)
	until := since.Add(24 * time.Hour)
	// Bucket keys are time.MarshalBinary encodings, so the range bounds use
	// the same encoding.
	sinceb, err := since.MarshalBinary()
	if err != nil {
		return nil, err
	}
	untilb, err := until.MarshalBinary()
	if err != nil {
		return nil, err
	}
	tx, err := db.Begin(false)
	if err != nil {
		return nil, err
	}
	defer tx.Rollback()
	eventslice := []string{}
	c := tx.Bucket([]byte(incidentsBucket)).Cursor()
	fk, _ := c.First()
	if fk == nil {
		// Bucket is empty: no incidents at all.
		return nil, fmt.Errorf("no available incidents")
	}
	if bytes.Compare(sinceb, fk) < 0 {
		// Requested day predates the earliest stored incident; tell the
		// caller which day (since the epoch) the data starts at.
		ft := &time.Time{}
		err := ft.UnmarshalBinary(fk)
		if err != nil {
			return nil, err
		}
		return nil, fmt.Errorf("no available incidents before %d", int(ft.Sub(time.Unix(0, 0)).Hours())/24)
	}
	// NOTE(review): keys are compared byte-wise; this assumes MarshalBinary
	// output sorts chronologically for the stored timestamps — confirm.
	// The upper bound is inclusive (<= untilb).
	for k, v := c.Seek(sinceb); k != nil && bytes.Compare(k, untilb) <= 0; k, v = c.Next() {
		eventslice = append(eventslice, string(v))
	}
	return eventslice, nil
}
func defragdb(odb, tmpdb *bolt.DB, limit int) error { // open a tx on tmpdb for writes tmptx, err := tmpdb.Begin(true) if err != nil { return err } // open a tx on old db for read tx, err := odb.Begin(false) if err != nil { return err } defer tx.Rollback() c := tx.Cursor() count := 0 for next, _ := c.First(); next != nil; next, _ = c.Next() { b := tx.Bucket(next) if b == nil { return fmt.Errorf("backend: cannot defrag bucket %s", string(next)) } tmpb, berr := tmptx.CreateBucketIfNotExists(next) if berr != nil { return berr } b.ForEach(func(k, v []byte) error { count++ if count > limit { err = tmptx.Commit() if err != nil { return err } tmptx, err = tmpdb.Begin(true) if err != nil { return err } tmpb = tmptx.Bucket(next) count = 0 } err = tmpb.Put(k, v) if err != nil { return err } return nil }) } return tmptx.Commit() }
// Ensure that opening a transaction while the DB is closed returns an error. func TestDB_BeginRW_Closed(t *testing.T) { var db bolt.DB tx, err := db.Begin(true) equals(t, err, bolt.ErrDatabaseNotOpen) assert(t, tx == nil, "") }
// Ensure that a database cannot open a transaction when it's not open. func TestDB_Begin_DatabaseNotOpen(t *testing.T) { var db bolt.DB tx, err := db.Begin(false) assert(t, tx == nil, "") equals(t, err, bolt.ErrDatabaseNotOpen) }
// Ensure that opening a transaction while the DB is closed returns an error. func TestDB_BeginRW_Closed(t *testing.T) { var db bolt.DB if _, err := db.Begin(true); err != bolt.ErrDatabaseNotOpen { t.Fatalf("unexpected error: %s", err) } }
func upgrade1To2(db *bolt.DB) error { fmt.Println("Upgrading v1 to v2...") tx, err := db.Begin(true) if err != nil { return err } defer tx.Rollback() fmt.Println("Upgrading bucket", string(logBucket)) lb := tx.Bucket(logBucket) c := lb.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { var delta graph.Delta err := json.Unmarshal(v, &delta) if err != nil { return err } newd := deltaToProto(delta) data, err := newd.Marshal() if err != nil { return err } lb.Put(k, data) } if err := tx.Commit(); err != nil { return err } tx, err = db.Begin(true) if err != nil { return err } defer tx.Rollback() fmt.Println("Upgrading bucket", string(nodeBucket)) nb := tx.Bucket(nodeBucket) c = nb.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { var vd proto.NodeData err := json.Unmarshal(v, &vd) if err != nil { return err } data, err := vd.Marshal() if err != nil { return err } nb.Put(k, data) } if err := tx.Commit(); err != nil { return err } for _, bucket := range [4][]byte{spoBucket, ospBucket, posBucket, cpsBucket} { tx, err = db.Begin(true) if err != nil { return err } defer tx.Rollback() fmt.Println("Upgrading bucket", string(bucket)) b := tx.Bucket(bucket) cur := b.Cursor() for k, v := cur.First(); k != nil; k, v = cur.Next() { var h proto.HistoryEntry err := json.Unmarshal(v, &h) if err != nil { return err } data, err := h.Marshal() if err != nil { return err } b.Put(k, data) } if err := tx.Commit(); err != nil { return err } } return nil }
func (cmd *CompactCommand) compact(dst, src *bolt.DB) error { // commit regularly, or we'll run out of memory for large datasets if using one transaction. var size int64 tx, err := dst.Begin(true) if err != nil { return err } defer tx.Rollback() if err := cmd.walk(src, func(keys [][]byte, k, v []byte, seq uint64) error { // On each key/value, check if we have exceeded tx size. sz := int64(len(k) + len(v)) if size+sz > cmd.TxMaxSize && cmd.TxMaxSize != 0 { // Commit previous transaction. if err := tx.Commit(); err != nil { return err } // Start new transaction. tx, err = dst.Begin(true) if err != nil { return err } size = 0 } size += sz // Create bucket on the root transaction if this is the first level. nk := len(keys) if nk == 0 { bkt, err := tx.CreateBucket(k) if err != nil { return err } if err := bkt.SetSequence(seq); err != nil { return err } return nil } // Create buckets on subsequent levels, if necessary. b := tx.Bucket(keys[0]) if nk > 1 { for _, k := range keys[1:] { b = b.Bucket(k) } } // If there is no value then this is a bucket call. if v == nil { bkt, err := b.CreateBucket(k) if err != nil { return err } if err := bkt.SetSequence(seq); err != nil { return err } return nil } // Otherwise treat it as a key/value pair. return b.Put(k, v) }); err != nil { return err } return tx.Commit() }
func loadQualifier(db *bolt.DB) { tx, err := db.Begin(true) if err != nil { log.Fatal(err) } // Use the transaction... _, err = tx.CreateBucket([]byte(BUCKET_QUALIFIER)) if err != nil { log.Fatal(err) } // Commit the transaction and check for error. if err := tx.Commit(); err != nil { log.Fatal(err) } log.Println("\tLoading Qualifier MeSH XML from file: ", *qualifierXmlFile) qualChannel, file, err := gomesh2016.QualifierChannelFromFile(*qualifierXmlFile) if err != nil { log.Fatal(err) } commitCounter := 0 counter := 0 tx, err = db.Begin(true) if err != nil { log.Fatal("error:", err) } b := tx.Bucket([]byte(BUCKET_QUALIFIER)) for qualifier := range qualChannel { counter = counter + 1 if commitCounter == commitSize { if err := tx.Commit(); err != nil { log.Fatal(err) } tx, err = db.Begin(true) if err != nil { log.Fatal("error:", err) } b = tx.Bucket([]byte(BUCKET_PHARMACOLOGICAL)) commitCounter = 0 } else { commitCounter = commitCounter + 1 } key := qualifier.QualifierUI value, err := json.Marshal(qualifier) if err != nil { log.Fatal("error:", err) } if commitCounter == commitSize { log.Println(key) log.Println(len(value)) // var b bytes.Buffer // w := gzip.NewWriter(&b) // w.Write(value) // w.Close() // log.Println(b.Len()) } //log.Println(string(value)) err = b.Put([]byte(key), value) if err != nil { log.Fatal(err) } } file.Close() if err := tx.Commit(); err != nil { log.Fatal(err) } log.Println("Loaded", counter, "quality") }
// loadPharmacological creates the pharmacological bucket, then streams
// pharmacological-action records from the MeSH XML file into it, committing
// every commitSize records to bound transaction size.
func loadPharmacological(db *bolt.DB) {
	tx, err := db.Begin(true)
	if err != nil {
		log.Fatal(err)
	}
	// Use the transaction...
	_, err = tx.CreateBucket([]byte(BUCKET_PHARMACOLOGICAL))
	if err != nil {
		log.Fatal(err)
	}
	// Commit the transaction and check for error.
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
	log.Println("\tLoading Pharmacological MeSH XML from file: ", *pharmacologicalXmlFile)
	pharmaChannel, file, err := gomesh2016.PharmacologicalChannelFromFile(*pharmacologicalXmlFile)
	if err != nil {
		log.Fatal(err)
	}
	// commitCounter counts records in the current batch; counter counts all.
	commitCounter := 0
	counter := 0
	tx, err = db.Begin(true)
	if err != nil {
		log.Fatal("error:", err)
	}
	b := tx.Bucket([]byte(BUCKET_PHARMACOLOGICAL))
	for pharma := range pharmaChannel {
		counter = counter + 1
		if commitCounter == commitSize {
			// Batch full: commit, begin a fresh transaction, and rebind b
			// to the new transaction's view of the bucket.
			if err := tx.Commit(); err != nil {
				log.Fatal(err)
			}
			tx, err = db.Begin(true)
			if err != nil {
				log.Fatal("error:", err)
			}
			b = tx.Bucket([]byte(BUCKET_PHARMACOLOGICAL))
			commitCounter = 0
		} else {
			commitCounter = commitCounter + 1
		}
		key := pharma.DescriptorReferredTo.DescriptorUI
		value, err := json.Marshal(pharma)
		if err != nil {
			log.Fatal("error:", err)
		}
		if commitCounter == commitSize {
			// Debug output for the last record of each batch.
			log.Println(key)
			log.Println(len(value))
			// var b bytes.Buffer
			// w := gzip.NewWriter(&b)
			// w.Write(value)
			// w.Close()
			// log.Println(b.Len())
		}
		//log.Println(string(value))
		err = b.Put([]byte(key), value)
		if err != nil {
			log.Fatal(err)
		}
	}
	file.Close()
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
	log.Println("Loaded", counter, "pharma")
}
// loadSupplemental creates the supplemental bucket, then streams
// supplemental records from the MeSH XML file into it, committing every
// commitSize records to bound transaction size.
func loadSupplemental(db *bolt.DB) {
	tx, err := db.Begin(true)
	if err != nil {
		log.Fatal(err)
	}
	// Use the transaction...
	_, err = tx.CreateBucket([]byte(BUCKET_SUPPLEMENTAL))
	if err != nil {
		log.Fatal(err)
	}
	// Commit the transaction and check for error.
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
	log.Println("\tLoading Supplemental MeSH XML from file: ", *supplementalXmlFile)
	suppChannel, file, err := gomesh2016.SupplementalChannelFromFile(*supplementalXmlFile)
	if err != nil {
		log.Fatal(err)
	}
	// commitCounter counts records in the current batch; counter counts all.
	commitCounter := 0
	counter := 0
	tx, err = db.Begin(true)
	if err != nil {
		log.Fatal("error:", err)
	}
	b := tx.Bucket([]byte(BUCKET_SUPPLEMENTAL))
	for s := range suppChannel {
		counter = counter + 1
		if commitCounter == commitSize {
			// Batch full: commit, begin a fresh transaction, and rebind b
			// to the new transaction's view of the bucket.
			if err := tx.Commit(); err != nil {
				log.Fatal(err)
			}
			tx, err = db.Begin(true)
			if err != nil {
				log.Fatal("error:", err)
			}
			b = tx.Bucket([]byte(BUCKET_SUPPLEMENTAL))
			commitCounter = 0
		} else {
			commitCounter = commitCounter + 1
		}
		key := s.SupplementalRecordUI
		value, err := json.Marshal(s)
		if err != nil {
			log.Fatal("error:", err)
		}
		if commitCounter == commitSize {
			// Debug output for the last record of each batch.
			log.Println(key)
			log.Println(len(value))
			// var b bytes.Buffer
			// w := gzip.NewWriter(&b)
			// w.Write(value)
			// w.Close()
			// log.Println(b.Len())
		}
		//log.Println(string(value))
		err = b.Put([]byte(key), value)
		if err != nil {
			log.Fatal(err)
		}
	}
	file.Close()
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
	log.Println("Loaded", counter, "supplemental")
}
func loadDescriptor(db *bolt.DB) { tx, err := db.Begin(true) if err != nil { log.Fatal(err) } // Use the transaction... _, err = tx.CreateBucket([]byte(BUCKET_DESCRIPTOR)) if err != nil { log.Fatal(err) } // Commit the transaction and check for error. if err := tx.Commit(); err != nil { log.Fatal(err) } log.Println("\tLoading Description MeSH XML from file: ", *descriptorXmlFile) descChannel, file, err := gomesh2016.DescriptorChannelFromFile(*descriptorXmlFile) if err != nil { log.Fatal(err) } commitCounter := 0 counter := 0 tx, err = db.Begin(true) if err != nil { log.Fatal("error:", err) } b := tx.Bucket([]byte(BUCKET_DESCRIPTOR)) size := 0 // This is the root of the tree root := InitializeNode() for desc := range descChannel { if desc.TreeNumberList != nil{ for _, treeNumber := range desc.TreeNumberList.TreeNumber { root.AddNode(treeNumber, desc.DescriptorUI, desc.DescriptorName) //log.Println("---------") //log.Println(tree) m := strings.Split(treeNumber, ".") if len(m) > size { size = len(m) } } } counter = counter + 1 if commitCounter == commitSize { if err := tx.Commit(); err != nil { log.Fatal(err) } tx, err = db.Begin(true) if err != nil { log.Fatal("error:", err) } b = tx.Bucket([]byte(BUCKET_DESCRIPTOR)) commitCounter = 0 } else { commitCounter = commitCounter + 1 } key := desc.DescriptorUI value, err := json.Marshal(desc) if err != nil { log.Fatal("error:", err) } if commitCounter == commitSize { log.Println(key) log.Println(len(value)) // var b bytes.Buffer // w := gzip.NewWriter(&b) // w.Write(value) // w.Close() // log.Println(b.Len()) } //log.Println(string(value)) err = b.Put([]byte(key), value) if err != nil { log.Fatal(err) } } file.Close() if err := tx.Commit(); err != nil { log.Fatal(err) } log.Println("Loaded", counter, "description") log.Println("Size=", size) // TREE tx, err = db.Begin(true) if err != nil { log.Fatal(err) } treeBucket := tx.Bucket([]byte(BUCKET_TREE)) root.DepthTraverse(0, Visitor, treeBucket) }