// Wrapper to get hold of a factoid collection handle func Init() *Collection { mc := &Collection{db.Init().C(COLLECTION)} if err := mc.EnsureIndex(mgo.Index{ Key: []string{"tag", "source", "dest"}, }); err != nil { logging.Error("Couldn't create an index on markov: %s", err) } return mc }
func Init() *Collection { pc := &Collection{db.Init().C(COLLECTION)} if err := pc.EnsureIndex(mgo.Index{ Key: []string{"nick"}, Unique: true, }); err != nil { logging.Error("Couldn't create an index on push: %s", err) } return pc }
func Init() *Collection { rc := &Collection{ Collection: db.Init().C(COLLECTION), } for _, k := range []string{"remindat", "from", "to", "tell"} { if err := rc.EnsureIndexKey(k); err != nil { logging.Error("Couldn't create %s index on sp0rkle.reminders: %v", k, err) } } return rc }
// Wrapper to get hold of a factoid collection handle func Init() *Collection { fc := &Collection{ Collection: db.Init().C(COLLECTION), seen: make(map[string][]bson.ObjectId), } err := fc.EnsureIndex(mgo.Index{Key: []string{"key"}}) if err != nil { logging.Error("Couldn't create index on sp0rkle.factoids: %v", err) } return fc }
func Ns(ns string) namespace { lock.Lock() defer lock.Unlock() if conf == nil { conf = db.Init().C(COLLECTION) err := conf.EnsureIndex(mgo.Index{Key: []string{"ns", "key"}, Unique: true}) if err != nil { logging.Error("Couldn't create index on sp0rkle.conf: %s", err) } } return namespace(ns) }
func Init() *Collection { sc := &Collection{db.Init().C(COLLECTION)} indexes := [][]string{ {"key", "action"}, // For searching ... {"timestamp"}, // ... and ordering seen entries. } for _, key := range indexes { if err := sc.EnsureIndex(mgo.Index{Key: key}); err != nil { logging.Error("Couldn't create %v index on sp0rkle.seen: %v", key, err) } } return sc }
func Init() *Collection { sc := &Collection{db.Init().C(COLLECTION)} indexes := [][]string{ {"chan", "key"}, {"lines"}, } for _, key := range indexes { if err := sc.EnsureIndex(mgo.Index{Key: key}); err != nil { logging.Error("Couldn't create %v index on sp0rkle.stats: %v", key, err) } } return sc }
func Init() *Collection { uc := &Collection{db.Init().C(collection)} err := uc.EnsureIndex(mgo.Index{Key: []string{"url"}, Unique: true}) if err != nil { logging.Error("Couldn't create url index on sp0rkle.urls: %s", err) } for _, idx := range []string{"cachedas", "shortened"} { err := uc.EnsureIndex(mgo.Index{Key: []string{idx}}) if err != nil { logging.Error("Couldn't create %s index on sp0rkle.urls: %s", idx, err) } } return uc }
func Init() *Collection { kc := &Collection{db.Init().C(COLLECTION)} if err := kc.EnsureIndex(mgo.Index{ Key: []string{"key"}, Unique: true, }); err != nil { logging.Error("Couldn't create index on karma.key: %s", err) } for _, key := range []string{"score", "votes"} { if err := kc.EnsureIndexKey(key); err != nil { logging.Error("Couldn't create index on karma.%s: %s", key, err) } } return kc }
func Init() *Collection { qc := &Collection{ Collection: db.Init().C(COLLECTION), seen: make(map[string][]bson.ObjectId), maxQID: 1, } err := qc.EnsureIndex(mgo.Index{Key: []string{"qid"}, Unique: true}) if err != nil { logging.Error("Couldn't create index on sp0rkle.quotes: %v", err) } var res Quote if err := qc.Find(bson.M{}).Sort("-qid").One(&res); err == nil { qc.maxQID = int32(res.QID) } return qc }
// main wires the bot together: flags/logging, bot state, mongo, the
// command drivers, an HTTP server, then a blocking IRC connection.
// A true result from bot.Connect() triggers a re-exec of the binary.
func main() {
	flag.Parse()
	logging.InitFromFlags()

	// Initialise bot state
	bot.Init()

	// Connect to mongo
	db.Init()
	defer db.Close()

	// Add drivers
	calcdriver.Init()
	decisiondriver.Init()
	factdriver.Init()
	karmadriver.Init()
	markovdriver.Init()
	quotedriver.Init()
	reminddriver.Init()
	seendriver.Init()
	urldriver.Init()

	// Start up the HTTP server
	go http.ListenAndServe(*httpPort, nil)

	// Connect the bot to IRC and wait; reconnects are handled automatically.
	// If we get true back from the bot, re-exec the (rebuilt) binary.
	if bot.Connect() {
		// Calling syscall.Exec probably means deferred functions won't get
		// called, so disconnect from mongodb first for politeness' sake.
		db.Close()
		// If sp0rkle was run from PATH, we need to do that lookup manually.
		fq, _ := exec.LookPath(os.Args[0])
		logging.Warn("Re-executing sp0rkle with args '%v'.", os.Args)
		err := syscall.Exec(fq, os.Args, os.Environ())
		if err != nil {
			// hmmmmmm — Exec only returns on failure, so this is fatal.
			logging.Fatal("Couldn't re-exec sp0rkle: %v", err)
		}
	}
	logging.Info("Shutting down cleanly.")
}
// Wrapper to get hold of a factoid collection handle func Init() *Collection { fc := &Collection{ Collection: db.Init().C(COLLECTION), } return fc }
func main() { flag.Parse() logging.InitFromFlags() // Let's go find some mongo. db.Init() defer db.Close() qc := quotes.Init() // A communication channel of Quotes. quotes := make(chan *quotes.Quote) rows := make(chan []interface{}) // Function to feed rows into the rows channel. row_feeder := func(sth *sqlite3.Statement, row ...interface{}) { rows <- row } // Function to execute a query on the SQLite db. db_query := func(dbh *sqlite3.Database) { n, err := dbh.Execute("SELECT * FROM Quotes;", row_feeder) if err == nil { logging.Info("Read %d rows from database.\n", n) } else { logging.Error("DB error: %s\n", err) } } // Open up the quote database in a goroutine and feed rows // in on the input_rows channel. go func() { sqlite3.Session(*file, db_query) // once we've done the query, close the channel to indicate this close(rows) }() // Another goroutine to munge the rows into quotes. // This was originally done inside the SQLite callbacks, but // cgo or sqlite3 obscures runtime panics and makes fail happen. go func() { for row := range rows { parseQuote(row, quotes) } close(quotes) }() // And finally... count := 0 var err error for quote := range quotes { // ... push each quote into mongo err = qc.Insert(quote) if err != nil { logging.Error("Awww: %v\n", err) } else { if count%1000 == 0 { fmt.Printf("%d...", count) } count++ } } fmt.Println("done.") logging.Info("Inserted %d quotes.\n", count) }
// main imports factoids from a SQLite database into mongo. It runs two
// queries: first collecting the set of factoid keys used as pointers
// (values containing '*'), then streaming every row through a parsing
// goroutine and inserting the results.
func main() {
	flag.Parse()
	logging.InitFromFlags()

	// Let's go find some mongo.
	db.Init()
	defer db.Close()
	fc := factoids.Init()

	// A communication channel of Factoids.
	facts := make(chan *factoids.Factoid)
	ptrs := make(chan []interface{})
	rows := make(chan []interface{})

	// Function to execute some queries on the SQLite db and shove the results
	// into the ptrs and rows channels created above. Each channel is closed
	// as soon as its query completes, which sequences the two phases below.
	db_query := func(dbh *sqlite3.Database) {
		_, err := dbh.Execute("SELECT * FROM Factoids WHERE Value LIKE '%*%';", feeder(ptrs))
		close(ptrs)
		if err != nil {
			logging.Error("DB error: %s", err)
		}
		n, err := dbh.Execute("SELECT * FROM Factoids;", feeder(rows))
		close(rows)
		if err == nil {
			logging.Info("Read %d rows from database.", n)
		} else {
			logging.Error("DB error: %s", err)
		}
	}
	go func() {
		sqlite3.Session(*file, db_query)
	}()

	// First, synchronously read all the stuff from the ptrs channel
	// and build a set of all the factoid keys that are used as pointers.
	// This must finish before rows are consumed, because parseFactoid
	// presumably consults ptrkeys — TODO confirm against its definition.
	for row := range ptrs {
		for _, val := range parseMultipleValues(toString(row[cValue])) {
			if key, _, _ := util.FactPointer(val); key != "" {
				ptrkeys[key] = true
			}
		}
	}

	// Now run another goroutine to munge the rows into factoids.
	// This was originally done inside the SQLite callbacks, but
	// cgo or sqlite3 obscures runtime panics and makes fail happen.
	go func() {
		for row := range rows {
			parseFactoid(row, facts)
		}
		close(facts)
	}()

	// And finally...
	count := 0
	var err error
	for fact := range facts {
		// ... push each fact into mongo
		err = fc.Insert(fact)
		if err != nil {
			logging.Error("Awww: %v\n", err)
		} else {
			// Progress marker every 1000 successful inserts.
			if count%1000 == 0 {
				fmt.Printf("%d...", count)
			}
			count++
		}
	}
	fmt.Println("done.")
	logging.Info("Inserted %d factoids.\n", count)
}
func main() { flag.Parse() logging.InitFromFlags() golog.Init() // Slightly more random than 1. rand.Seed(time.Now().UnixNano() * int64(os.Getpid())) // Initialise bot state bot.Init() // Connect to mongo db.Init() defer db.Close() // Add drivers calcdriver.Init() decisiondriver.Init() factdriver.Init() karmadriver.Init() markovdriver.Init() netdriver.Init() quotedriver.Init() reminddriver.Init() seendriver.Init() statsdriver.Init() urldriver.Init() // Start up the HTTP server go http.ListenAndServe(*httpPort, nil) // Set up a signal handler to shut things down gracefully. // NOTE: net/http doesn't provide for graceful shutdown :-/ go func() { called := new(int32) sigint := make(chan os.Signal, 1) signal.Notify(sigint, syscall.SIGINT) for _ = range sigint { if atomic.AddInt32(called, 1) > 1 { logging.Fatal("Recieved multiple interrupts, dying.") } bot.Shutdown() } }() // Connect the bot to IRC and wait; reconnects are handled automatically. // If we get true back from the bot, re-exec the (rebuilt) binary. if <-bot.Connect() { // Calling syscall.Exec probably means deferred functions won't get // called, so disconnect from mongodb first for politeness' sake. db.Close() // If sp0rkle was run from PATH, we need to do that lookup manually. fq, _ := exec.LookPath(os.Args[0]) logging.Warn("Re-executing sp0rkle with args '%v'.", os.Args) err := syscall.Exec(fq, os.Args, os.Environ()) if err != nil { // hmmmmmm logging.Fatal("Couldn't re-exec sp0rkle: %v", err) } } logging.Info("Shutting down cleanly.") }
func main() { flag.Parse() logging.InitFromFlags() // Let's go find some mongo. db.Init() defer db.Close() uc := urls.Init() work := make(chan *urls.Url) quit := make(chan bool) urls := make(chan *urls.Url) rows := make(chan []interface{}) failed := 0 // If we're checking, spin up some workers if *check { for i := 1; i <= *workq; i++ { go func(n int) { count := 0 for u := range work { count++ logging.Debug("w%02d r%04d: Fetching '%s'", n, count, u.Url) res, err := http.Head(u.Url) logging.Debug("w%02d r%04d: Response '%s'", n, count, res.Status) if err == nil && res.StatusCode == 200 { urls <- u } else { failed++ } } quit <- true }(i) } } // Function to feed rows into the rows channel. row_feeder := func(sth *sqlite3.Statement, row ...interface{}) { rows <- row } // Function to execute a query on the SQLite db. db_query := func(dbh *sqlite3.Database) { n, err := dbh.Execute("SELECT * FROM urls;", row_feeder) if err == nil { logging.Info("Read %d rows from database.\n", n) } else { logging.Error("DB error: %s\n", err) } } // Open up the URL database in a goroutine and feed rows // in on the input_rows channel. go func() { sqlite3.Session(*file, db_query) // once we've done the query, close the channel to indicate this close(rows) }() // Another goroutine to munge the rows into Urls and optionally send // them to the pool of checker goroutines. go func() { for row := range rows { u := parseUrl(row) if *check { work <- u } else { urls <- u } } if *check { // Close work channel and wait for all workers to quit. close(work) for i := 0; i < *workq; i++ { <-quit } } close(urls) }() // And finally... count := 0 var err error for u := range urls { // ... push each url into mongo err = uc.Insert(u) if err != nil { logging.Error("Awww: %v\n", err) } else { if count%1000 == 0 { fmt.Printf("%d...", count) } count++ } } fmt.Println("done.") if *check { logging.Info("Dropped %d non-200 urls.", failed) } logging.Info("Inserted %d urls.", count) }