Example #1
// TODO(fluffle): reduce duplication with lib/factoids?
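// GetPseudoRand returns a pseudo-random quote whose text matches regex, or
// any quote at all when regex is empty. Ids already handed out for a regex
// are tracked in qc.seen and excluded until its matches are exhausted.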
func (qc *Collection) GetPseudoRand(regex string) *Quote {
	lookup := bson.M{}
	if regex != "" {
		// Only perform a regex lookup if there's a regex to match against,
		// otherwise this just fetches a quote at pseudo-random.
		lookup["quote"] = bson.M{"$regex": regex, "$options": "i"}
	}
	ids, ok := qc.seen[regex]
	if ok && len(ids) > 0 {
		logging.Debug("Looked for quotes matching '%s' before, %d stored id's",
			regex, len(ids))
		lookup["_id"] = bson.M{"$nin": ids}
	}
	query := qc.Find(lookup)
	count, err := query.Count()
	if err != nil {
		logging.Warn("Count for quote lookup '%s' failed: %s", regex, err)
		return nil
	}
	if count == 0 {
		if ok {
			// Looked for this regex before, but nothing matches now
			delete(qc.seen, regex)
		}
		return nil
	}
	var res Quote
	if count > 1 {
		query = query.Skip(rand.Intn(count))
	}
	if err = query.One(&res); err != nil {
		logging.Warn("Fetch for quote lookup '%s' failed: %s", regex, err)
		return nil
	}
	if count != 1 {
		if !ok {
			// Only store seen data for regexes that match more than one quote.
			logging.Debug("Creating seen data for regex '%s'.", regex)
			qc.seen[regex] = make([]bson.ObjectId, 0, count)
		}
		logging.Debug("Storing id %v for regex '%s'.", res.Id, regex)
		qc.seen[regex] = append(qc.seen[regex], res.Id)
	} else if ok {
		// If the count of results is 1 and we're storing seen data for this
		// regex, we've exhausted the possible results and should wipe it.
		logging.Debug("Zeroing seen data for regex '%s'.", regex)
		delete(qc.seen, regex)
	}
	return &res
}
Example #2
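// InfoMR map-reduces the factoids matched by lookup(key), summing their
// accessed, modified and created counts into a single FactoidInfo.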
func (fc *Collection) InfoMR(key string) *FactoidInfo {
	mr := &mgo.MapReduce{
		Map: `function() { emit("count", {
			accessed: this.accessed.count,
			modified: this.modified.count,
			created: this.created.count,
		})}`,
		Reduce: `function(k,l) {
			var sum = { accessed: 0, modified: 0, created: 0 };
			l.forEach(function(v) {
				sum.accessed += v.accessed;
				sum.modified += v.modified;
				sum.created  += v.created;
			});
			return sum;
		}`,
	}
	var res []struct {
		Id    int `bson:"_id"`
		Value FactoidInfo
	}
	info, err := fc.Find(lookup(key)).MapReduce(mr, &res)
	if err != nil || len(res) == 0 {
		logging.Warn("Info MR for '%s' failed: %v", key, err)
		return nil
	}
	logging.Debug("Info MR mapped %d, emitted %d, produced %d in %d ms.",
		info.InputCount, info.EmitCount, info.OutputCount, info.Time/1e6)
	return &res[0].Value
}
Example #3
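// githubWatcher watches #sp0rklf for github's issue notification messages
// and records a tell reminder for each nick named in a "nick:<name>" label
// on the mentioned issue.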
func githubWatcher(ctx *bot.Context) {
	// Watch #sp0rklf for IRC messages about issues coming from github.
	if ctx.Nick != "fluffle\\sp0rkle" || ctx.Target() != "#sp0rklf" ||
		!strings.Contains(ctx.Text(), "issue #") {
		return
	}

	text := util.RemoveColours(ctx.Text()) // srsly github why colours :(
	l := &util.Lexer{Input: text}
	l.Find(' ')
	text = text[l.Pos()+1:]
	l.Find('#')
	l.Next()
	issue := int(l.Number())

	labels, _, err := gh.Issues.ListLabelsByIssue(
		githubUser, githubRepo, issue, &github.ListOptions{})
	if err != nil {
		logging.Error("Error getting labels for issue %d: %v", issue, err)
		return
	}
	for _, l := range labels {
		kv := strings.Split(*l.Name, ":")
		if len(kv) == 2 && kv[0] == "nick" {
			logging.Debug("Recording tell for %s about issue %d.", kv[1], issue)
			r := reminders.NewTell("that "+text, bot.Nick(kv[1]), "github", "")
			if err := rc.Insert(r); err != nil {
				logging.Error("Error inserting github tell: %v", err)
			}
		}
	}
}
Example #4
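// GetPseudoRand returns a pseudo-random factoid for key. Ids already handed
// out for the key are tracked in fc.seen and excluded until all matches have
// been seen.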
func (fc *Collection) GetPseudoRand(key string) *Factoid {
	lookup := lookup(key)
	ids, ok := fc.seen[key]
	if ok && len(ids) > 0 {
		logging.Debug("Seen '%s' before, %d stored id's", key, len(ids))
		lookup["_id"] = bson.M{"$nin": ids}
	}
	query := fc.Find(lookup)
	count, err := query.Count()
	if err != nil {
		logging.Debug("Counting for key failed: %v", err)
		return nil
	}
	if count == 0 {
		if ok {
			// we've seen this before, but people have deleted it since.
			delete(fc.seen, key)
		}
		return nil
	}
	var res Factoid
	if count > 1 {
		query = query.Skip(rand.Intn(count))
	}
	if err = query.One(&res); err != nil {
		logging.Warn("Fetching factoid for key failed: %v", err)
		return nil
	}
	if count != 1 {
		if !ok {
			// Only store seen data for keys that have more than one factoid.
			logging.Debug("Creating seen data for key '%s'.", key)
			fc.seen[key] = make([]bson.ObjectId, 0, count)
		}
		logging.Debug("Storing id %v for key '%s'.", res.Id, key)
		fc.seen[key] = append(fc.seen[key], res.Id)
	} else if ok {
		// If the count of results is 1 and we're storing seen data for this
		// key, we've exhausted the possible results and should wipe it.
		logging.Debug("Zeroing seen data for key '%s'.", key)
		delete(fc.seen, key)
	}
	return &res
}
Example #5
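// SeenAnyMatching returns the distinct seen keys matching the
// case-insensitive regex rx.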
func (sc *Collection) SeenAnyMatching(rx string) []string {
	var res []string
	q := sc.Find(bson.M{"key": bson.M{"$regex": rx, "$options": "i"}}).Sort("-timestamp")
	if err := q.Distinct("key", &res); err != nil {
		logging.Warn("SeenAnyMatching Find error: %v", err)
		return []string{}
	}
	logging.Debug("Looked for matches, found %#v", res)
	return res
}
Example #6
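// check_rebuilder returns true when cmd was issued by the nick named in the
// rebuilder secret and, if the secret also carries a password, the message
// supplies it.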
func check_rebuilder(cmd string, ctx *Context) bool {
	s := strings.Split(GetSecret(*rebuilder), ":")
	logging.Debug("Rebuild secret: %#v", s)
	if s[0] == "" || s[0] != ctx.Nick || !strings.HasPrefix(ctx.Text(), cmd) {
		return false
	}
	if len(s) > 1 && ctx.Text() != fmt.Sprintf("%s %s", cmd, s[1]) {
		return false
	}
	return true
}
Example #7
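// Migrate copies every conf Entry out of the mongo collection and writes it
// into the Bolt store for its namespace.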
func (migrator) Migrate() error {
	var all []Entry
	mongo.Init(db.Mongo, COLLECTION, mongoIndexes)
	if err := mongo.All(db.K{}, &all); err != nil {
		return err
	}
	for _, e := range all {
		logging.Debug("Migrating conf entry %s.", e)
		Bolt(e.Ns).Value(e.Key, e.Value)
	}
	return nil
}
Example #8
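// Poll fetches the status of the configured minecraft server, stores it in
// mcs, and asks each connection context for the topic of the configured
// channel.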
func (mcs *mcStatus) Poll(ctxs []*bot.Context) {
	srv := mcConf.String(mcServer)
	logging.Debug("polling minecraft server at %s", srv)
	st, err := pollServer(srv)
	if err != nil {
		logging.Error("minecraft poll failed: %v", err)
		return
	}
	*mcs = *st
	for _, ctx := range ctxs {
		ctx.Topic(mcConf.String(mcChan))
	}
}
Example #9
// Write a \r\n terminated line of output to the connected server,
// using Hybrid's algorithm to rate limit if conn.cfg.Flood is false.
func (conn *Conn) write(line string) {
	if !conn.cfg.Flood {
		if t := conn.rateLimit(len(line)); t != 0 {
			// sleep for the current line's time value before sending it
			logging.Debug("irc.rateLimit(): Flood! Sleeping for %.2f secs.",
				t.Seconds())
			<-time.After(t)
		}
	}

	if _, err := conn.io.WriteString(line + "\r\n"); err != nil {
		logging.Error("irc.send(): %s", err.Error())
		conn.shutdown()
		return
	}
	if err := conn.io.Flush(); err != nil {
		logging.Error("irc.send(): %s", err.Error())
		conn.shutdown()
		return
	}
	logging.Debug("-> %s", line)
}
Example #10
// pollerSet handles both CONNECTED and DISCONNECTED events
func (ps *pollerSet) Handle(conn *client.Conn, line *client.Line) {
	ps.Lock()
	defer ps.Unlock()
	switch line.Cmd {
	case client.CONNECTED:
		ps.conns[conn] = context(conn, line)
		logging.Debug("Conn: # conns: %d, # pollers: %d", len(ps.conns), len(ps.set))
		if len(ps.conns) == 1 {
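			// This was the first connection up, so start all registered pollers.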
			for p := range ps.set {
				ps.set[p] = ps.startOne(p)
			}
		}
	case client.DISCONNECTED:
		delete(ps.conns, conn)
		logging.Debug("Disc: # conns: %d, # pollers: %d", len(ps.conns), len(ps.set))
		if len(ps.conns) == 0 {
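			// That was the last connection down, so stop all running pollers.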
			for p, quit := range ps.set {
				close(quit)
				ps.set[p] = nil
			}
		}
	}
}
Example #11
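// Put BSON-marshals value and stores it in this bucket under key.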
func (bucket *boltBucket) Put(key Key, value interface{}) error {
	return bucket.db.Update(func(tx *bolt.Tx) error {
		b, k, err := bucketFor(key, tx.Bucket(bucket.name))
		if err != nil {
			return err
		}
		if len(k) == 0 {
			return errors.New("put: zero length key")
		}
		data, err := bson.Marshal(value)
		if err != nil {
			return err
		}
		if bucket.debug {
			logging.Debug("Put(%s): %s = %q", bucket.name, key, data)
		}
		return b.Put(k, data)
	})
}
Example #12
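// Get reads the BSON data stored under key and unmarshals it into value.
// A missing key is not an error: value is left untouched and nil is returned.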
func (bucket *boltBucket) Get(key Key, value interface{}) error {
	return bucket.db.View(func(tx *bolt.Tx) error {
		b, k, err := bucketFor(key, tx.Bucket(bucket.name))
		if err != nil {
			return err
		}
		if len(k) == 0 {
			return errors.New("get: zero length key")
		}
		data := b.Get(k)
		if bucket.debug {
			logging.Debug("Get(%s): %s = %q", bucket.name, key, data)
		}
		if len(data) == 0 {
			return nil
		}
		return bson.Unmarshal(data, value)
	})
}
Example #13
// receive \r\n terminated lines from the peer in a loop, parsing and dispatching each one
func (conn *Conn) recv() {
	for {
		s, err := conn.io.ReadString('\n')
		if err != nil {
			logging.Error("irc.recv(): %s", err.Error())
			conn.shutdown()
			return
		}
		s = strings.Trim(s, "\r\n")
		logging.Debug("<- %s", s)

		if line := parseLine(s); line != nil {
			line.Time = time.Now()
			conn.in <- line
		} else {
			logging.Warn("irc.recv(): problems parsing line:\n  %s", s)
		}
	}
}
Example #14
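// seenCmd answers "seen <nick> [action]" queries, falling back first to
// partial nick matches and then to canned comebacks.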
func seenCmd(ctx *bot.Context) {
	s := strings.Fields(ctx.Text())
	if len(s) == 0 {
		return
	}
	if len(s) == 2 {
		// Assume we have "seen <nick> <action>"
		if n := sc.LastSeenDoing(s[0], strings.ToUpper(s[1])); n != nil {
			ctx.ReplyN("%s", n)
			return
		}
	}
	// Not specifically asking for that action, or no matching action.
	if n := sc.LastSeen(s[0]); n != nil {
		ctx.ReplyN("%s", n)
		return
	}
	// No exact matches for nick found, look for possible partial matches.
	if m := sc.SeenAnyMatching(s[0]); len(m) > 0 {
		if len(m) == 1 {
			if n := sc.LastSeen(m[0]); n != nil {
				ctx.ReplyN("1 possible match: %s", n)
			}
		} else if len(m) > 10 {
			ctx.ReplyN("%d possible matches, first 10 are: %s.",
				len(m), strings.Join(m[:10], ", "))
		} else {
			ctx.ReplyN("%d possible matches: %s.",
				len(m), strings.Join(m, ", "))
		}
		return
	}
	// No partial matches found. Check for people playing silly buggers.
	for _, w := range wittyComebacks {
		logging.Debug("Matching %#v...", w)
		if w.rx.MatchString(ctx.Text()) {
			ctx.ReplyN("%s", w.resp)
			return
		}
	}
	// Ok, probably a genuine query.
	ctx.ReplyN("Haven't seen %s before, sorry.", ctx.Text())
}
Example #15
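// fetch returns the cached entry for term if present. On a miss it fetches
// udUrl for the term, unmarshals the JSON response, caches the entry and
// returns it, pruning stale entries first.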
func (udc udCache) fetch(term string) (entry udCacheEntry, ok bool, err error) {
	udc.prune()
	entry, ok = udc[term]
	if ok {
		return
	}
	entry.result = &udResult{}
	data, err := get(fmt.Sprintf(udUrl, url.QueryEscape(term)))
	if err != nil {
		return
	}
	if err = json.Unmarshal(data, entry.result); err != nil {
		logging.Debug("JSON: %s", data)
		return
	}
	// Abuse Pages and Total for our own ends here
	entry.result.Pages, entry.result.Total = -1, len(entry.result.List)
	entry.stamp = time.Now()
	udc[term] = entry
	return
}
Example #16
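// startOne runs p in its own goroutine, calling Poll every Tick until the
// returned quit channel is closed. It returns nil if no connections are up.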
func (ps *pollerSet) startOne(p Poller) chan struct{} {
	if len(ps.conns) == 0 {
		return nil
	}
	logging.Debug("Starting poller %#v at %s intervals.", p, p.Tick())
	tick := time.NewTicker(p.Tick())
	quit := make(chan struct{})
	go func() {
		p.Start()
		for {
			select {
			case <-tick.C:
				p.Poll(ps.contexts())
			case <-quit:
				tick.Stop()
				p.Stop()
				return
			}
		}
	}()
	return quit
}
Example #17
func main() {
	flag.Parse()
	logging.InitFromFlags()

	// Let's go find some mongo.
	db.Init()
	defer db.Close()
	uc := urls.Init()

	work := make(chan *urls.Url)
	quit := make(chan bool)
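	// NB: this urls channel shadows the urls package for the rest of main.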
	urls := make(chan *urls.Url)
	rows := make(chan []interface{})
	var failed int64 // incremented via sync/atomic by the checker workers

	// If we're checking, spin up some workers
	if *check {
		for i := 1; i <= *workq; i++ {
			go func(n int) {
				count := 0
				for u := range work {
					count++
					logging.Debug("w%02d r%04d: Fetching '%s'", n, count, u.Url)
					res, err := http.Head(u.Url)
					if err != nil {
						logging.Debug("w%02d r%04d: HEAD failed: %v", n, count, err)
						atomic.AddInt64(&failed, 1)
						continue
					}
					res.Body.Close()
					logging.Debug("w%02d r%04d: Response '%s'", n, count, res.Status)
					if res.StatusCode == 200 {
						urls <- u
					} else {
						atomic.AddInt64(&failed, 1)
					}
				}
				quit <- true
			}(i)
		}
	}

	// Function to feed rows into the rows channel.
	row_feeder := func(sth *sqlite3.Statement, row ...interface{}) {
		rows <- row
	}

	// Function to execute a query on the SQLite db.
	db_query := func(dbh *sqlite3.Database) {
		n, err := dbh.Execute("SELECT * FROM urls;", row_feeder)
		if err == nil {
			logging.Info("Read %d rows from database.\n", n)
		} else {
			logging.Error("DB error: %s\n", err)
		}
	}

	// Open up the URL database in a goroutine and feed rows
	// in on the input_rows channel.
	go func() {
		sqlite3.Session(*file, db_query)
		// once we've done the query, close the channel to indicate this
		close(rows)
	}()

	// Another goroutine to munge the rows into Urls and optionally send
	// them to the pool of checker goroutines.
	go func() {
		for row := range rows {
			u := parseUrl(row)
			if *check {
				work <- u
			} else {
				urls <- u
			}
		}
		if *check {
			// Close work channel and wait for all workers to quit.
			close(work)
			for i := 0; i < *workq; i++ {
				<-quit
			}
		}
		close(urls)
	}()

	// And finally...
	count := 0
	var err error
	for u := range urls {
		// ... push each url into mongo
		err = uc.Insert(u)
		if err != nil {
			logging.Error("Awww: %v\n", err)
		} else {
			if count%1000 == 0 {
				fmt.Printf("%d...", count)
			}
			count++
		}
	}
	fmt.Println("done.")
	if *check {
		logging.Info("Dropped %d non-200 urls.", failed)
	}
	logging.Info("Inserted %d urls.", count)
}
Example #18
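// Add registers a new poller and starts it immediately if any connections
// are already up.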
func (ps *pollerSet) Add(p Poller) {
	ps.Lock()
	defer ps.Unlock()
	ps.set[p] = ps.startOne(p)
	logging.Debug("Add: # conns: %d, # pollers: %d", len(ps.conns), len(ps.set))
}