Example #1
// exitOnRecover is used to circumvent the recover handler that net/http
// installs. We need to exit in order to get restarted by the init
// system/supervisor and get into a clean state again.
func exitOnRecover() {
	if r := recover(); r != nil {
		// This mimics go/src/net/http/server.go.
		const size = 64 << 10
		buf := make([]byte, size)
		buf = buf[:runtime.Stack(buf, false)]
		glog.Errorf("http: panic serving: %v\n%s", r, buf)
		glog.Flush()
		os.Exit(1)
	}
}
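Example #2 wraps every HTTP handler in exitOnRecover* helpers, which are not part of this excerpt. The following is a minimal sketch of how they could be built on top of exitOnRecover, assuming the net/http and github.com/julienschmidt/httprouter packages used elsewhere in these examples; names and exact signatures are inferred from their use below, not taken from the original source. Each wrapper defers exitOnRecover so that a panic in a handler is logged with a stack trace and terminates the process instead of being swallowed by net/http’s own recover.

// Sketch (assumption): wrappers that install exitOnRecover around handlers.
func exitOnRecoverHandler(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer exitOnRecover()
		h.ServeHTTP(w, r)
	})
}

func exitOnRecoverHandleFunc(h func(http.ResponseWriter, *http.Request)) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer exitOnRecover()
		h(w, r)
	})
}

func exitOnRecoverHandle(h httprouter.Handle) httprouter.Handle {
	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
		defer exitOnRecover()
		h(w, r, ps)
	}
}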
Example #2
func main() {
	flag.Usage = func() {
		// It is unfortunate that we need to re-implement flag.PrintDefaults(),
		// but I cannot see any other way to achieve the grouping of flags.
		// (A sketch of printDefault follows after this example.)
		fmt.Fprintf(os.Stderr, "RobustIRC server (= node)\n")
		fmt.Fprintf(os.Stderr, "\n")
		fmt.Fprintf(os.Stderr, "The following flags are REQUIRED:\n")
		printDefault(flag.Lookup("network_name"))
		printDefault(flag.Lookup("network_password"))
		printDefault(flag.Lookup("peer_addr"))
		printDefault(flag.Lookup("tls_cert_path"))
		printDefault(flag.Lookup("tls_key_path"))
		fmt.Fprintf(os.Stderr, "\n")
		fmt.Fprintf(os.Stderr, "The following flags are only relevant when bootstrapping the network (once):\n")
		printDefault(flag.Lookup("join"))
		printDefault(flag.Lookup("singlenode"))
		fmt.Fprintf(os.Stderr, "\n")
		fmt.Fprintf(os.Stderr, "The following flags are optional:\n")
		printDefault(flag.Lookup("dump_canary_state"))
		printDefault(flag.Lookup("dump_heap_profile"))
		printDefault(flag.Lookup("canary_compaction_start"))
		printDefault(flag.Lookup("listen"))
		printDefault(flag.Lookup("raftdir"))
		printDefault(flag.Lookup("tls_ca_file"))
		printDefault(flag.Lookup("version"))
		fmt.Fprintf(os.Stderr, "\n")
		fmt.Fprintf(os.Stderr, "The following flags are optional and provided by glog:\n")
		printDefault(flag.Lookup("alsologtostderr"))
		printDefault(flag.Lookup("log_backtrace_at"))
		printDefault(flag.Lookup("log_dir"))
		printDefault(flag.Lookup("log_total_bytes"))
		printDefault(flag.Lookup("logtostderr"))
		printDefault(flag.Lookup("stderrthreshold"))
		printDefault(flag.Lookup("v"))
		printDefault(flag.Lookup("vmodule"))
	}
	flag.Parse()

	// Store logs in -raftdir, unless otherwise specified.
	if flag.Lookup("log_dir").Value.String() == "" {
		flag.Set("log_dir", *raftDir)
	}

	defer glog.Flush()
	glog.MaxSize = 64 * 1024 * 1024
	glog.CopyStandardLogTo("INFO")

	log.Printf("RobustIRC %s\n", Version)
	if *version {
		return
	}

	if _, err := os.Stat(filepath.Join(*raftDir, "deletestate")); err == nil {
		if err := os.RemoveAll(*raftDir); err != nil {
			log.Fatal(err)
		}
		if err := os.Mkdir(*raftDir, 0700); err != nil {
			log.Fatal(err)
		}
		log.Printf("Deleted %q because %q existed\n", *raftDir, filepath.Join(*raftDir, "deletestate"))
	}

	if err := outputstream.DeleteOldDatabases(*raftDir); err != nil {
		log.Fatalf("Could not delete old outputstream databases: %v\n", err)
	}

	if err := deleteOldCompactionDatabases(*raftDir); err != nil {
		glog.Errorf("Could not delete old compaction databases: %v (ignoring)\n", err)
	}

	log.Printf("Initializing RobustIRC…\n")

	if *networkPassword == "" {
		*networkPassword = os.Getenv("ROBUSTIRC_NETWORK_PASSWORD")
	}
	if *networkPassword == "" {
		log.Fatalf("-network_password not set. You MUST protect your network.\n")
	}
	digest := sha1.New()
	digest.Write([]byte(*networkPassword))
	passwordHash := "{SHA}" + base64.StdEncoding.EncodeToString(digest.Sum(nil))

	if *network == "" {
		log.Fatalf("-network_name not set, but required.\n")
	}

	if *peerAddr == "" {
		log.Printf("-peer_addr not set, initializing to %q. Make sure %q is a host:port string that other raft nodes can connect to!\n", *listen, *listen)
		*peerAddr = *listen
	}

	ircServer = ircserver.NewIRCServer(*raftDir, *network, time.Now())

	transport := rafthttp.NewHTTPTransport(
		*peerAddr,
		// No deadline, otherwise installing snapshots fails.
		robusthttp.Client(*networkPassword, false),
		nil,
		"")

	peerStore = raft.NewJSONPeers(*raftDir, transport)

	if *join == "" && !*singleNode {
		peers, err := peerStore.Peers()
		if err != nil {
			log.Fatal(err.Error())
		}
		if len(peers) == 0 {
			if !*timesafeguard.DisableTimesafeguard {
				log.Fatalf("No peers known and -join not specified. Joining the network is not safe because timesafeguard cannot be called.\n")
			}
		} else {
			if len(peers) == 1 && peers[0] == *peerAddr {
				// To prevent crashlooping too frequently in case the init system directly restarts our process.
				time.Sleep(10 * time.Second)
				log.Fatalf("Only known peer is myself (%q), implying this node was removed from the network. Please kill the process and remove the data.\n", *peerAddr)
			}
			if err := timesafeguard.SynchronizedWithNetwork(*peerAddr, peers, *networkPassword); err != nil {
				log.Fatal(err.Error())
			}
		}
	}

	var p []string

	config := raft.DefaultConfig()
	config.Logger = log.New(glog.LogBridgeFor("INFO"), "", log.Lshortfile)
	if *singleNode {
		config.EnableSingleNode = true
	}

	// Keep 5 snapshots in *raftDir/snapshots, log to stderr.
	fss, err := raft.NewFileSnapshotStore(*raftDir, 5, nil)
	if err != nil {
		log.Fatal(err)
	}

	// How often to check whether a snapshot should be taken. The check is
	// cheap, and the default value far too high for networks with a high
	// number of messages/s.
	// At the same time, it is important that we don’t check too early,
	// otherwise recovering from the most recent snapshot doesn’t work because
	// after recovering, a new snapshot (over the 0 committed messages) will be
	// taken immediately, effectively overwriting the result of the snapshot
	// recovery.
	config.SnapshotInterval = 300 * time.Second

	// Batch as many messages as possible into a single appendEntries RPC.
	// There is no downside to setting this too high.
	config.MaxAppendEntries = 1024

	// It could be that the heartbeat goroutine is not scheduled for a while,
	// so relax the default of 500ms.
	config.LeaderLeaseTimeout = timesafeguard.ElectionTimeout
	config.HeartbeatTimeout = timesafeguard.ElectionTimeout
	config.ElectionTimeout = timesafeguard.ElectionTimeout

	// We use prometheus, so hook up the metrics package (used by raft) to
	// prometheus as well.
	sink, err := metrics_prometheus.NewPrometheusSink()
	if err != nil {
		log.Fatal(err)
	}
	metrics.NewGlobal(metrics.DefaultConfig("raftmetrics"), sink)

	bootstrapping := *singleNode || *join != ""
	logStore, err := raft_store.NewLevelDBStore(filepath.Join(*raftDir, "raftlog"), bootstrapping)
	if err != nil {
		log.Fatal(err)
	}
	ircStore, err = raft_store.NewLevelDBStore(filepath.Join(*raftDir, "irclog"), bootstrapping)
	if err != nil {
		log.Fatal(err)
	}
	fsm := &FSM{
		store:             logStore,
		ircstore:          ircStore,
		lastSnapshotState: make(map[uint64][]byte),
	}
	logcache, err := raft.NewLogCache(config.MaxAppendEntries, logStore)
	if err != nil {
		log.Fatal(err)
	}

	node, err = raft.NewRaft(config, fsm, logcache, logStore, fss, peerStore, transport)
	if err != nil {
		log.Fatal(err)
	}

	if *dumpCanaryState != "" {
		canary(fsm, *dumpCanaryState)
		if *dumpHeapProfile != "" {
			debug.FreeOSMemory()
			f, err := os.Create(*dumpHeapProfile)
			if err != nil {
				log.Fatal(err)
			}
			defer f.Close()
			pprof.WriteHeapProfile(f)
		}
		return
	}

	go func() {
		for {
			secondsInState.WithLabelValues(node.State().String()).Inc()
			time.Sleep(1 * time.Second)
		}
	}()

	privaterouter := httprouter.New()
	privaterouter.Handler("GET", "/", exitOnRecoverHandleFunc(handleStatus))
	privaterouter.Handler("GET", "/irclog", exitOnRecoverHandleFunc(handleIrclog))
	privaterouter.Handler("POST", "/raft/*rest", exitOnRecoverHandler(transport))
	privaterouter.Handler("POST", "/join", exitOnRecoverHandleFunc(handleJoin))
	privaterouter.Handler("POST", "/part", exitOnRecoverHandleFunc(handlePart))
	privaterouter.Handler("GET", "/snapshot", exitOnRecoverHandleFunc(handleSnapshot))
	privaterouter.Handler("GET", "/leader", exitOnRecoverHandleFunc(handleLeader))
	privaterouter.Handler("POST", "/quit", exitOnRecoverHandleFunc(handleQuit))
	privaterouter.Handler("GET", "/config", exitOnRecoverHandleFunc(handleGetConfig))
	privaterouter.Handler("POST", "/config", exitOnRecoverHandleFunc(handlePostConfig))
	privaterouter.Handler("GET", "/metrics", exitOnRecoverHandler(prometheus.Handler()))

	publicrouter := httprouter.New()
	publicrouter.Handle("POST", "/robustirc/v1/:sessionid", exitOnRecoverHandle(handleCreateSession))
	publicrouter.Handle("POST", "/robustirc/v1/:sessionid/message", exitOnRecoverHandle(handlePostMessage))
	publicrouter.Handle("GET", "/robustirc/v1/:sessionid/messages", exitOnRecoverHandle(handleGetMessages))
	publicrouter.Handle("DELETE", "/robustirc/v1/:sessionid", exitOnRecoverHandle(handleDeleteSession))

	a := auth.NewBasicAuthenticator("robustirc", func(user, realm string) string {
		if user == "robustirc" {
			return passwordHash
		}
		return ""
	})

	http.Handle("/robustirc/", publicrouter)

	http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if username := a.CheckAuth(r); username == "" {
			a.RequireAuth(w, r)
		} else {
			privaterouter.ServeHTTP(w, r)
		}
	}))

	srv := http.Server{Addr: *listen}
	if err := http2.ConfigureServer(&srv, nil); err != nil {
		log.Fatal(err)
	}

	// Manually create the net.TCPListener so that joinMaster() does not run
	// into connection refused errors (the master will try to contact the
	// node before acknowledging the join).
	srv.TLSConfig.Certificates = make([]tls.Certificate, 1)
	srv.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(*tlsCertPath, *tlsKeyPath)
	if err != nil {
		log.Fatal(err)
	}

	ln, err := net.Listen("tcp", *listen)
	if err != nil {
		log.Fatal(err)
	}

	tlsListener := tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, srv.TLSConfig)
	go srv.Serve(tlsListener)

	log.Printf("RobustIRC listening on %q. For status, see %s\n",
		*peerAddr,
		fmt.Sprintf("https://*****:*****@%s/", *networkPassword, *peerAddr))

	if *join != "" {
		if err := timesafeguard.SynchronizedWithMasterAndNetwork(*peerAddr, *join, *networkPassword); err != nil {
			log.Fatal(err.Error())
		}

		p = joinMaster(*join, peerStore)
		// TODO(secure): properly handle joins on the server-side where the joining node is already in the network.
	}

	if len(p) > 0 {
		node.SetPeers(p)
	}

	expireSessionsTimer := time.After(expireSessionsInterval)
	secondTicker := time.Tick(1 * time.Second)
	for {
		select {
		case <-secondTicker:
			if node.State() == raft.Shutdown {
				log.Fatal("Node removed from the network (in raft state shutdown), terminating.")
			}
		case <-expireSessionsTimer:
			expireSessionsTimer = time.After(expireSessionsInterval)

			// Race conditions (a node becoming a leader or ceasing to be the
			// leader shortly before/after this runs) are okay, since the timer
			// is triggered often enough on every node so that it will
			// eventually run on the leader.
			if node.State() != raft.Leader {
				continue
			}

			applyMu.Lock()
			for _, msg := range ircServer.ExpireSessions() {
				// Cannot fail, no user input.
				msgbytes, _ := json.Marshal(msg)
				f := node.Apply(msgbytes, 10*time.Second)
				if err := f.Error(); err != nil {
					log.Printf("Apply(): %v\n", err)
					break
				}
			}
			applyMu.Unlock()
		}
	}
}
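Two helpers referenced in this example are not part of the excerpt. The sketches below are assumptions reconstructed from how they are used: printDefault prints a single flag in roughly the flag.PrintDefaults format, which is what makes the grouped usage output possible, and tcpKeepAliveListener follows the well-known pattern from net/http/server.go so that accepted connections get TCP keep-alives before the TLS listener wraps them.

// Sketch (assumption): print one flag in roughly the flag.PrintDefaults format.
func printDefault(f *flag.Flag) {
	fmt.Fprintf(os.Stderr, "  -%s=%s: %s\n", f.Name, f.DefValue, f.Usage)
}

// Sketch (assumption): keep-alive wrapper, modeled after the unexported
// tcpKeepAliveListener in net/http/server.go.
type tcpKeepAliveListener struct {
	*net.TCPListener
}

func (ln tcpKeepAliveListener) Accept() (net.Conn, error) {
	tc, err := ln.AcceptTCP()
	if err != nil {
		return nil, err
	}
	tc.SetKeepAlive(true)
	tc.SetKeepAlivePeriod(3 * time.Minute)
	return tc, nil
}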
Example #3
func (s *robustSnapshot) Persist(sink raft.SnapshotSink) error {
	log.Printf("Filtering and writing %d indexes\n", s.lastIndex-s.firstIndex)

	// Get a timestamp and keep it constant, so that we only compact messages
	// older than n days from compactionStart. If we used time.Since, new
	// messages would pour into the window on every compaction round, possibly
	// making the compaction never converge.
	compactionStart := time.Now()
	log.Printf("compactionStart %s\n", compactionStart.String())
	if *canaryCompactionStart > 0 {
		compactionStart = time.Unix(0, *canaryCompactionStart)
		log.Printf("compactionStart %s (overridden with -canary_compaction_start)\n", compactionStart.String())
	}

	sessions := make(map[types.RobustId]bool)

	// First pass: just parse all the messages
	for i := s.firstIndex; i <= s.lastIndex; i++ {
		var nlog raft.Log
		if err := s.store.GetLog(i, &nlog); err != nil {
			s.del[i] = true
			continue
		}

		// TODO: compact raft messages as well, so that peer changes are not kept forever
		if nlog.Type != raft.LogCommand {
			continue
		}

		parsed := types.NewRobustMessageFromBytes(nlog.Data)
		s.parsed[i] = &parsed

		if parsed.Type == types.RobustCreateSession {
			s.sliced[parsed.Id.Id] = append(s.sliced[parsed.Id.Id], &parsed)
		}

		if parsed.Type == types.RobustDeleteSession {
			s.sliced[parsed.Session.Id] = append(s.sliced[parsed.Session.Id], &parsed)
		}

		if parsed.Type == types.RobustIRCFromClient {
			// TODO: skip PING/PRIVMSG messages that are outside of the
			// compaction window to reduce the working set. should be a noop
			// since no relevant* function looks at PRIVMSG/PING.
			sessions[parsed.Session] = true
			vmsgs, _ := ircServer.Get(types.RobustId{Id: parsed.Id.Id})

			onlyerrors := true
			for _, msg := range vmsgs {
				ircmsg := irc.ParseMessage(msg.Data)
				if ircmsg == nil {
					glog.Errorf("Output message not parsable\n")
					continue
				}
				if !errorCodes[ircmsg.Command] {
					onlyerrors = false
				}
			}
			if len(vmsgs) > 0 && onlyerrors {
				s.del[i] = true
				continue
			}

			// Kind of a hack: we need to keep track of which sessions are
			// services connections and which are not, so that we can look at
			// the correct relevant-function (e.g. server_NICK vs. NICK).
			ircmsg := irc.ParseMessage(parsed.Data)
			if ircmsg != nil && strings.ToUpper(ircmsg.Command) == "SERVER" {
				s.servers[parsed.Session.Id] = true
			}

			// Every session which is interested in at least one of the output
			// messages gets a pointer to the input message stored in s.sliced
			// so that we can easily iterate over all relevant input messages.
			for session := range sessions {
				interested := false
				for _, msg := range vmsgs {
					if msg.InterestingFor[session.Id] {
						interested = true
						break
					}
				}
				// All messages that would be delivered to a session are
				// interesting for compaction, but also just any message that a
				// specific session sent.
				if interested || (len(vmsgs) > 0 && session.Id == parsed.Session.Id) {
					s.sliced[session.Id] = append(s.sliced[session.Id], &parsed)
				}
			}

			// Some messages don’t result in output messages, such as a JOIN
			// command for a channel the user is already in. We mark these as
			// interesting at least to the session from which they originated,
			// so that they can be detected and deleted.
			// TODO: would it be okay to just mark them for deletion? i.e., do all messages that modify state return a result?
			if len(vmsgs) == 0 {
				s.sliced[parsed.Session.Id] = append(s.sliced[parsed.Session.Id], &parsed)
			}
		}
	}

	log.Printf("got %d sessions\n", len(sessions))
	for session := range sessions {
		log.Printf("session 0x%x has %d messages\n", session.Id, len(s.sliced[session.Id]))
	}

	// We repeatedly compact, since the result of one compaction can affect the
	// result of other compactions (see compaction_test.go for examples).
	changed := true
	pass := 0
	for changed {
		log.Printf("Compaction pass %d\n", pass)
		pass++
		changed = false
		for i := s.firstIndex; i <= s.lastIndex; i++ {
			if i%1000 == 0 {
				log.Printf("message %d of %d (%.0f%%)\n",
					i, s.lastIndex, (float64(i)/float64(s.lastIndex))*100.0)
			}
			if s.del[i] {
				continue
			}

			msg, ok := s.parsed[i]
			if !ok {
				continue
			}

			if compactionStart.Sub(time.Unix(0, msg.Id.Id)) < 7*24*time.Hour {
				// Once we are outside the compaction window (i.e. this message
				// is less than 7 days old), no newer message can be compacted
				// either, so stop scanning.
				break
			}

			session := msg.Session
			if msg.Type == types.RobustCreateSession {
				session = msg.Id
			}

			canCompact, slicedIdx, err := s.canCompact(session, msg, i)
			if err != nil {
				sink.Cancel()
				return err
			}
			if canCompact {
				s.del[i] = true
				if slicedIdx != -1 {
					s.sliced[session.Id][slicedIdx] = nil
				}
				changed = true
			}
		}
	}

	encoder := json.NewEncoder(sink)
	for i := s.firstIndex; i <= s.lastIndex; i++ {
		if s.del[i] {
			continue
		}

		var elog raft.Log

		if err := s.store.GetLog(i, &elog); err != nil {
			continue
		}

		if err := encoder.Encode(elog); err != nil {
			sink.Cancel()
			return err
		}
	}

	sink.Close()

	for idx, del := range s.del {
		if !del {
			continue
		}
		nmsg, ok := s.parsed[idx]
		// If the message was not found in parsed, then there was no message
		// with this index, hence there is nothing to delete.
		if !ok {
			continue
		}
		// TODO: Since outputstream uses a LevelDB database, we could be more
		// efficient and use batch deletions.
		if err := ircServer.Delete(nmsg.Id); err != nil {
			log.Panicf("Could not delete outputstream message: %v\n", err)
		}
		s.store.DeleteRange(idx, idx)
	}

	return nil
}
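Persist relies on several robustSnapshot fields whose definitions live elsewhere in the repository. The sketch below reconstructs them from their usage in Persist and canary; the field types and comments are assumptions, not the original declaration.

// Sketch (assumption): the robustSnapshot fields used by Persist and canary.
type robustSnapshot struct {
	firstIndex uint64 // first raft log index covered by this snapshot
	lastIndex  uint64 // last raft log index covered by this snapshot
	store      *raft_store.LevelDBStore

	del     map[uint64]bool                  // log indexes marked for deletion (compacted away)
	parsed  map[uint64]*types.RobustMessage  // parsed log entries, keyed by log index
	sliced  map[int64][]*types.RobustMessage // per-session slices of relevant input messages
	servers map[int64]bool                   // sessions that are services (SERVER) connections

	compactionEnd time.Time // end of the compaction window; only older messages are compacted
}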
Example #4
func canary(fsm raft.FSM, statePath string) {
	// Create a snapshot (only creates metadata) and persist it (does the
	// actual compaction). Afterwards we have access to |rs.parsed| (all
	// raft log entries, but parsed) and |rs.del| (all messages which were
	// just compacted).
	log.Printf("Compacting before dumping state\n")

	fsm.(*FSM).skipDeletionForCanary = true

	snapshot, err := fsm.Snapshot()
	if err != nil {
		log.Fatalf("fsm.Snapshot(): %v\n", err)
	}

	rs, ok := snapshot.(*robustSnapshot)
	if !ok {
		log.Fatalf("snapshot is not a robustSnapshot")
	}

	sink, err := raft.NewDiscardSnapshotStore().Create(rs.lastIndex, 1, []byte{})
	if err != nil {
		log.Fatalf("DiscardSnapshotStore.Create(): %v\n", err)
	}

	if err := snapshot.Persist(sink); err != nil {
		log.Fatalf("snapshot.Persist(): %v\n", err)
	}

	sink.Close()

	// Dump the in-memory state into a file, to be read by robustirc-canary.
	f, err := os.Create(statePath)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	log.Printf("Dumping state for robustirc-canary into %q\n", statePath)

	enc := json.NewEncoder(f)

	iterator := rs.store.GetBulkIterator(rs.firstIndex, rs.lastIndex+1)
	defer iterator.Release()
	available := iterator.First()
	for available {
		var nlog raft.Log
		if err := iterator.Error(); err != nil {
			glog.Errorf("Error while iterating through the log: %v", err)
			available = iterator.Next()
			continue
		}
		idx := binary.BigEndian.Uint64(iterator.Key())
		value := iterator.Value()
		if err := json.Unmarshal(value, &nlog); err != nil {
			glog.Errorf("Skipping log entry %d because of a JSON unmarshaling error: %v", idx, err)
			// Advance the iterator before continuing, otherwise we would spin
			// on this unparsable entry forever.
			available = iterator.Next()
			continue
		}
		available = iterator.Next()

		// TODO: compact raft messages as well, so that peer changes are not kept forever
		if nlog.Type != raft.LogCommand {
			continue
		}

		nmsg := types.NewRobustMessageFromBytes(nlog.Data)
		if time.Unix(0, nmsg.Id.Id).Before(rs.compactionEnd) {
			continue
		}

		// TODO: come up with pseudo-values for createsession/deletesession
		if nmsg.Type != types.RobustIRCFromClient {
			continue
		}
		ircmsg := irc.ParseMessage(nmsg.Data)
		if ircmsg == nil || ircmsg.Command == irc.PING || ircmsg.Command == irc.PONG {
			continue
		}
		vmsgs, _ := ircServer.Get(nmsg.Id)
		cm := canaryMessageState{
			Id:        idx,
			Session:   nmsg.Session.Id,
			Input:     util.PrivacyFilterIrcmsg(ircmsg).String(),
			Output:    make([]canaryMessageOutput, len(vmsgs)),
			Compacted: false,
		}
		for idx, vmsg := range vmsgs {
			ifc := make(map[string]bool)
			for k, v := range vmsg.InterestingFor {
				ifc["0x"+strconv.FormatInt(k, 16)] = v
			}
			cm.Output[idx] = canaryMessageOutput{
				Text:           util.PrivacyFilterIrcmsg(irc.ParseMessage(vmsg.Data)).String(),
				InterestingFor: ifc,
			}
		}
		if err := enc.Encode(&cm); err != nil {
			log.Fatal(err)
		}
	}
}
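The canaryMessageState and canaryMessageOutput records encoded above are not included in this excerpt. A sketch follows, reconstructed from their use in canary(); the field types are assumptions.

// Sketch (assumption): records written to the robustirc-canary state dump.
type canaryMessageOutput struct {
	Text           string          // privacy-filtered output message
	InterestingFor map[string]bool // hex session ids this output is delivered to
}

type canaryMessageState struct {
	Id        uint64                // raft log index of the input message
	Session   int64                 // session id the input message belongs to
	Input     string                // privacy-filtered input message
	Output    []canaryMessageOutput // resulting output messages
	Compacted bool                  // always false in this dump
}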
Example #5
func (s *robustSnapshot) canCompact(session types.RobustId, msg *types.RobustMessage, logIndex uint64) (bool, int, error) {
	slicedIdx := -1
	// TODO: Instead of doing a full scan, we should be able to use binary
	// search here since the message IDs are strictly monotonically increasing
	// and appended in-order, hence sorted.
	for idx, smsg := range s.sliced[session.Id] {
		if smsg != nil && smsg.Id == msg.Id {
			slicedIdx = idx
		}
	}
	if slicedIdx == -1 && msg.Type != types.RobustConfig && msg.Type != types.RobustMessageOfDeath {
		log.Printf("WARNING: message with id %v (logidx %d), type %s for session %v (data = %s) not found in s.sliced, using slow path\n", msg.Id, logIndex, msg.Type, msg.Session, msg.Data)
		p := irc.ParseMessage(msg.Data)
		if p != nil {
			log.Printf(" (parsed: %s)\n", p.Bytes())
		}
	}

	// TODO: deprecate get, prevSlow, nextSlow once there have been no fallbacks to the slow path for a week.

	// The prev and next functions are cursors, see ircserver’s StillRelevant
	// function. They return the previous (or next, respectively) message.
	// An illustrative consumer is sketched after this example.
	get := func(index uint64, wantType types.RobustType) *types.RobustMessage {
		if s.del[index] {
			return nil
		}

		nmsg, ok := s.parsed[index]
		if !ok {
			return nil
		}

		if wantType != types.RobustAny && wantType != nmsg.Type {
			return nil
		}

		if nmsg.Session != session &&
			(nmsg.Type != types.RobustCreateSession ||
				nmsg.Id != session) {
			return nil
		}
		return nmsg
	}

	nextIndex := logIndex
	nextIndexSliced := slicedIdx
	trace := false

	reset := func() {
		nextIndex = logIndex
		nextIndexSliced = slicedIdx
	}

	prevSlow := func(wantType types.RobustType) (*types.RobustMessage, error) {
		for {
			nextIndex--

			if nextIndex < s.firstIndex {
				return nil, ircserver.CursorEOF
			}

			if ircmsg := get(nextIndex, wantType); ircmsg != nil {
				if trace {
					log.Printf("Returning idx=%d: %v\n", nextIndex, ircmsg.Data)
				}
				return ircmsg, nil
			}
		}
	}

	prevFast := func(wantType types.RobustType) (*types.RobustMessage, error) {
		if nextIndexSliced == -1 {
			glog.Errorf("Compaction: falling back to prevSlow() for log index %d, msg id %v\n", logIndex, msg.Id)
			return prevSlow(wantType)
		}
		for {
			nextIndexSliced--
			if nextIndexSliced < 0 {
				return nil, ircserver.CursorEOF
			}
			nmsg := s.sliced[session.Id][nextIndexSliced]

			if nmsg == nil {
				continue
			}

			// TODO: introduce a parameter so that the relevant* functions
			// can specify whether they want just messages from _the same_
			// session or messages that are relevant to the session.
			if nmsg.Session != session &&
				(nmsg.Type != types.RobustCreateSession ||
					nmsg.Id != session) {
				continue
			}

			if wantType == types.RobustAny || wantType == nmsg.Type {
				if trace {
					log.Printf("Returning sliceidx=%d: %v\n", nextIndexSliced, nmsg.Data)
				}
				return nmsg, nil
			}
		}
	}

	nextSlow := func(wantType types.RobustType) (*types.RobustMessage, error) {
		for {
			nextIndex++

			if nextIndex > s.lastIndex {
				return nil, ircserver.CursorEOF
			}

			if ircmsg := get(nextIndex, wantType); ircmsg != nil {
				if trace {
					log.Printf("Returning idx=%d: %v\n", nextIndex, ircmsg.Data)
				}
				return ircmsg, nil
			}
		}
	}

	nextFast := func(wantType types.RobustType) (*types.RobustMessage, error) {
		if nextIndexSliced == -1 {
			glog.Errorf("Compaction: falling back to nextSlow() for log index %d, msg id %v\n", logIndex, msg.Id)
			return nextSlow(wantType)
		}

		for {
			nextIndexSliced++
			if nextIndexSliced >= len(s.sliced[session.Id]) {
				return nil, ircserver.CursorEOF
			}
			nmsg := s.sliced[session.Id][nextIndexSliced]

			if nmsg == nil {
				continue
			}

			// TODO: introduce a parameter so that the relevant* functions
			// can specify whether they want just messages from _the same_
			// session or messages that are relevant to the session.
			if nmsg.Session != session &&
				(nmsg.Type != types.RobustCreateSession ||
					nmsg.Id != session) {
				continue
			}

			if wantType == types.RobustAny || wantType == nmsg.Type {
				if trace {
					log.Printf("Returning sliceidx=%d: %v\n", nextIndexSliced, nmsg.Data)
				}
				return nmsg, nil
			}
		}
	}

	switch msg.Type {
	case types.RobustDeleteSession:
		rmsg, err := prevSlow(types.RobustAny)
		if err != nil && err != ircserver.CursorEOF {
			return false, slicedIdx, err
		}
		return err == nil && rmsg.Type == types.RobustCreateSession, slicedIdx, nil

	case types.RobustCreateSession:
		_, err := nextSlow(types.RobustAny)
		if err != nil && err != ircserver.CursorEOF {
			return false, slicedIdx, err
		}

		// Sessions can be compacted away when they don’t contain any messages.
		return err == ircserver.CursorEOF, slicedIdx, nil

	case types.RobustIRCFromClient:
		p := irc.ParseMessage(msg.Data)

		relevantFast, errFast := ircServer.StillRelevant(s.servers[session.Id], p, prevFast, nextFast, reset)
		return !relevantFast, slicedIdx, errFast

	case types.RobustMessageOfDeath:
		// Messages of death can always be compacted, they will be skipped anyway.
		return true, slicedIdx, nil

	case types.RobustConfig:
		// TODO: implement compaction for RobustConfig
		return false, slicedIdx, nil

	case types.RobustIRCToClient:
		fallthrough
	case types.RobustPing:
		fallthrough
	case types.RobustAny:
		glog.Errorf("Compaction: saw unexpected message type %v (log id %d, message id %v)\n", msg.Type, logIndex, msg.Id)
		return false, slicedIdx, nil

	default:
		glog.Errorf("Compaction: saw message of unknown type %d (log id %d, message id %v)\n", msg.Type, logIndex, msg.Id)
		return false, slicedIdx, nil
	}
}
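Purely illustrative: the prev*/next* cursors above feed ircserver.StillRelevant, whose per-command relevance checks are not part of this excerpt. The sketch below shows, under assumed semantics, how such a check might consume a forward cursor: a hypothetical NICK rule that treats a NICK message as compactable once a later NICK from the same session supersedes it. The cursorFunc alias and nickStillRelevantExample are hypothetical names introduced only for this illustration, not RobustIRC API.

// Hypothetical illustration only, not part of RobustIRC: how a relevance
// check could consume one of the cursors defined above.
type cursorFunc func(wantType types.RobustType) (*types.RobustMessage, error)

func nickStillRelevantExample(next cursorFunc) (bool, error) {
	for {
		nmsg, err := next(types.RobustIRCFromClient)
		if err == ircserver.CursorEOF {
			return true, nil // no later message from this session; keep the NICK
		}
		if err != nil {
			return true, err
		}
		if p := irc.ParseMessage(nmsg.Data); p != nil && strings.ToUpper(p.Command) == "NICK" {
			return false, nil // superseded by a later NICK; the earlier one can be compacted
		}
	}
}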