Code example #1
File: store.go Project: radcheb/influxdb
func (s *storeFSMSnapshot) Persist(sink raft.SnapshotSink) error {
	err := func() error {
		// Encode data.
		p, err := s.Data.MarshalBinary()
		if err != nil {
			return err
		}

		// Write data to sink.
		if _, err := sink.Write(p); err != nil {
			return err
		}

		// Close the sink.
		if err := sink.Close(); err != nil {
			return err
		}

		return nil
	}()

	if err != nil {
		sink.Cancel()
		return err
	}

	return nil
}
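Examples #1, #4, #9, and #10 below all wrap the body in an immediately invoked closure so that every failure path funnels into a single sink.Cancel() call. The same idea can be expressed as a standalone helper; persistWith below is a minimal sketch for illustration only (it is not part of any project shown here, and assumes the "io" and hashicorp/raft packages are imported):

// persistWith runs body against the sink, cancels the sink if body fails,
// and closes the sink otherwise. Hypothetical helper for illustration.
func persistWith(sink raft.SnapshotSink, body func(w io.Writer) error) error {
	if err := body(sink); err != nil {
		sink.Cancel()
		return err
	}
	return sink.Close()
}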
Code example #2
File: fsm.go Project: askagirl/consul
func (s *consulSnapshot) Persist(sink raft.SnapshotSink) error {
	// Create the msgpack encoder that writes to the sink
	encoder := codec.NewEncoder(sink, msgpackHandle)

	// Write the header
	header := snapshotHeader{
		LastIndex: s.state.LastIndex(),
	}
	if err := encoder.Encode(&header); err != nil {
		sink.Cancel()
		return err
	}

	if err := s.persistNodes(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}

	if err := s.persistSessions(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}

	if err := s.persistACLs(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}

	if err := s.persistKV(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	return nil
}
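The persist* helpers are not part of this excerpt. Judging from the older consul snapshot in example #14 further down, each one presumably writes a one-byte message type and then msgpack-encodes each record. A hedged sketch of that shape follows; the real consul implementation differs in detail, and the iteration over s.state.Nodes() is an assumption carried over from example #14:

// Hypothetical sketch of persistNodes following the pattern in example #14.
func (s *consulSnapshot) persistNodes(sink raft.SnapshotSink, encoder *codec.Encoder) error {
	for _, node := range s.state.Nodes() {
		req := structs.RegisterRequest{Node: node.Node, Address: node.Address}
		// One type byte, then the msgpack-encoded payload.
		if _, err := sink.Write([]byte{byte(structs.RegisterRequestType)}); err != nil {
			return err
		}
		if err := encoder.Encode(&req); err != nil {
			return err
		}
	}
	return nil
}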
Code example #3
File: raft.go Project: hfeeki/delayd
// Persist writes the snapshot to the sink by serializing all active entries.
func (s *Snapshot) Persist(sink raft.SnapshotSink) error {
	_, err := sink.Write([]byte{snapSchemaVersion})
	if err != nil {
		sink.Cancel()
		return err
	}

	for i, e := range s.entries {
		_, err = sink.Write(s.uuids[i])
		if err != nil {
			sink.Cancel()
			return err
		}

		b, err := e.ToBytes()
		if err != nil {
			sink.Cancel()
			return err
		}

		_, err = sink.Write(uint32ToBytes(uint32(len(b))))
		if err != nil {
			sink.Cancel()
			return err
		}

		_, err = sink.Write(b)
		if err != nil {
			sink.Cancel()
			return err
		}
	}

	return sink.Close()
}
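The uint32ToBytes helper used above for length framing is not shown in this excerpt. A plausible implementation with "encoding/binary" follows; the byte order is an assumption, and the delayd source may differ:

// Hypothetical implementation of the length-framing helper used above.
// Assumes big-endian framing; the original may use a different byte order.
func uint32ToBytes(u uint32) []byte {
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, u)
	return buf
}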
Code example #4
File: store.go Project: otoolep/hraftd
func (f *fsmSnapshot) Persist(sink raft.SnapshotSink) error {
	err := func() error {
		// Encode data.
		b, err := json.Marshal(f.store)
		if err != nil {
			return err
		}

		// Write data to sink.
		if _, err := sink.Write(b); err != nil {
			return err
		}

		// Close the sink.
		if err := sink.Close(); err != nil {
			return err
		}

		return nil
	}()

	if err != nil {
		sink.Cancel()
		return err
	}

	return nil
}
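On the read side, raft hands this JSON back through the FSM's Restore method. A minimal hedged sketch of the counterpart follows; the fsm receiver, its store field, and the map[string]string state type are assumptions based on this excerpt, with "encoding/json" and "io" assumed imported:

// Hypothetical Restore counterpart for the JSON snapshot written above.
func (f *fsm) Restore(rc io.ReadCloser) error {
	defer rc.Close()
	store := make(map[string]string)
	if err := json.NewDecoder(rc).Decode(&store); err != nil {
		return err
	}
	// No lock needed: raft does not call Apply concurrently with Restore.
	f.store = store
	return nil
}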
Code example #5
File: raft.go Project: abligh/ghostfish
func (snap *masterSnapshot) Persist(sink raft.SnapshotSink) error {
	// Marshal the state; cancel the sink on any failure and close it on
	// success so the snapshot is marked complete.
	data, err := json.Marshal(snap.data)
	if err != nil {
		sink.Cancel()
		return err
	}
	if _, err := sink.Write(data); err != nil {
		sink.Cancel()
		return err
	}
	return sink.Close()
}
Code example #6
File: snapshot_test.go Project: pulcy/vault-monkey
// See raft.SnapshotSink.
func (m *MockSnapshot) Persist(sink raft.SnapshotSink) error {
	hd := codec.MsgpackHandle{}
	enc := codec.NewEncoder(sink, &hd)
	if err := enc.Encode(m.logs[:m.maxIndex]); err != nil {
		sink.Cancel()
		return err
	}
	return sink.Close()
}
Code example #7
File: store.go Project: imjorge/flynn
// Persist writes the snapshot to the sink.
func (ss *raftSnapshot) Persist(sink raft.SnapshotSink) error {
	// Write data to sink.
	if _, err := sink.Write(ss.data); err != nil {
		sink.Cancel()
		return err
	}

	// Close and exit.
	return sink.Close()
}
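For context, the raft library obtains one of these snapshot objects from the FSM's Snapshot method. A hedged sketch of the producing side follows; the store type with its mu and data fields is an assumption for illustration, not flynn code:

// Hypothetical Snapshot method that could produce the raftSnapshot above.
func (s *store) Snapshot() (raft.FSMSnapshot, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	// Copy the serialized state so Persist can run without holding the lock.
	data := make([]byte, len(s.data))
	copy(data, s.data)
	return &raftSnapshot{data: data}, nil
}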
Code example #8
File: raft.go Project: justincampbell/goflake
func (fsm *fsm) Persist(sink raft.SnapshotSink) error {
	fsm.Lock()
	defer fsm.Unlock()

	// Marshal the FSM state; cancel the sink on any failure and close it
	// on success so the snapshot is marked complete.
	data, err := json.Marshal(fsm)
	if err != nil {
		sink.Cancel()
		return err
	}
	if _, err := sink.Write(data); err != nil {
		sink.Cancel()
		return err
	}
	return sink.Close()
}
Code example #9
File: store.go Project: zmedico/rqlite
// Persist writes the snapshot to the given sink.
func (f *fsmSnapshot) Persist(sink raft.SnapshotSink) error {
	err := func() error {
		// Write data to sink.
		if _, err := sink.Write(f.data); err != nil {
			return err
		}

		// Close the sink.
		if err := sink.Close(); err != nil {
			return err
		}

		return nil
	}()

	if err != nil {
		sink.Cancel()
		return err
	}

	return nil
}
Code example #10
File: store.go Project: GaobinWang/rqlite
// Persist writes the snapshot to the given sink.
func (f *fsmSnapshot) Persist(sink raft.SnapshotSink) error {
	err := func() error {
		// Start by writing the size of the database.
		b := new(bytes.Buffer)
		sz := uint64(len(f.database))
		err := binary.Write(b, binary.LittleEndian, sz)
		if err != nil {
			return err
		}
		if _, err := sink.Write(b.Bytes()); err != nil {
			return err
		}

		// Next write database to sink.
		if _, err := sink.Write(f.database); err != nil {
			return err
		}

		// Finally write the meta.
		if _, err := sink.Write(f.meta); err != nil {
			return err
		}

		// Close the sink.
		if err := sink.Close(); err != nil {
			return err
		}

		return nil
	}()

	if err != nil {
		sink.Cancel()
		return err
	}

	return nil
}
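Reading this layout back means consuming the 8-byte little-endian size, then exactly that many database bytes, with the remainder being the meta blob. A hedged sketch of the decode side follows; readSnapshot is illustrative, not rqlite code, and assumes "encoding/binary", "io", and "io/ioutil" are imported:

// Hypothetical reader for the size-prefixed snapshot layout written above.
func readSnapshot(r io.Reader) (database, meta []byte, err error) {
	var sz uint64
	if err := binary.Read(r, binary.LittleEndian, &sz); err != nil {
		return nil, nil, err
	}
	database = make([]byte, sz)
	if _, err := io.ReadFull(r, database); err != nil {
		return nil, nil, err
	}
	// Everything after the database blob is the meta payload.
	meta, err = ioutil.ReadAll(r)
	if err != nil {
		return nil, nil, err
	}
	return database, meta, nil
}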
Code example #11
File: fsm.go Project: icexin/raftkv
// Persist walks all key/value pairs into a temporary LevelDB database,
// then streams a tar.gz of that directory to the sink.
func (f *fsmSnapshot) Persist(sink raft.SnapshotSink) error {
	// Create a temporary path for the state store
	tmpPath, err := ioutil.TempDir(os.TempDir(), "state")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpPath)

	db, err := leveldb.OpenFile(tmpPath, nil)
	if err != nil {
		return err
	}
	iter := f.snapshot.NewIterator(nil, nil)
	for iter.Next() {
		err = db.Put(iter.Key(), iter.Value(), nil)
		if err != nil {
			db.Close()
			sink.Cancel()
			return err
		}
	}
	iter.Release()
	db.Close()

	// make tar.gz
	w := gzip.NewWriter(sink)
	err = Tar(tmpPath, w)
	if err != nil {
		sink.Cancel()
		return err
	}

	err = w.Close()
	if err != nil {
		sink.Cancel()
		return err
	}

	return sink.Close()
}
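The Tar helper above is referenced but not shown. A hedged sketch of a directory-to-tar walk follows, using "archive/tar", "os", "path/filepath", and "io"; the raftkv original may differ, for example in how it names entries:

// Hypothetical implementation of the Tar helper: walks dir and writes
// every regular file into the tar stream w.
func Tar(dir string, w io.Writer) error {
	tw := tar.NewWriter(w)
	walk := func(path string, info os.FileInfo, err error) error {
		if err != nil || info.IsDir() {
			return err
		}
		rel, err := filepath.Rel(dir, path)
		if err != nil {
			return err
		}
		hdr, err := tar.FileInfoHeader(info, "")
		if err != nil {
			return err
		}
		hdr.Name = rel
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(tw, f)
		return err
	}
	if err := filepath.Walk(dir, walk); err != nil {
		return err
	}
	return tw.Close()
}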
Code example #12
File: fsm.go Project: catroot/consul
func (s *consulSnapshot) Persist(sink raft.SnapshotSink) error {
	defer metrics.MeasureSince([]string{"consul", "fsm", "persist"}, time.Now())

	// Create the msgpack encoder that writes to the sink
	encoder := codec.NewEncoder(sink, msgpackHandle)

	// Write the header
	header := snapshotHeader{
		LastIndex: s.state.LastIndex(),
	}
	if err := encoder.Encode(&header); err != nil {
		sink.Cancel()
		return err
	}

	if err := s.persistNodes(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}

	if err := s.persistSessions(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}

	if err := s.persistACLs(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}

	if err := s.persistKVs(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}

	if err := s.persistTombstones(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}

	if err := s.persistPreparedQueries(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}

	return nil
}
Code example #13
File: fsm.go Project: dgshep/nomad
func (s *nomadSnapshot) Persist(sink raft.SnapshotSink) error {
	defer metrics.MeasureSince([]string{"nomad", "fsm", "persist"}, time.Now())
	// Create the msgpack encoder that writes to the sink
	encoder := codec.NewEncoder(sink, structs.MsgpackHandle)

	// Write the header
	header := snapshotHeader{}
	if err := encoder.Encode(&header); err != nil {
		sink.Cancel()
		return err
	}

	// Write the time table
	sink.Write([]byte{byte(TimeTableSnapshot)})
	if err := s.timetable.Serialize(encoder); err != nil {
		sink.Cancel()
		return err
	}

	// Write all the data out
	if err := s.persistIndexes(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistNodes(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistJobs(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistEvals(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistAllocs(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	if err := s.persistPeriodicLaunches(sink, encoder); err != nil {
		sink.Cancel()
		return err
	}
	return nil
}
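Note that the sink.Write calls for the one-byte type prefixes above discard their errors. A tiny hypothetical helper that surfaces them (illustrative only, not nomad code):

// writePrefix writes a one-byte message-type prefix and, unlike the raw
// sink.Write calls above, returns the write error instead of dropping it.
func writePrefix(sink raft.SnapshotSink, t byte) error {
	_, err := sink.Write([]byte{t})
	return err
}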
Code example #14
File: fsm.go Project: rayleyva/consul
func (s *consulSnapshot) Persist(sink raft.SnapshotSink) error {
	// Create the msgpack encoder that writes to the sink
	handle := codec.MsgpackHandle{}
	encoder := codec.NewEncoder(sink, &handle)

	// Write the header
	header := snapshotHeader{
		LastIndex: s.state.LastIndex(),
	}
	if err := encoder.Encode(&header); err != nil {
		sink.Cancel()
		return err
	}

	// Get all the nodes
	nodes := s.state.Nodes()

	// Register each node
	var req structs.RegisterRequest
	for i := 0; i < len(nodes); i++ {
		req = structs.RegisterRequest{
			Node:    nodes[i].Node,
			Address: nodes[i].Address,
		}

		// Register the node itself
		sink.Write([]byte{byte(structs.RegisterRequestType)})
		if err := encoder.Encode(&req); err != nil {
			sink.Cancel()
			return err
		}

		// Register each service this node has
		services := s.state.NodeServices(nodes[i].Node)
		for _, srv := range services.Services {
			req.Service = srv
			sink.Write([]byte{byte(structs.RegisterRequestType)})
			if err := encoder.Encode(&req); err != nil {
				sink.Cancel()
				return err
			}
		}

		// Register each check this node has
		req.Service = nil
		checks := s.state.NodeChecks(nodes[i].Node)
		for _, check := range checks {
			req.Check = check
			sink.Write([]byte{byte(structs.RegisterRequestType)})
			if err := encoder.Encode(&req); err != nil {
				sink.Cancel()
				return err
			}
		}
	}

	// Enable GC of the nodes
	nodes = nil

	// Dump the KVS entries
	streamCh := make(chan interface{}, 256)
	errorCh := make(chan error)
	go func() {
		if err := s.state.KVSDump(streamCh); err != nil {
			errorCh <- err
		}
	}()

OUTER:
	for {
		select {
		case raw := <-streamCh:
			if raw == nil {
				break OUTER
			}
			sink.Write([]byte{byte(structs.KVSRequestType)})
			if err := encoder.Encode(raw); err != nil {
				sink.Cancel()
				return err
			}

		case err := <-errorCh:
			sink.Cancel()
			return err
		}
	}

	return nil
}
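The raw == nil test in the OUTER loop works because receiving from a closed chan interface{} yields the nil zero value, so the dump side must either close streamCh or send an explicit nil when it finishes. A standalone illustration of that channel property (not project code):

package main

import "fmt"

func main() {
	ch := make(chan interface{}, 1)
	ch <- "entry"
	close(ch)
	fmt.Println(<-ch)        // "entry"
	fmt.Println(<-ch == nil) // true: a closed channel yields the zero value
}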
Code example #15
File: robustirc.go Project: dopuskh3/robustirc
func (s *robustSnapshot) Persist(sink raft.SnapshotSink) error {
	log.Printf("Filtering and writing %d indexes\n", s.lastIndex-s.firstIndex)

	// Get a timestamp and keep it constant, so that we only compact messages
	// older than n days from compactionStart. If we used time.Since, new
	// messages would pour into the window on every compaction round, possibly
	// making the compaction never converge.
	compactionStart := time.Now()
	log.Printf("compactionStart %s\n", compactionStart.String())
	if *canaryCompactionStart > 0 {
		compactionStart = time.Unix(0, *canaryCompactionStart)
		log.Printf("compactionStart %s (overridden with -canary_compaction_start)\n", compactionStart.String())
	}

	sessions := make(map[types.RobustId]bool)

	// First pass: just parse all the messages
	for i := s.firstIndex; i <= s.lastIndex; i++ {
		var nlog raft.Log
		if err := s.store.GetLog(i, &nlog); err != nil {
			s.del[i] = true
			continue
		}

		// TODO: compact raft messages as well, so that peer changes are not kept forever
		if nlog.Type != raft.LogCommand {
			continue
		}

		parsed := types.NewRobustMessageFromBytes(nlog.Data)
		s.parsed[i] = &parsed

		if parsed.Type == types.RobustCreateSession {
			s.sliced[parsed.Id.Id] = append(s.sliced[parsed.Id.Id], &parsed)
		}

		if parsed.Type == types.RobustDeleteSession {
			s.sliced[parsed.Session.Id] = append(s.sliced[parsed.Session.Id], &parsed)
		}

		if parsed.Type == types.RobustIRCFromClient {
			// TODO: skip PING/PRIVMSG messages that are outside of the
			// compaction window to reduce the working set. should be a noop
			// since no relevant* function looks at PRIVMSG/PING.
			sessions[parsed.Session] = true
			vmsgs, _ := ircServer.Get(types.RobustId{Id: parsed.Id.Id})

			onlyerrors := true
			for _, msg := range vmsgs {
				ircmsg := irc.ParseMessage(msg.Data)
				if ircmsg == nil {
					glog.Errorf("Output message not parsable\n")
					continue
				}
				if !errorCodes[ircmsg.Command] {
					onlyerrors = false
				}
			}
			if len(vmsgs) > 0 && onlyerrors {
				s.del[i] = true
				continue
			}

			// Kind of a hack: we need to keep track of which sessions are
			// services connections and which are not, so that we can look at
			// the correct relevant-function (e.g. server_NICK vs. NICK).
			ircmsg := irc.ParseMessage(parsed.Data)
			if ircmsg != nil && strings.ToUpper(ircmsg.Command) == "SERVER" {
				s.servers[parsed.Session.Id] = true
			}

			// Every session which is interested in at least one of the output
			// messages gets a pointer to the input message stored in s.sliced
			// so that we can easily iterate over all relevant input messages.
			for session := range sessions {
				interested := false
				for _, msg := range vmsgs {
					if msg.InterestingFor[session.Id] {
						interested = true
						break
					}
				}
				// All messages that would be delivered to a session are
				// interesting for compaction, but also just any message that a
				// specific session sent.
				if interested || (len(vmsgs) > 0 && session.Id == parsed.Session.Id) {
					s.sliced[session.Id] = append(s.sliced[session.Id], &parsed)
				}
			}

			// Some messages don’t result in output messages, such as a JOIN
			// command for a channel the user is already in. We mark these as
			// interesting at least to the session from which they originated,
			// so that they can be detected and deleted.
			// TODO: would it be okay to just mark them for deletion? i.e., do all messages that modify state return a result?
			if len(vmsgs) == 0 {
				s.sliced[parsed.Session.Id] = append(s.sliced[parsed.Session.Id], &parsed)
			}
		}
	}

	log.Printf("got %d sessions\n", len(sessions))
	for session := range sessions {
		log.Printf("session 0x%x has %d messages\n", session.Id, len(s.sliced[session.Id]))
	}

	// We repeatedly compact, since the result of one compaction can affect the
	// result of other compactions (see compaction_test.go for examples).
	changed := true
	pass := 0
	for changed {
		log.Printf("Compaction pass %d\n", pass)
		pass++
		changed = false
		for i := s.firstIndex; i <= s.lastIndex; i++ {
			if i%1000 == 0 {
				log.Printf("message %d of %d (%.0f%%)\n",
					i, s.lastIndex, (float64(i)/float64(s.lastIndex))*100.0)
			}
			if s.del[i] {
				continue
			}

			msg, ok := s.parsed[i]
			if !ok {
				continue
			}

			if compactionStart.Sub(time.Unix(0, msg.Id.Id)) < 7*24*time.Hour {
				// Once we are outside the compaction window, no newer
				// message can qualify either, so stop scanning.
				break
			}

			session := msg.Session
			if msg.Type == types.RobustCreateSession {
				session = msg.Id
			}

			canCompact, slicedIdx, err := s.canCompact(session, msg, i)
			if err != nil {
				sink.Cancel()
				return err
			}
			if canCompact {
				s.del[i] = true
				if slicedIdx != -1 {
					s.sliced[session.Id][slicedIdx] = nil
				}
				changed = true
			}
		}
	}

	encoder := json.NewEncoder(sink)
	for i := s.firstIndex; i <= s.lastIndex; i++ {
		if s.del[i] {
			continue
		}

		var elog raft.Log

		if err := s.store.GetLog(i, &elog); err != nil {
			continue
		}

		if err := encoder.Encode(elog); err != nil {
			sink.Cancel()
			return err
		}
	}

	if err := sink.Close(); err != nil {
		return err
	}

	for idx, del := range s.del {
		if !del {
			continue
		}
		nmsg, ok := s.parsed[idx]
		// If the message was not found in parsed, then there was no message
		// with this index, hence there is nothing to delete.
		if !ok {
			continue
		}
		// TODO: Since outputstream uses a LevelDB database, we could be more
		// efficient and use batch deletions.
		if err := ircServer.Delete(nmsg.Id); err != nil {
			log.Panicf("Could not delete outputstream message: %v\n", err)
		}
		s.store.DeleteRange(idx, idx)
	}

	return nil
}