Example #1
// handleBackup handles a request that intends to do a backup.
func handleBackup(c *cli.Context) {
	srcSnap := path.Join(c.String("data-dir"), "member", "snap")
	destSnap := path.Join(c.String("backup-dir"), "member", "snap")
	srcWAL := path.Join(c.String("data-dir"), "member", "wal")
	destWAL := path.Join(c.String("backup-dir"), "member", "wal")

	if err := os.MkdirAll(destSnap, 0700); err != nil {
		log.Fatalf("failed creating backup snapshot dir %v: %v", destSnap, err)
	}
	ss := snap.New(srcSnap)
	snapshot, err := ss.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		log.Fatal(err)
	}
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
		newss := snap.New(destSnap)
		if err = newss.SaveSnap(*snapshot); err != nil {
			log.Fatal(err)
		}
	}

	w, err := wal.OpenForRead(srcWAL, walsnap)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	wmetadata, state, ents, err := w.ReadAll()
	switch err {
	case nil:
	case wal.ErrSnapshotNotFound:
		fmt.Printf("Failed to find the match snapshot record %+v in wal %v.", walsnap, srcWAL)
		fmt.Printf("etcdctl will add it back. Start auto fixing...")
	default:
		log.Fatal(err)
	}
	var metadata etcdserverpb.Metadata
	pbutil.MustUnmarshal(&metadata, wmetadata)
	idgen := idutil.NewGenerator(0, time.Now())
	metadata.NodeID = idgen.Next()
	metadata.ClusterID = idgen.Next()

	neww, err := wal.Create(destWAL, pbutil.MustMarshal(&metadata))
	if err != nil {
		log.Fatal(err)
	}
	defer neww.Close()
	if err := neww.Save(state, ents); err != nil {
		log.Fatal(err)
	}
	if err := neww.SaveSnapshot(walsnap); err != nil {
		log.Fatal(err)
	}
}
Example #2
func (n *Node) loadAndStart(ctx context.Context, forceNewCluster bool) error {
	walDir := n.walDir()
	snapDir := n.snapDir()

	if err := os.MkdirAll(snapDir, 0700); err != nil {
		return fmt.Errorf("create snapshot directory error: %v", err)
	}

	// Create a snapshotter
	n.snapshotter = snap.New(snapDir)

	if !wal.Exist(walDir) {
		return errNoWAL
	}

	// Load snapshot data
	snapshot, err := n.snapshotter.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		return err
	}

	if snapshot != nil {
		// Load the snapshot data into the store
		if err := n.restoreFromSnapshot(snapshot.Data, forceNewCluster); err != nil {
			return err
		}
	}

	// Read logs to fully catch up store
	if err := n.readWAL(ctx, snapshot, forceNewCluster); err != nil {
		return err
	}

	return nil
}
Example #3
File: snapwrap.go Project: harche/docker
// New returns a new Snapshotter that wraps snap.New with the given encrypter and decrypter
func (sc snapCryptor) New(dirpath string) Snapshotter {
	return &wrappedSnap{
		Snapshotter: snap.New(dirpath),
		encrypter:   sc.encrypter,
		decrypter:   sc.decrypter,
	}
}
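
The wrapper above, like the rest of the examples in this listing, builds on a very small surface: snap.New(dir) returns a snapshotter whose SaveSnap persists a raftpb.Snapshot to the directory and whose Load returns the newest one, or snap.ErrNoSnapshot when none exists. The following is a minimal, self-contained sketch of that round trip; the import paths are an assumption based on the coreos/etcd era these examples appear to use, and the index/term values are arbitrary.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/coreos/etcd/raft/raftpb" // assumed import paths for this era of etcd
	"github.com/coreos/etcd/snap"
)

func main() {
	// Snapshots are plain files inside a directory; any writable directory works.
	dir, err := ioutil.TempDir("", "snapdir")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	ss := snap.New(dir)

	// SaveSnap persists a raftpb.Snapshot; Metadata.Index and Metadata.Term
	// record the raft log position the snapshot covers.
	if err := ss.SaveSnap(raftpb.Snapshot{
		Data:     []byte("application state"),
		Metadata: raftpb.SnapshotMetadata{Index: 1, Term: 1},
	}); err != nil {
		log.Fatal(err)
	}

	// Load returns the newest valid snapshot, or snap.ErrNoSnapshot if the
	// directory holds none -- the same check repeated throughout these examples.
	snapshot, err := ss.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		log.Fatal(err)
	}
	if snapshot != nil {
		fmt.Printf("loaded snapshot: index=%d term=%d\n",
			snapshot.Metadata.Index, snapshot.Metadata.Term)
	}
}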
Example #4
// handleBackup handles a request that intends to do a backup.
func handleBackup(c *cli.Context) {
	srcSnap := path.Join(c.String("data-dir"), "snap")
	destSnap := path.Join(c.String("backup-dir"), "snap")
	srcWAL := path.Join(c.String("data-dir"), "wal")
	destWAL := path.Join(c.String("backup-dir"), "wal")

	if err := os.MkdirAll(destSnap, 0700); err != nil {
		log.Fatalf("failed creating backup snapshot dir %v: %v", destSnap, err)
	}
	ss := snap.New(srcSnap)
	snapshot, err := ss.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		log.Fatal(err)
	}
	var index uint64
	if snapshot != nil {
		index = snapshot.Metadata.Index
		newss := snap.New(destSnap)
		if err := newss.SaveSnap(*snapshot); err != nil {
			log.Fatal(err)
		}
	}

	w, err := wal.OpenNotInUse(srcWAL, index)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	wmetadata, state, ents, err := w.ReadAll()
	if err != nil {
		log.Fatal(err)
	}
	var metadata etcdserverpb.Metadata
	pbutil.MustUnmarshal(&metadata, wmetadata)
	rand.Seed(time.Now().UnixNano())
	metadata.NodeID = etcdserver.GenID()
	metadata.ClusterID = etcdserver.GenID()

	neww, err := wal.Create(destWAL, pbutil.MustMarshal(&metadata))
	if err != nil {
		log.Fatal(err)
	}
	defer neww.Close()
	if err := neww.Save(state, ents); err != nil {
		log.Fatal(err)
	}
}
Example #5
// handleBackup handles a request that intends to do a backup.
func handleBackup(c *cli.Context) {
	srcSnap := path.Join(c.String("data-dir"), "snap")
	destSnap := path.Join(c.String("backup-dir"), "snap")
	srcWAL := path.Join(c.String("data-dir"), "wal")
	destWAL := path.Join(c.String("backup-dir"), "wal")

	if err := os.MkdirAll(destSnap, 0700); err != nil {
		log.Fatalf("failed creating backup snapshot dir %v: %v", destSnap, err)
	}
	ss := snap.New(srcSnap)
	snapshot, err := ss.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		log.Fatal(err)
	}
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
		newss := snap.New(destSnap)
		if err := newss.SaveSnap(*snapshot); err != nil {
			log.Fatal(err)
		}
	}

	w, err := wal.OpenNotInUse(srcWAL, walsnap)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	wmetadata, state, ents, err := w.ReadAll()
	if err != nil {
		log.Fatal(err)
	}
	var metadata etcdserverpb.Metadata
	pbutil.MustUnmarshal(&metadata, wmetadata)
	idgen := idutil.NewGenerator(0, time.Now())
	metadata.NodeID = idgen.Next()
	metadata.ClusterID = idgen.Next()

	neww, err := wal.Create(destWAL, pbutil.MustMarshal(&metadata))
	if err != nil {
		log.Fatal(err)
	}
	defer neww.Close()
	if err := neww.Save(state, ents); err != nil {
		log.Fatal(err)
	}
}
Example #6
func rebuildStoreV2() store.Store {
	waldir := migrateWALdir
	if len(waldir) == 0 {
		waldir = path.Join(migrateDatadir, "member", "wal")
	}
	snapdir := path.Join(migrateDatadir, "member", "snap")

	ss := snap.New(snapdir)
	snapshot, err := ss.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		ExitWithError(ExitError, err)
	}

	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}

	w, err := wal.OpenForRead(waldir, walsnap)
	if err != nil {
		ExitWithError(ExitError, err)
	}
	defer w.Close()

	_, _, ents, err := w.ReadAll()
	if err != nil {
		ExitWithError(ExitError, err)
	}

	st := store.New()
	if snapshot != nil {
		err := st.Recovery(snapshot.Data)
		if err != nil {
			ExitWithError(ExitError, err)
		}
	}

	applier := etcdserver.NewApplierV2(st, nil)
	for _, ent := range ents {
		if ent.Type != raftpb.EntryNormal {
			continue
		}

		var raftReq pb.InternalRaftRequest
		if !pbutil.MaybeUnmarshal(&raftReq, ent.Data) { // backward compatible
			var r pb.Request
			pbutil.MustUnmarshal(&r, ent.Data)
			applyRequest(&r, applier)
		} else {
			if raftReq.V2 != nil {
				req := raftReq.V2
				applyRequest(req, applier)
			}
		}
	}

	return st
}
Example #7
File: raft.go Project: mqliang/etcd
func (rc *raftNode) startRaft() {
	if !fileutil.Exist(rc.snapdir) {
		if err := os.Mkdir(rc.snapdir, 0750); err != nil {
			log.Fatalf("raftexample: cannot create dir for snapshot (%v)", err)
		}
	}
	rc.snapshotter = snap.New(rc.snapdir)
	rc.snapshotterReady <- rc.snapshotter

	oldwal := wal.Exist(rc.waldir)
	rc.wal = rc.replayWAL()

	rpeers := make([]raft.Peer, len(rc.peers))
	for i := range rpeers {
		rpeers[i] = raft.Peer{ID: uint64(i + 1)}
	}
	c := &raft.Config{
		ID:              uint64(rc.id),
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         rc.raftStorage,
		MaxSizePerMsg:   1024 * 1024,
		MaxInflightMsgs: 256,
	}

	if oldwal {
		rc.node = raft.RestartNode(c)
	} else {
		startPeers := rpeers
		if rc.join {
			startPeers = nil
		}
		rc.node = raft.StartNode(c, startPeers)
	}

	ss := &stats.ServerStats{}
	ss.Initialize()

	rc.transport = &rafthttp.Transport{
		ID:          types.ID(rc.id),
		ClusterID:   0x1000,
		Raft:        rc,
		ServerStats: ss,
		LeaderStats: stats.NewLeaderStats(strconv.Itoa(rc.id)),
		ErrorC:      make(chan error),
	}

	rc.transport.Start()
	for i := range rc.peers {
		if i+1 != rc.id {
			rc.transport.AddPeer(types.ID(i+1), []string{rc.peers[i]})
		}
	}

	go rc.serveRaft()
	go rc.serveChannels()
}
Example #8
func (n *Node) loadAndStart(ctx context.Context, forceNewCluster bool) error {
	walDir := n.walDir()
	snapDir := n.snapDir()

	if !fileutil.Exist(snapDir) {
		// If snapshots created by the etcd-v2 code exist, hard link
		// them at the new path. This prevents etcd-v2 from creating
		// snapshots that are visible to us, but out of sync with our
		// WALs, after a downgrade.
		legacySnapDir := n.legacySnapDir()
		if fileutil.Exist(legacySnapDir) {
			if err := migrateSnapshots(legacySnapDir, snapDir); err != nil {
				return err
			}
		} else if err := os.MkdirAll(snapDir, 0700); err != nil {
			return errors.Wrap(err, "failed to create snapshot directory")
		}
	}

	// Create a snapshotter
	n.snapshotter = snap.New(snapDir)

	if !wal.Exist(walDir) {
		// If wals created by the etcd-v2 wal code exist, copy them to
		// the new path to avoid adding backwards-incompatible entries
		// to those files.
		legacyWALDir := n.legacyWALDir()
		if !wal.Exist(legacyWALDir) {
			return errNoWAL
		}

		if err := migrateWALs(legacyWALDir, walDir); err != nil {
			return err
		}
	}

	// Load snapshot data
	snapshot, err := n.snapshotter.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		return err
	}

	if snapshot != nil {
		// Load the snapshot data into the store
		if err := n.restoreFromSnapshot(snapshot.Data, forceNewCluster); err != nil {
			return err
		}
	}

	// Read logs to fully catch up store
	if err := n.readWAL(ctx, snapshot, forceNewCluster); err != nil {
		return err
	}

	return nil
}
Example #9
func (n *Node) loadAndStart(ctx context.Context, forceNewCluster bool) error {
	walDir := n.walDir()
	snapDir := n.snapDir()

	if err := os.MkdirAll(snapDir, 0700); err != nil {
		return fmt.Errorf("create snapshot directory error: %v", err)
	}

	// Create a snapshotter
	n.snapshotter = snap.New(snapDir)

	if !wal.Exist(walDir) {
		raftNode := &api.RaftMember{
			RaftID: n.Config.ID,
			Addr:   n.Address,
		}
		metadata, err := raftNode.Marshal()
		if err != nil {
			return fmt.Errorf("error marshalling raft node: %v", err)
		}
		n.wal, err = wal.Create(walDir, metadata)
		if err != nil {
			return fmt.Errorf("create wal error: %v", err)
		}

		n.cluster.AddMember(&membership.Member{RaftMember: raftNode})
		n.startNodePeers = []raft.Peer{{ID: n.Config.ID, Context: metadata}}

		return nil
	}

	// Load snapshot data
	snapshot, err := n.snapshotter.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		return err
	}

	if snapshot != nil {
		// Load the snapshot data into the store
		if err := n.restoreFromSnapshot(snapshot.Data, forceNewCluster); err != nil {
			return err
		}
	}

	// Read logs to fully catch up store
	if err := n.readWAL(ctx, snapshot, forceNewCluster); err != nil {
		return err
	}

	n.Node = raft.RestartNode(n.Config)
	return nil
}
Example #10
func testSnapshotSend(t *testing.T, sm *snap.Message) (bool, []os.FileInfo) {
	d, err := ioutil.TempDir(os.TempDir(), "snapdir")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(d)

	r := &fakeRaft{}
	tr := &Transport{pipelineRt: &http.Transport{}, ClusterID: types.ID(1), Raft: r}
	ch := make(chan struct{}, 1)
	h := &syncHandler{newSnapshotHandler(tr, r, snap.New(d), types.ID(1)), ch}
	srv := httptest.NewServer(h)
	defer srv.Close()

	picker := mustNewURLPicker(t, []string{srv.URL})
	snapsend := newSnapshotSender(tr, picker, types.ID(1), newPeerStatus(types.ID(1)))
	defer snapsend.stop()

	snapsend.send(*sm)

	sent := false
	select {
	case <-time.After(time.Second):
		t.Fatalf("timed out sending snapshot")
	case sent = <-sm.CloseNotify():
	}

	// wait for handler to finish accepting snapshot
	<-ch

	files, rerr := ioutil.ReadDir(d)
	if rerr != nil {
		t.Fatal(rerr)
	}
	return sent, files
}
Example #11
File: server.go Project: robszumski/etcd
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
	st := store.New()
	var w *wal.WAL
	var n raft.Node
	var s *raft.MemoryStorage
	var id types.ID
	walVersion, err := wal.DetectVersion(cfg.DataDir)
	if err != nil {
		return nil, err
	}
	if walVersion == wal.WALUnknown {
		return nil, fmt.Errorf("unknown wal version in data dir %s", cfg.DataDir)
	}
	haveWAL := walVersion != wal.WALNotExist

	if haveWAL && walVersion != wal.WALv0_5 {
		err := UpgradeWAL(cfg, walVersion)
		if err != nil {
			return nil, err
		}
	}

	ss := snap.New(cfg.SnapDir())
	switch {
	case !haveWAL && !cfg.NewCluster:
		us := getOtherPeerURLs(cfg.Cluster, cfg.Name)
		existingCluster, err := GetClusterFromPeers(us)
		if err != nil {
			return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", err)
		}
		if err := ValidateClusterAndAssignIDs(cfg.Cluster, existingCluster); err != nil {
			return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
		}
		cfg.Cluster.SetID(existingCluster.id)
		cfg.Cluster.SetStore(st)
		cfg.Print()
		id, n, s, w = startNode(cfg, nil)
	case !haveWAL && cfg.NewCluster:
		if err := cfg.VerifyBootstrapConfig(); err != nil {
			return nil, err
		}
		if err := checkClientURLsEmptyFromPeers(cfg.Cluster, cfg.Name); err != nil {
			return nil, err
		}
		m := cfg.Cluster.MemberByName(cfg.Name)
		if cfg.ShouldDiscover() {
			s, err := discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.Cluster.String())
			if err != nil {
				return nil, err
			}
			if cfg.Cluster, err = NewClusterFromString(cfg.Cluster.token, s); err != nil {
				return nil, err
			}
		}
		cfg.Cluster.SetStore(st)
		cfg.PrintWithInitial()
		id, n, s, w = startNode(cfg, cfg.Cluster.MemberIDs())
	case haveWAL:
		if cfg.ShouldDiscover() {
			log.Printf("etcdserver: discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
		}
		var index uint64
		snapshot, err := ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			return nil, err
		}
		if snapshot != nil {
			if err := st.Recovery(snapshot.Data); err != nil {
				log.Panicf("etcdserver: recovered store from snapshot error: %v", err)
			}
			log.Printf("etcdserver: recovered store from snapshot at index %d", snapshot.Metadata.Index)
			index = snapshot.Metadata.Index
		}
		cfg.Cluster = NewClusterFromStore(cfg.Cluster.token, st)
		cfg.Print()
		if snapshot != nil {
			log.Printf("etcdserver: loaded cluster information from store: %s", cfg.Cluster)
		}
		if !cfg.ForceNewCluster {
			id, n, s, w = restartNode(cfg, index+1, snapshot)
		} else {
			id, n, s, w = restartAsStandaloneNode(cfg, index+1, snapshot)
		}
	default:
		return nil, fmt.Errorf("unsupported bootstrap config")
	}

	sstats := &stats.ServerStats{
		Name: cfg.Name,
		ID:   id.String(),
	}
	lstats := stats.NewLeaderStats(id.String())

	srv := &EtcdServer{
		cfg:         cfg,
		store:       st,
		node:        n,
		raftStorage: s,
		id:          id,
		attributes:  Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
		Cluster:     cfg.Cluster,
		storage:     NewStorage(w, ss),
		stats:       sstats,
		lstats:      lstats,
		Ticker:      time.Tick(100 * time.Millisecond),
		SyncTicker:  time.Tick(500 * time.Millisecond),
		snapCount:   cfg.SnapCount,
	}
	srv.sendhub = newSendHub(cfg.Transport, cfg.Cluster, srv, sstats, lstats)
	for _, m := range getOtherMembers(cfg.Cluster, cfg.Name) {
		srv.sendhub.Add(m)
	}
	return srv, nil
}
Example #12
File: main.go Project: luxas/flannel
func main() {
	from := flag.String("data-dir", "", "")
	snapfile := flag.String("start-snap", "", "The base name of snapshot file to start dumping")
	index := flag.Uint64("start-index", 0, "The index to start dumping")
	flag.Parse()
	if *from == "" {
		log.Fatal("Must provide -data-dir flag.")
	}
	if *snapfile != "" && *index != 0 {
		log.Fatal("start-snap and start-index flags cannot be used together.")
	}

	var (
		walsnap  walpb.Snapshot
		snapshot *raftpb.Snapshot
		err      error
	)

	isIndex := *index != 0

	if isIndex {
		fmt.Printf("Start dumping log entries from index %d.\n", *index)
		walsnap.Index = *index
	} else {
		if *snapfile == "" {
			ss := snap.New(snapDir(*from))
			snapshot, err = ss.Load()
		} else {
			snapshot, err = snap.Read(path.Join(snapDir(*from), *snapfile))
		}

		switch err {
		case nil:
			walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
			nodes := genIDSlice(snapshot.Metadata.ConfState.Nodes)
			fmt.Printf("Snapshot:\nterm=%d index=%d nodes=%s\n",
				walsnap.Term, walsnap.Index, nodes)
		case snap.ErrNoSnapshot:
			fmt.Printf("Snapshot:\nempty\n")
		default:
			log.Fatalf("Failed loading snapshot: %v", err)
		}
		fmt.Println("Start dupmping log entries from snapshot.")
	}

	w, err := wal.OpenForRead(walDir(*from), walsnap)
	if err != nil {
		log.Fatalf("Failed opening WAL: %v", err)
	}
	wmetadata, state, ents, err := w.ReadAll()
	w.Close()
	if err != nil && (!isIndex || err != wal.ErrSnapshotNotFound) {
		log.Fatalf("Failed reading WAL: %v", err)
	}
	id, cid := parseWALMetadata(wmetadata)
	vid := types.ID(state.Vote)
	fmt.Printf("WAL metadata:\nnodeID=%s clusterID=%s term=%d commitIndex=%d vote=%s\n",
		id, cid, state.Term, state.Commit, vid)

	fmt.Printf("WAL entries:\n")
	fmt.Printf("lastIndex=%d\n", ents[len(ents)-1].Index)
	fmt.Printf("%4s\t%10s\ttype\tdata\n", "term", "index")
	for _, e := range ents {
		msg := fmt.Sprintf("%4d\t%10d", e.Term, e.Index)
		switch e.Type {
		case raftpb.EntryNormal:
			msg = fmt.Sprintf("%s\tnorm", msg)

			var rr etcdserverpb.InternalRaftRequest
			if err := rr.Unmarshal(e.Data); err == nil {
				msg = fmt.Sprintf("%s\t%s", msg, rr.String())
				break
			}

			var r etcdserverpb.Request
			if err := r.Unmarshal(e.Data); err == nil {
				switch r.Method {
				case "":
					msg = fmt.Sprintf("%s\tnoop", msg)
				case "SYNC":
					msg = fmt.Sprintf("%s\tmethod=SYNC time=%q", msg, time.Unix(0, r.Time))
				case "QGET", "DELETE":
					msg = fmt.Sprintf("%s\tmethod=%s path=%s", msg, r.Method, excerpt(r.Path, 64, 64))
				default:
					msg = fmt.Sprintf("%s\tmethod=%s path=%s val=%s", msg, r.Method, excerpt(r.Path, 64, 64), excerpt(r.Val, 128, 0))
				}
				break
			}
			msg = fmt.Sprintf("%s\t???", msg)
		case raftpb.EntryConfChange:
			msg = fmt.Sprintf("%s\tconf", msg)
			var r raftpb.ConfChange
			if err := r.Unmarshal(e.Data); err != nil {
				msg = fmt.Sprintf("%s\t???", msg)
			} else {
				msg = fmt.Sprintf("%s\tmethod=%s id=%s", msg, r.Type, types.ID(r.NodeID))
			}
		}
		fmt.Println(msg)
	}
}
Example #13
func rebuild(datadir string) ([]byte, *raftpb.HardState, store.Store, error) {
	waldir := path.Join(datadir, "member", "wal")
	snapdir := path.Join(datadir, "member", "snap")

	ss := snap.New(snapdir)
	snapshot, err := ss.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		return nil, nil, nil, err
	}

	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}

	w, err := wal.OpenForRead(waldir, walsnap)
	if err != nil {
		return nil, nil, nil, err
	}
	defer w.Close()

	meta, hardstate, ents, err := w.ReadAll()
	if err != nil {
		return nil, nil, nil, err
	}

	st := store.New(etcdserver.StoreClusterPrefix, etcdserver.StoreKeysPrefix)
	if snapshot != nil {
		err := st.Recovery(snapshot.Data)
		if err != nil {
			return nil, nil, nil, err
		}
	}

	cluster := membership.NewCluster("")
	cluster.SetStore(st)
	cluster.Recover(func(*semver.Version) {})

	applier := etcdserver.NewApplierV2(st, cluster)
	for _, ent := range ents {
		if ent.Type == raftpb.EntryConfChange {
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, ent.Data)
			switch cc.Type {
			case raftpb.ConfChangeAddNode:
				m := new(membership.Member)
				if err := json.Unmarshal(cc.Context, m); err != nil {
					return nil, nil, nil, err
				}
				cluster.AddMember(m)
			case raftpb.ConfChangeRemoveNode:
				id := types.ID(cc.NodeID)
				cluster.RemoveMember(id)
			case raftpb.ConfChangeUpdateNode:
				m := new(membership.Member)
				if err := json.Unmarshal(cc.Context, m); err != nil {
					return nil, nil, nil, err
				}
				cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes)
			}
			continue
		}

		var raftReq pb.InternalRaftRequest
		if !pbutil.MaybeUnmarshal(&raftReq, ent.Data) { // backward compatible
			var r pb.Request
			pbutil.MustUnmarshal(&r, ent.Data)
			applyRequest(&r, applier)
		} else {
			if raftReq.V2 != nil {
				req := raftReq.V2
				applyRequest(req, applier)
			}
		}
	}

	return meta, &hardstate, st, nil
}
Example #14
func main() {
	flag.Parse()
	if len(*migrateDatadir) == 0 {
		glog.Fatal("need to set '--data-dir'")
	}
	dbpath := path.Join(*migrateDatadir, "member", "snap", "db")

	// etcd3 store backend. We will use it to parse v3 data files and extract information.
	be := backend.NewDefaultBackend(dbpath)
	tx := be.BatchTx()

	// etcd2 store backend. We will use v3 data to update this and then save snapshot to disk.
	st := store.New(etcdserver.StoreClusterPrefix, etcdserver.StoreKeysPrefix)
	expireTime := time.Now().Add(*ttl)

	tx.Lock()
	err := tx.UnsafeForEach([]byte("key"), func(k, v []byte) error {
		kv := &mvccpb.KeyValue{}
		kv.Unmarshal(v)

		// This is a compact key; skip it.
		if !strings.HasPrefix(string(kv.Key), "/") {
			return nil
		}

		ttlOpt := store.TTLOptionSet{}
		if kv.Lease != 0 {
			ttlOpt = store.TTLOptionSet{ExpireTime: expireTime}
		}

		if !isTombstone(k) {
			sk := path.Join(strings.Trim(etcdserver.StoreKeysPrefix, "/"), string(kv.Key))
			_, err := st.Set(sk, false, string(kv.Value), ttlOpt)
			if err != nil {
				return err
			}
		} else {
			st.Delete(string(kv.Key), false, false)
		}

		return nil
	})
	if err != nil {
		glog.Fatal(err)
	}
	tx.Unlock()

	if err := traverseAndDeleteEmptyDir(st, "/"); err != nil {
		glog.Fatal(err)
	}

	// rebuild cluster state.
	metadata, hardstate, oldSt, err := rebuild(*migrateDatadir)
	if err != nil {
		glog.Fatal(err)
	}

	// The following is low-level logic that saves the metadata and data into a v2 snapshot.
	backupPath := *migrateDatadir + ".rollback.backup"
	if err := os.Rename(*migrateDatadir, backupPath); err != nil {
		glog.Fatal(err)
	}
	if err := os.MkdirAll(path.Join(*migrateDatadir, "member", "snap"), 0700); err != nil {
		glog.Fatal(err)
	}
	walDir := path.Join(*migrateDatadir, "member", "wal")

	w, err := wal.Create(walDir, metadata)
	if err != nil {
		glog.Fatal(err)
	}
	err = w.SaveSnapshot(walpb.Snapshot{Index: hardstate.Commit, Term: hardstate.Term})
	if err != nil {
		glog.Fatal(err)
	}
	w.Close()

	event, err := oldSt.Get(etcdserver.StoreClusterPrefix, true, false)
	if err != nil {
		glog.Fatal(err)
	}
	// nodes (members info) for ConfState
	nodes := []uint64{}
	traverseMetadata(event.Node, func(n *store.NodeExtern) {
		if n.Key != etcdserver.StoreClusterPrefix {
			// update store metadata
			v := ""
			if !n.Dir {
				v = *n.Value
			}
			if n.Key == path.Join(etcdserver.StoreClusterPrefix, "version") {
				v = rollbackVersion
			}
			if _, err := st.Set(n.Key, n.Dir, v, store.TTLOptionSet{}); err != nil {
				glog.Fatal(err)
			}

			// update nodes
			fields := strings.Split(n.Key, "/")
			if len(fields) == 4 && fields[2] == "members" {
				nodeID, err := strconv.ParseUint(fields[3], 16, 64)
				if err != nil {
					glog.Fatalf("failed to parse member ID (%s): %v", fields[3], err)
				}
				nodes = append(nodes, nodeID)
			}
		}
	})

	data, err := st.Save()
	if err != nil {
		glog.Fatal(err)
	}
	raftSnap := raftpb.Snapshot{
		Data: data,
		Metadata: raftpb.SnapshotMetadata{
			Index: hardstate.Commit,
			Term:  hardstate.Term,
			ConfState: raftpb.ConfState{
				Nodes: nodes,
			},
		},
	}
	snapshotter := snap.New(path.Join(*migrateDatadir, "member", "snap"))
	if err := snapshotter.SaveSnap(raftSnap); err != nil {
		glog.Fatal(err)
	}
	fmt.Println("Finished successfully")
}
Example #15
File: transport.go Project: nhr/origin
func (s *snapTransporter) SendSnapshot(m snap.Message) {
	ss := snap.New(s.snapDir)
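	// Store the snapshot's database payload in the snapshot directory under the given index.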
	ss.SaveDBFrom(m.ReadCloser, m.Snapshot.Metadata.Index+1)
	m.CloseWithError(nil)
	s.snapDoneC <- m
}
Example #16
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
	st := store.New(StoreClusterPrefix, StoreKeysPrefix)
	var w *wal.WAL
	var n raft.Node
	var s *raft.MemoryStorage
	var id types.ID
	var cl *cluster

	// Run the migrations.
	dataVer, err := version.DetectDataDir(cfg.DataDir)
	if err != nil {
		return nil, err
	}
	if err := upgradeDataDir(cfg.DataDir, cfg.Name, dataVer); err != nil {
		return nil, err
	}

	haveWAL := wal.Exist(cfg.WALDir())
	ss := snap.New(cfg.SnapDir())

	var remotes []*Member
	switch {
	case !haveWAL && !cfg.NewCluster:
		if err := cfg.VerifyJoinExisting(); err != nil {
			return nil, err
		}
		cl, err = newClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
		if err != nil {
			return nil, err
		}
		existingCluster, err := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), cfg.Transport)
		if err != nil {
			return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", err)
		}
		if err := ValidateClusterAndAssignIDs(cl, existingCluster); err != nil {
			return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
		}
		if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, cfg.Transport) {
			return nil, fmt.Errorf("incomptible with current running cluster")
		}

		remotes = existingCluster.Members()
		cl.SetID(existingCluster.id)
		cl.SetStore(st)
		cfg.Print()
		id, n, s, w = startNode(cfg, cl, nil)
	case !haveWAL && cfg.NewCluster:
		if err := cfg.VerifyBootstrap(); err != nil {
			return nil, err
		}
		cl, err = newClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
		if err != nil {
			return nil, err
		}
		m := cl.MemberByName(cfg.Name)
		if isMemberBootstrapped(cl, cfg.Name, cfg.Transport) {
			return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
		}
		if cfg.ShouldDiscover() {
			str, err := discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
			if err != nil {
				return nil, err
			}
			urlsmap, err := types.NewURLsMap(str)
			if err != nil {
				return nil, err
			}
			if checkDuplicateURL(urlsmap) {
				return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
			}
			if cl, err = newClusterFromURLsMap(cfg.InitialClusterToken, urlsmap); err != nil {
				return nil, err
			}
		}
		cl.SetStore(st)
		cfg.PrintWithInitial()
		id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
	case haveWAL:
		if err := fileutil.IsDirWriteable(cfg.DataDir); err != nil {
			return nil, fmt.Errorf("cannot write to data directory: %v", err)
		}

		if err := fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
			return nil, fmt.Errorf("cannot write to member directory: %v", err)
		}

		if cfg.ShouldDiscover() {
			plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
		}
		snapshot, err := ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			return nil, err
		}
		if snapshot != nil {
			if err := st.Recovery(snapshot.Data); err != nil {
				plog.Panicf("recovered store from snapshot error: %v", err)
			}
			plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index)
		}
		cfg.Print()
		if snapshot != nil {
			plog.Infof("loaded cluster information from store: %s", cl)
		}
		if !cfg.ForceNewCluster {
			id, cl, n, s, w = restartNode(cfg, snapshot)
		} else {
			id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot)
		}
		cl.SetStore(st)
		cl.Recover()
	default:
		return nil, fmt.Errorf("unsupported bootstrap config")
	}

	sstats := &stats.ServerStats{
		Name: cfg.Name,
		ID:   id.String(),
	}
	sstats.Initialize()
	lstats := stats.NewLeaderStats(id.String())

	srv := &EtcdServer{
		cfg:       cfg,
		snapCount: cfg.SnapCount,
		errorc:    make(chan error, 1),
		store:     st,
		r: raftNode{
			Node:        n,
			ticker:      time.Tick(time.Duration(cfg.TickMs) * time.Millisecond),
			raftStorage: s,
			storage:     NewStorage(w, ss),
		},
		id:            id,
		attributes:    Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
		cluster:       cl,
		stats:         sstats,
		lstats:        lstats,
		SyncTicker:    time.Tick(500 * time.Millisecond),
		reqIDGen:      idutil.NewGenerator(uint8(id), time.Now()),
		forceVersionC: make(chan struct{}),
	}

	// TODO: move transport initialization near the definition of remote
	tr := rafthttp.NewTransporter(cfg.Transport, id, cl.ID(), srv, srv.errorc, sstats, lstats)
	// add all remotes into transport
	for _, m := range remotes {
		if m.ID != id {
			tr.AddRemote(m.ID, m.PeerURLs)
		}
	}
	for _, m := range cl.Members() {
		if m.ID != id {
			tr.AddPeer(m.ID, m.PeerURLs)
		}
	}
	srv.r.transport = tr
	return srv, nil
}
Example #17
File: server.go Project: digideskio/etcd
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) *EtcdServer {
	m := cfg.Cluster.FindName(cfg.Name)
	if m == nil {
		// Should never happen
		log.Fatalf("could not find name %v in cluster!", cfg.Name)
	}
	snapdir := path.Join(cfg.DataDir, "snap")
	if err := os.MkdirAll(snapdir, privateDirMode); err != nil {
		log.Fatalf("etcdserver: cannot create snapshot directory: %v", err)
	}
	ss := snap.New(snapdir)
	st := store.New()
	var w *wal.WAL
	var n raft.Node
	var err error
	waldir := path.Join(cfg.DataDir, "wal")
	if !wal.Exist(waldir) {
		if w, err = wal.Create(waldir); err != nil {
			log.Fatal(err)
		}
		n = raft.StartNode(m.ID, cfg.Cluster.IDs(), 10, 1)
	} else {
		var index int64
		snapshot, err := ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			log.Fatal(err)
		}
		if snapshot != nil {
			log.Printf("etcdserver: restart from snapshot at index %d", snapshot.Index)
			st.Recovery(snapshot.Data)
			index = snapshot.Index
		}

		// restart a node from previous wal
		if w, err = wal.OpenAtIndex(waldir, index); err != nil {
			log.Fatal(err)
		}
		wid, st, ents, err := w.ReadAll()
		if err != nil {
			log.Fatal(err)
		}
		// TODO(xiangli): save/recovery nodeID?
		if wid != 0 {
			log.Fatalf("unexpected nodeid %d: nodeid should always be zero until we save nodeid into wal", wid)
		}
		n = raft.RestartNode(m.ID, cfg.Cluster.IDs(), 10, 1, snapshot, st, ents)
	}

	cls := NewClusterStore(st, *cfg.Cluster)

	s := &EtcdServer{
		store: st,
		node:  n,
		name:  cfg.Name,
		storage: struct {
			*wal.WAL
			*snap.Snapshotter
		}{w, ss},
		send:         Sender(cfg.Transport, cls),
		clientURLs:   cfg.ClientURLs,
		ticker:       time.Tick(100 * time.Millisecond),
		syncTicker:   time.Tick(500 * time.Millisecond),
		snapCount:    cfg.SnapCount,
		ClusterStore: cls,
	}
	return s
}
Example #18
func Migrate4To2(dataDir string, name string) error {
	// prep new directories
	sd2 := snapDir2(dataDir)
	if err := os.MkdirAll(sd2, 0700); err != nil {
		return fmt.Errorf("failed creating snapshot directory %s: %v", sd2, err)
	}

	// read v0.4 data
	snap4, err := DecodeLatestSnapshot4FromDir(snapDir4(dataDir))
	if err != nil {
		return err
	}

	cfg4, err := DecodeConfig4FromFile(cfgFile4(dataDir))
	if err != nil {
		return err
	}

	ents4, err := DecodeLog4FromFile(logFile4(dataDir))
	if err != nil {
		return err
	}

	nodeIDs := ents4.NodeIDs()
	nodeID := GuessNodeID(nodeIDs, snap4, cfg4, name)

	if nodeID == 0 {
		return fmt.Errorf("Couldn't figure out the node ID from the log or flags, cannot convert")
	}

	metadata := pbutil.MustMarshal(&pb.Metadata{NodeID: nodeID, ClusterID: 0x04add5})
	wd2 := walDir2(dataDir)
	w, err := wal.Create(wd2, metadata)
	if err != nil {
		return fmt.Errorf("failed initializing wal at %s: %v", wd2, err)
	}
	defer w.Close()

	// transform v0.4 data
	var snap2 *raftpb.Snapshot
	if snap4 == nil {
		log.Printf("No snapshot found")
	} else {
		log.Printf("Found snapshot: lastIndex=%d", snap4.LastIndex)

		snap2 = snap4.Snapshot2()
	}

	st2 := cfg4.HardState2()

	// If we've got the most recent snapshot, we can use its committed index. Still likely less than the current actual index, but worth it for the replay.
	if snap2 != nil {
		st2.Commit = snap2.Metadata.Index
	}

	ents2, err := Entries4To2(ents4)
	if err != nil {
		return err
	}

	ents2Len := len(ents2)
	log.Printf("Found %d log entries: firstIndex=%d lastIndex=%d", ents2Len, ents2[0].Index, ents2[ents2Len-1].Index)

	// set the state term to the biggest term we have ever seen,
	// so the term of future entries will not be the same as the term of old ones.
	st2.Term = ents2[ents2Len-1].Term

	// explicitly prepend an empty entry as the WAL code expects it
	ents2 = append(make([]raftpb.Entry, 1), ents2...)

	if err = w.Save(st2, ents2); err != nil {
		return err
	}
	log.Printf("Log migration successful")

	// migrate snapshot (if necessary) and logs
	var walsnap walpb.Snapshot
	if snap2 != nil {
		walsnap.Index, walsnap.Term = snap2.Metadata.Index, snap2.Metadata.Term
		ss := snap.New(sd2)
		if err := ss.SaveSnap(*snap2); err != nil {
			return err
		}
		log.Printf("Snapshot migration successful")
	}
	if err = w.SaveSnapshot(walsnap); err != nil {
		return err
	}

	return nil
}
Example #19
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) *EtcdServer {
	if err := os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil {
		log.Fatalf("etcdserver: cannot create snapshot directory: %v", err)
	}
	ss := snap.New(cfg.SnapDir())
	st := store.New()
	var w *wal.WAL
	var n raft.Node
	var id uint64
	haveWAL := wal.Exist(cfg.WALDir())
	switch {
	case !haveWAL && cfg.ClusterState == ClusterStateValueExisting:
		cl, err := GetClusterFromPeers(cfg.Cluster.PeerURLs())
		if err != nil {
			log.Fatal(err)
		}
		if err := cfg.Cluster.ValidateAndAssignIDs(cl.Members()); err != nil {
			log.Fatalf("etcdserver: %v", err)
		}
		cfg.Cluster.SetID(cl.id)
		cfg.Cluster.SetStore(st)
		id, n, w = startNode(cfg, nil)
	case !haveWAL && cfg.ClusterState == ClusterStateValueNew:
		if err := cfg.VerifyBootstrapConfig(); err != nil {
			log.Fatalf("etcdserver: %v", err)
		}
		m := cfg.Cluster.MemberByName(cfg.Name)
		if cfg.ShouldDiscover() {
			d, err := discovery.New(cfg.DiscoveryURL, m.ID, cfg.Cluster.String())
			if err != nil {
				log.Fatalf("etcdserver: cannot init discovery %v", err)
			}
			s, err := d.Discover()
			if err != nil {
				log.Fatalf("etcdserver: %v", err)
			}
			if cfg.Cluster, err = NewClusterFromString(cfg.Cluster.name, s); err != nil {
				log.Fatalf("etcdserver: %v", err)
			}
		}
		cfg.Cluster.SetStore(st)
		id, n, w = startNode(cfg, cfg.Cluster.MemberIDs())
	case haveWAL:
		if cfg.ShouldDiscover() {
			log.Printf("etcdserver: warn: ignoring discovery: etcd has already been initialized and has a valid log in %q", cfg.WALDir())
		}
		var index uint64
		snapshot, err := ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			log.Fatal(err)
		}
		if snapshot != nil {
			log.Printf("etcdserver: recovering from snapshot at index %d", snapshot.Index)
			st.Recovery(snapshot.Data)
			index = snapshot.Index
		}
		cfg.Cluster = NewClusterFromStore(cfg.Cluster.name, st)
		id, n, w = restartNode(cfg, index, snapshot)
	default:
		log.Fatalf("etcdserver: unsupported bootstrap config")
	}

	sstats := &stats.ServerStats{
		Name: cfg.Name,
		ID:   strutil.IDAsHex(id),
	}
	lstats := stats.NewLeaderStats(strutil.IDAsHex(id))

	s := &EtcdServer{
		store:      st,
		node:       n,
		id:         id,
		attributes: Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
		Cluster:    cfg.Cluster,
		storage: struct {
			*wal.WAL
			*snap.Snapshotter
		}{w, ss},
		stats:      sstats,
		lstats:     lstats,
		send:       Sender(cfg.Transport, cfg.Cluster, sstats, lstats),
		Ticker:     time.Tick(100 * time.Millisecond),
		SyncTicker: time.Tick(500 * time.Millisecond),
		snapCount:  cfg.SnapCount,
	}
	return s
}
Example #20
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
	st := store.New(StoreAdminPrefix, StoreKeysPrefix)
	var w *wal.WAL
	var n raft.Node
	var s *raft.MemoryStorage
	var id types.ID
	walVersion, err := wal.DetectVersion(cfg.DataDir)
	if err != nil {
		return nil, err
	}
	if walVersion == wal.WALUnknown {
		return nil, fmt.Errorf("unknown wal version in data dir %s", cfg.DataDir)
	}
	haveWAL := walVersion != wal.WALNotExist

	ss := snap.New(cfg.SnapDir())
	switch {
	case !haveWAL && !cfg.NewCluster:
		us := getOtherPeerURLs(cfg.Cluster, cfg.Name)
		existingCluster, err := GetClusterFromPeers(us, cfg.Transport)
		if err != nil {
			return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", err)
		}
		if err := ValidateClusterAndAssignIDs(cfg.Cluster, existingCluster); err != nil {
			return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
		}
		cfg.Cluster.SetID(existingCluster.id)
		cfg.Cluster.SetStore(st)
		cfg.Print()
		id, n, s, w = startNode(cfg, nil)
	case !haveWAL && cfg.NewCluster:
		if err := cfg.VerifyBootstrapConfig(); err != nil {
			return nil, err
		}
		m := cfg.Cluster.MemberByName(cfg.Name)
		if isBootstrapped(cfg) {
			return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
		}
		if cfg.ShouldDiscover() {
			s, err := discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.Cluster.String())
			if err != nil {
				return nil, err
			}
			if cfg.Cluster, err = NewClusterFromString(cfg.Cluster.token, s); err != nil {
				return nil, err
			}
		}
		cfg.Cluster.SetStore(st)
		cfg.PrintWithInitial()
		id, n, s, w = startNode(cfg, cfg.Cluster.MemberIDs())
	case haveWAL:
		if walVersion != wal.WALv0_5 {
			if err := upgradeWAL(cfg, walVersion); err != nil {
				return nil, err
			}
		}

		if cfg.ShouldDiscover() {
			log.Printf("etcdserver: discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
		}
		snapshot, err := ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			return nil, err
		}
		if snapshot != nil {
			if err := st.Recovery(snapshot.Data); err != nil {
				log.Panicf("etcdserver: recovered store from snapshot error: %v", err)
			}
			log.Printf("etcdserver: recovered store from snapshot at index %d", snapshot.Metadata.Index)
		}
		cfg.Cluster = NewClusterFromStore(cfg.Cluster.token, st)
		cfg.Print()
		if snapshot != nil {
			log.Printf("etcdserver: loaded cluster information from store: %s", cfg.Cluster)
		}
		if !cfg.ForceNewCluster {
			id, n, s, w = restartNode(cfg, snapshot)
		} else {
			id, n, s, w = restartAsStandaloneNode(cfg, snapshot)
		}
	default:
		return nil, fmt.Errorf("unsupported bootstrap config")
	}

	sstats := &stats.ServerStats{
		Name: cfg.Name,
		ID:   id.String(),
	}
	lstats := stats.NewLeaderStats(id.String())

	srv := &EtcdServer{
		cfg:    cfg,
		errorc: make(chan error, 1),
		store:  st,
		r: raftNode{
			Node:        n,
			snapCount:   cfg.SnapCount,
			ticker:      time.Tick(time.Duration(cfg.TickMs) * time.Millisecond),
			raftStorage: s,
			storage:     NewStorage(w, ss),
		},
		id:         id,
		attributes: Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
		Cluster:    cfg.Cluster,
		stats:      sstats,
		lstats:     lstats,
		SyncTicker: time.Tick(500 * time.Millisecond),
		reqIDGen:   idutil.NewGenerator(uint8(id), time.Now()),
	}

	tr := rafthttp.NewTransporter(cfg.Transport, id, cfg.Cluster.ID(), srv, srv.errorc, sstats, lstats)
	// add all the remote members into the transport
	for _, m := range cfg.Cluster.Members() {
		if m.ID != id {
			tr.AddPeer(m.ID, m.PeerURLs)
		}
	}
	srv.r.transport = tr
	return srv, nil
}
Example #21
func rebuildStoreV2() (store.Store, uint64) {
	var index uint64
	cl := membership.NewCluster("")

	waldir := migrateWALdir
	if len(waldir) == 0 {
		waldir = path.Join(migrateDatadir, "member", "wal")
	}
	snapdir := path.Join(migrateDatadir, "member", "snap")

	ss := snap.New(snapdir)
	snapshot, err := ss.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		ExitWithError(ExitError, err)
	}

	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
		index = snapshot.Metadata.Index
	}

	w, err := wal.OpenForRead(waldir, walsnap)
	if err != nil {
		ExitWithError(ExitError, err)
	}
	defer w.Close()

	_, _, ents, err := w.ReadAll()
	if err != nil {
		ExitWithError(ExitError, err)
	}

	st := store.New()
	if snapshot != nil {
		err := st.Recovery(snapshot.Data)
		if err != nil {
			ExitWithError(ExitError, err)
		}
	}

	cl.SetStore(st)
	cl.Recover(api.UpdateCapability)

	applier := etcdserver.NewApplierV2(st, cl)
	for _, ent := range ents {
		if ent.Type == raftpb.EntryConfChange {
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, ent.Data)
			applyConf(cc, cl)
			continue
		}

		var raftReq pb.InternalRaftRequest
		if !pbutil.MaybeUnmarshal(&raftReq, ent.Data) { // backward compatible
			var r pb.Request
			pbutil.MustUnmarshal(&r, ent.Data)
			applyRequest(&r, applier)
		} else {
			if raftReq.V2 != nil {
				req := raftReq.V2
				applyRequest(req, applier)
			}
		}
		if ent.Index > index {
			index = ent.Index
		}
	}

	return st, index
}
Example #22
// makeWALAndSnap creates a WAL and a snapshot for the initial cluster
func makeWALAndSnap(waldir, snapdir string, cl *membership.RaftCluster) {
	if err := fileutil.CreateDirAll(waldir); err != nil {
		ExitWithError(ExitIO, err)
	}

	// add members again to persist them to the store we create.
	st := store.New(etcdserver.StoreClusterPrefix, etcdserver.StoreKeysPrefix)
	cl.SetStore(st)
	for _, m := range cl.Members() {
		cl.AddMember(m)
	}

	m := cl.MemberByName(restoreName)
	md := &etcdserverpb.Metadata{NodeID: uint64(m.ID), ClusterID: uint64(cl.ID())}
	metadata, merr := md.Marshal()
	if merr != nil {
		ExitWithError(ExitInvalidInput, merr)
	}

	w, walerr := wal.Create(waldir, metadata)
	if walerr != nil {
		ExitWithError(ExitIO, walerr)
	}
	defer w.Close()

	peers := make([]raft.Peer, len(cl.MemberIDs()))
	for i, id := range cl.MemberIDs() {
		ctx, err := json.Marshal((*cl).Member(id))
		if err != nil {
			ExitWithError(ExitInvalidInput, err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}

	ents := make([]raftpb.Entry, len(peers))
	nodeIDs := make([]uint64, len(peers))
	for i, p := range peers {
		nodeIDs[i] = p.ID
		cc := raftpb.ConfChange{
			Type:    raftpb.ConfChangeAddNode,
			NodeID:  p.ID,
			Context: p.Context}
		d, err := cc.Marshal()
		if err != nil {
			ExitWithError(ExitInvalidInput, err)
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Term:  1,
			Index: uint64(i + 1),
			Data:  d,
		}
		ents[i] = e
	}

	commit, term := uint64(len(ents)), uint64(1)

	if err := w.Save(raftpb.HardState{
		Term:   term,
		Vote:   peers[0].ID,
		Commit: commit}, ents); err != nil {
		ExitWithError(ExitIO, err)
	}

	b, berr := st.Save()
	if berr != nil {
		ExitWithError(ExitError, berr)
	}

	raftSnap := raftpb.Snapshot{
		Data: b,
		Metadata: raftpb.SnapshotMetadata{
			Index: commit,
			Term:  term,
			ConfState: raftpb.ConfState{
				Nodes: nodeIDs,
			},
		},
	}
	snapshotter := snap.New(snapdir)
	if err := snapshotter.SaveSnap(raftSnap); err != nil {
		panic(err)
	}

	if err := w.SaveSnapshot(walpb.Snapshot{Index: commit, Term: term}); err != nil {
		ExitWithError(ExitIO, err)
	}
}
Example #23
File: etcd4.go Project: robszumski/etcd
func Migrate4To5(dataDir string, name string) error {
	// prep new directories
	sd5 := snapDir5(dataDir)
	if err := os.MkdirAll(sd5, 0700); err != nil {
		return fmt.Errorf("failed creating snapshot directory %s: %v", sd5, err)
	}

	// read v0.4 data
	snap4, err := DecodeLatestSnapshot4FromDir(snapDir4(dataDir))
	if err != nil {
		return err
	}

	cfg4, err := DecodeConfig4FromFile(cfgFile4(dataDir))
	if err != nil {
		return err
	}

	ents4, err := DecodeLog4FromFile(logFile4(dataDir))
	if err != nil {
		return err
	}

	nodeIDs := ents4.NodeIDs()
	nodeID := GuessNodeID(nodeIDs, snap4, cfg4, name)

	if nodeID == 0 {
		return fmt.Errorf("Couldn't figure out the node ID from the log or flags, cannot convert")
	}

	metadata := pbutil.MustMarshal(&pb.Metadata{NodeID: nodeID, ClusterID: 0x04add5})
	wd5 := walDir5(dataDir)
	w, err := wal.Create(wd5, metadata)
	if err != nil {
		return fmt.Errorf("failed initializing wal at %s: %v", wd5, err)
	}
	defer w.Close()

	// transform v0.4 data
	var snap5 *raftpb.Snapshot
	if snap4 == nil {
		log.Printf("No snapshot found")
	} else {
		log.Printf("Found snapshot: lastIndex=%d", snap4.LastIndex)

		snap5 = snap4.Snapshot5()
	}

	st5 := cfg4.HardState5()

	// If we've got the most recent snapshot, we can use its committed index. Still likely less than the current actual index, but worth it for the replay.
	if snap5 != nil {
		st5.Commit = snap5.Metadata.Index
	}

	ents5, err := Entries4To5(ents4)
	if err != nil {
		return err
	}

	ents5Len := len(ents5)
	log.Printf("Found %d log entries: firstIndex=%d lastIndex=%d", ents5Len, ents5[0].Index, ents5[ents5Len-1].Index)

	// explicitly prepend an empty entry as the WAL code expects it
	ents5 = append(make([]raftpb.Entry, 1), ents5...)

	if err = w.Save(st5, ents5); err != nil {
		return err
	}
	log.Printf("Log migration successful")

	// migrate snapshot (if necessary) and logs
	if snap5 != nil {
		ss := snap.New(sd5)
		if err := ss.SaveSnap(*snap5); err != nil {
			return err
		}
		log.Printf("Snapshot migration successful")
	}

	return nil
}
Example #24
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
	st := store.New(StoreClusterPrefix, StoreKeysPrefix)

	var (
		w  *wal.WAL
		n  raft.Node
		s  *raft.MemoryStorage
		id types.ID
		cl *membership.RaftCluster
	)

	if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
		return nil, fmt.Errorf("cannot access data directory: %v", terr)
	}

	haveWAL := wal.Exist(cfg.WALDir())

	if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
		plog.Fatalf("create snapshot directory error: %v", err)
	}
	ss := snap.New(cfg.SnapDir())

	bepath := path.Join(cfg.SnapDir(), databaseFilename)
	beExist := fileutil.Exist(bepath)

	var be backend.Backend
	beOpened := make(chan struct{})
	go func() {
		be = backend.NewDefaultBackend(bepath)
		beOpened <- struct{}{}
	}()

	select {
	case <-beOpened:
	case <-time.After(time.Second):
		plog.Warningf("another etcd process is running with the same data dir and holding the file lock.")
		plog.Warningf("waiting for it to exit before starting...")
		<-beOpened
	}

	defer func() {
		if err != nil {
			be.Close()
		}
	}()

	prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout())
	if err != nil {
		return nil, err
	}
	var (
		remotes  []*membership.Member
		snapshot *raftpb.Snapshot
	)

	switch {
	case !haveWAL && !cfg.NewCluster:
		if err = cfg.VerifyJoinExisting(); err != nil {
			return nil, err
		}
		cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
		if err != nil {
			return nil, err
		}
		existingCluster, gerr := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), prt)
		if gerr != nil {
			return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
		}
		if err = membership.ValidateClusterAndAssignIDs(cl, existingCluster); err != nil {
			return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
		}
		if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, prt) {
			return nil, fmt.Errorf("incompatible with current running cluster")
		}

		remotes = existingCluster.Members()
		cl.SetID(existingCluster.ID())
		cl.SetStore(st)
		cl.SetBackend(be)
		cfg.Print()
		id, n, s, w = startNode(cfg, cl, nil)
	case !haveWAL && cfg.NewCluster:
		if err = cfg.VerifyBootstrap(); err != nil {
			return nil, err
		}
		cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
		if err != nil {
			return nil, err
		}
		m := cl.MemberByName(cfg.Name)
		if isMemberBootstrapped(cl, cfg.Name, prt, cfg.bootstrapTimeout()) {
			return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
		}
		if cfg.ShouldDiscover() {
			var str string
			str, err = discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
			if err != nil {
				return nil, &DiscoveryError{Op: "join", Err: err}
			}
			var urlsmap types.URLsMap
			urlsmap, err = types.NewURLsMap(str)
			if err != nil {
				return nil, err
			}
			if checkDuplicateURL(urlsmap) {
				return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
			}
			if cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, urlsmap); err != nil {
				return nil, err
			}
		}
		cl.SetStore(st)
		cl.SetBackend(be)
		cfg.PrintWithInitial()
		id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
	case haveWAL:
		if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
			return nil, fmt.Errorf("cannot write to member directory: %v", err)
		}

		if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
			return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
		}

		if cfg.ShouldDiscover() {
			plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
		}
		snapshot, err = ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			return nil, err
		}
		if snapshot != nil {
			if err = st.Recovery(snapshot.Data); err != nil {
				plog.Panicf("recovered store from snapshot error: %v", err)
			}
			plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index)
		}
		cfg.Print()
		if !cfg.ForceNewCluster {
			id, cl, n, s, w = restartNode(cfg, snapshot)
		} else {
			id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot)
		}
		cl.SetStore(st)
		cl.SetBackend(be)
		cl.Recover(api.UpdateCapability)
		if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist {
			os.RemoveAll(bepath)
			return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath)
		}
	default:
		return nil, fmt.Errorf("unsupported bootstrap config")
	}

	if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
		return nil, fmt.Errorf("cannot access member directory: %v", terr)
	}

	sstats := &stats.ServerStats{
		Name: cfg.Name,
		ID:   id.String(),
	}
	sstats.Initialize()
	lstats := stats.NewLeaderStats(id.String())

	heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
	srv = &EtcdServer{
		readych:   make(chan struct{}),
		Cfg:       cfg,
		snapCount: cfg.SnapCount,
		errorc:    make(chan error, 1),
		store:     st,
		r: raftNode{
			isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
			Node:        n,
			ticker:      time.Tick(heartbeat),
			// set up contention detectors for raft heartbeat message.
			// expect to send a heartbeat within 2 heartbeat intervals.
			td:          contention.NewTimeoutDetector(2 * heartbeat),
			heartbeat:   heartbeat,
			raftStorage: s,
			storage:     NewStorage(w, ss),
			msgSnapC:    make(chan raftpb.Message, maxInFlightMsgSnap),
			readStateC:  make(chan raft.ReadState, 1),
		},
		id:            id,
		attributes:    membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
		cluster:       cl,
		stats:         sstats,
		lstats:        lstats,
		SyncTicker:    time.Tick(500 * time.Millisecond),
		peerRt:        prt,
		reqIDGen:      idutil.NewGenerator(uint16(id), time.Now()),
		forceVersionC: make(chan struct{}),
	}

	srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster}

	srv.be = be
	minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat

	// always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
	// If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
	srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds())))
	srv.kv = mvcc.New(srv.be, srv.lessor, &srv.consistIndex)
	if beExist {
		kvindex := srv.kv.ConsistentIndex()
		// TODO: remove kvindex != 0 checking when we do not expect users to upgrade
		// etcd from pre-3.0 release.
		if snapshot != nil && kvindex < snapshot.Metadata.Index {
			if kvindex != 0 {
				return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d).", bepath, kvindex, snapshot.Metadata.Index)
			}
			plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index)
		}
	}
	srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex())

	srv.authStore = auth.NewAuthStore(srv.be)
	if h := cfg.AutoCompactionRetention; h != 0 {
		srv.compactor = compactor.NewPeriodic(h, srv.kv, srv)
		srv.compactor.Run()
	}

	srv.applyV3Base = &applierV3backend{srv}
	if err = srv.restoreAlarms(); err != nil {
		return nil, err
	}

	// TODO: move transport initialization near the definition of remote
	tr := &rafthttp.Transport{
		TLSInfo:     cfg.PeerTLSInfo,
		DialTimeout: cfg.peerDialTimeout(),
		ID:          id,
		URLs:        cfg.PeerURLs,
		ClusterID:   cl.ID(),
		Raft:        srv,
		Snapshotter: ss,
		ServerStats: sstats,
		LeaderStats: lstats,
		ErrorC:      srv.errorc,
	}
	if err = tr.Start(); err != nil {
		return nil, err
	}
	// add all remotes into transport
	for _, m := range remotes {
		if m.ID != id {
			tr.AddRemote(m.ID, m.PeerURLs)
		}
	}
	for _, m := range cl.Members() {
		if m.ID != id {
			tr.AddPeer(m.ID, m.PeerURLs)
		}
	}
	srv.r.transport = tr

	return srv, nil
}
Example #25
File: main.go Project: ngpestelos/etcd
func main() {
	from := flag.String("data-dir", "", "")
	flag.Parse()
	if *from == "" {
		log.Fatal("Must provide -data-dir flag")
	}

	ss := snap.New(snapDir(*from))
	snapshot, err := ss.Load()
	var walsnap walpb.Snapshot
	switch err {
	case nil:
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
		nodes := genIDSlice(snapshot.Metadata.ConfState.Nodes)
		fmt.Printf("Snapshot:\nterm=%d index=%d nodes=%s\n",
			walsnap.Term, walsnap.Index, nodes)
	case snap.ErrNoSnapshot:
		fmt.Printf("Snapshot:\nempty\n")
	default:
		log.Fatalf("Failed loading snapshot: %v", err)
	}

	w, err := wal.Open(walDir(*from), walsnap)
	if err != nil {
		log.Fatalf("Failed opening WAL: %v", err)
	}
	wmetadata, state, ents, err := w.ReadAll()
	w.Close()
	if err != nil {
		log.Fatalf("Failed reading WAL: %v", err)
	}
	id, cid := parseWALMetadata(wmetadata)
	vid := types.ID(state.Vote)
	fmt.Printf("WAL metadata:\nnodeID=%s clusterID=%s term=%d commitIndex=%d vote=%s\n",
		id, cid, state.Term, state.Commit, vid)

	fmt.Printf("WAL entries:\n")
	fmt.Printf("lastIndex=%d\n", ents[len(ents)-1].Index)
	fmt.Printf("%4s\t%10s\ttype\tdata\n", "term", "index")
	for _, e := range ents {
		msg := fmt.Sprintf("%4d\t%10d", e.Term, e.Index)
		switch e.Type {
		case raftpb.EntryNormal:
			msg = fmt.Sprintf("%s\tnorm", msg)
			var r etcdserverpb.Request
			if err := r.Unmarshal(e.Data); err != nil {
				msg = fmt.Sprintf("%s\t???", msg)
				break
			}
			switch r.Method {
			case "":
				msg = fmt.Sprintf("%s\tnoop", msg)
			case "SYNC":
				msg = fmt.Sprintf("%s\tmethod=SYNC time=%q", msg, time.Unix(0, r.Time))
			case "QGET", "DELETE":
				msg = fmt.Sprintf("%s\tmethod=%s path=%s", msg, r.Method, excerpt(r.Path, 64, 64))
			default:
				msg = fmt.Sprintf("%s\tmethod=%s path=%s val=%s", msg, r.Method, excerpt(r.Path, 64, 64), excerpt(r.Val, 128, 0))
			}
		case raftpb.EntryConfChange:
			msg = fmt.Sprintf("%s\tconf", msg)
			var r raftpb.ConfChange
			if err := r.Unmarshal(e.Data); err != nil {
				msg = fmt.Sprintf("%s\t???", msg)
			} else {
				msg = fmt.Sprintf("%s\tmethod=%s id=%s", msg, r.Type, types.ID(r.NodeID))
			}
		}
		fmt.Println(msg)
	}
}
Example #26
File: server.go Project: dnaeon/etcd
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
	st := store.New(StoreClusterPrefix, StoreKeysPrefix)

	var (
		w  *wal.WAL
		n  raft.Node
		s  *raft.MemoryStorage
		id types.ID
		cl *membership.RaftCluster
	)

	if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
		return nil, fmt.Errorf("cannot access data directory: %v", terr)
	}

	// Run the migrations.
	dataVer, err := version.DetectDataDir(cfg.DataDir)
	if err != nil {
		return nil, err
	}
	if err = upgradeDataDir(cfg.DataDir, cfg.Name, dataVer); err != nil {
		return nil, err
	}

	haveWAL := wal.Exist(cfg.WALDir())

	if err = os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil && !os.IsExist(err) {
		plog.Fatalf("create snapshot directory error: %v", err)
	}
	ss := snap.New(cfg.SnapDir())
	be := backend.NewDefaultBackend(path.Join(cfg.SnapDir(), databaseFilename))
	defer func() {
		if err != nil {
			be.Close()
		}
	}()

	prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout())
	if err != nil {
		return nil, err
	}
	var remotes []*membership.Member
	switch {
	case !haveWAL && !cfg.NewCluster:
		if err = cfg.VerifyJoinExisting(); err != nil {
			return nil, err
		}
		cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
		if err != nil {
			return nil, err
		}
		existingCluster, gerr := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), prt)
		if gerr != nil {
			return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
		}
		if err = membership.ValidateClusterAndAssignIDs(cl, existingCluster); err != nil {
			return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
		}
		if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, prt) {
			return nil, fmt.Errorf("incomptible with current running cluster")
		}

		remotes = existingCluster.Members()
		cl.SetID(existingCluster.ID())
		cl.SetStore(st)
		cl.SetBackend(be)
		cfg.Print()
		id, n, s, w = startNode(cfg, cl, nil)
	case !haveWAL && cfg.NewCluster:
		if err = cfg.VerifyBootstrap(); err != nil {
			return nil, err
		}
		cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
		if err != nil {
			return nil, err
		}
		m := cl.MemberByName(cfg.Name)
		if isMemberBootstrapped(cl, cfg.Name, prt, cfg.bootstrapTimeout()) {
			return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
		}
		if cfg.ShouldDiscover() {
			var str string
			str, err = discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
			if err != nil {
				return nil, &DiscoveryError{Op: "join", Err: err}
			}
			var urlsmap types.URLsMap
			urlsmap, err = types.NewURLsMap(str)
			if err != nil {
				return nil, err
			}
			if checkDuplicateURL(urlsmap) {
				return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
			}
			if cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, urlsmap); err != nil {
				return nil, err
			}
		}
		cl.SetStore(st)
		cl.SetBackend(be)
		cfg.PrintWithInitial()
		id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
	case haveWAL:
		if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
			return nil, fmt.Errorf("cannot write to member directory: %v", err)
		}

		if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
			return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
		}

		if cfg.ShouldDiscover() {
			plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
		}
		var snapshot *raftpb.Snapshot
		snapshot, err = ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			return nil, err
		}
		if snapshot != nil {
			if err = st.Recovery(snapshot.Data); err != nil {
				plog.Panicf("recovered store from snapshot error: %v", err)
			}
			plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index)
		}
		cfg.Print()
		if !cfg.ForceNewCluster {
			id, cl, n, s, w = restartNode(cfg, snapshot)
		} else {
			id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot)
		}
		cl.SetStore(st)
		cl.SetBackend(be)
		cl.Recover()
	default:
		return nil, fmt.Errorf("unsupported bootstrap config")
	}

	if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
		return nil, fmt.Errorf("cannot access member directory: %v", terr)
	}

	sstats := &stats.ServerStats{
		Name: cfg.Name,
		ID:   id.String(),
	}
	sstats.Initialize()
	lstats := stats.NewLeaderStats(id.String())

	srv = &EtcdServer{
		readych:   make(chan struct{}),
		Cfg:       cfg,
		snapCount: cfg.SnapCount,
		errorc:    make(chan error, 1),
		store:     st,
		r: raftNode{
			Node:        n,
			ticker:      time.Tick(time.Duration(cfg.TickMs) * time.Millisecond),
			raftStorage: s,
			storage:     NewStorage(w, ss),
		},
		id:            id,
		attributes:    membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
		cluster:       cl,
		stats:         sstats,
		lstats:        lstats,
		SyncTicker:    time.Tick(500 * time.Millisecond),
		peerRt:        prt,
		reqIDGen:      idutil.NewGenerator(uint16(id), time.Now()),
		forceVersionC: make(chan struct{}),
		msgSnapC:      make(chan raftpb.Message, maxInFlightMsgSnap),
	}

	srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster}

	srv.be = be
	srv.lessor = lease.NewLessor(srv.be)
	srv.kv = mvcc.New(srv.be, srv.lessor, &srv.consistIndex)
	srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex())
	srv.authStore = auth.NewAuthStore(srv.be)
	if h := cfg.AutoCompactionRetention; h != 0 {
		srv.compactor = compactor.NewPeriodic(h, srv.kv, srv)
		srv.compactor.Run()
	}

	if err = srv.restoreAlarms(); err != nil {
		return nil, err
	}

	// TODO: move transport initialization near the definition of remote
	tr := &rafthttp.Transport{
		TLSInfo:     cfg.PeerTLSInfo,
		DialTimeout: cfg.peerDialTimeout(),
		ID:          id,
		URLs:        cfg.PeerURLs,
		ClusterID:   cl.ID(),
		Raft:        srv,
		Snapshotter: ss,
		ServerStats: sstats,
		LeaderStats: lstats,
		ErrorC:      srv.errorc,
	}
	if err = tr.Start(); err != nil {
		return nil, err
	}
	// add all remotes into transport
	for _, m := range remotes {
		if m.ID != id {
			tr.AddRemote(m.ID, m.PeerURLs)
		}
	}
	for _, m := range cl.Members() {
		if m.ID != id {
			tr.AddPeer(m.ID, m.PeerURLs)
		}
	}
	srv.r.transport = tr

	return srv, nil
}
Example #27
File: snapwrap.go Project: harche/docker
func (o originalSnap) New(dirpath string) Snapshotter {
	return snap.New(dirpath)
}
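
Example #3 and Example #27 come from the same project and implement the same factory shape: given a directory path, return a Snapshotter, either by wrapping snap.New with an encrypter and decrypter or by passing it through untouched. Below is a self-contained sketch of that shape using a plain pass-through factory; the Snapshotter and SnapFactory declarations here are assumptions made for the sketch rather than the project's actual definitions, and the import paths are assumed as in the earlier sketch.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/coreos/etcd/raft/raftpb" // assumed import paths, as in the earlier sketch
	"github.com/coreos/etcd/snap"
)

// Snapshotter is the minimal surface these examples rely on; the project's
// real interface may expose more methods.
type Snapshotter interface {
	SaveSnap(snapshot raftpb.Snapshot) error
	Load() (*raftpb.Snapshot, error)
}

// SnapFactory is the common shape behind Example #3 (encrypting wrapper)
// and Example #27 (plain pass-through).
type SnapFactory interface {
	New(dirpath string) Snapshotter
}

// plainSnap mirrors Example #27: hand back the etcd snapshotter unchanged.
type plainSnap struct{}

func (plainSnap) New(dirpath string) Snapshotter { return snap.New(dirpath) }

func main() {
	dir, err := ioutil.TempDir("", "snapdir")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	// Callers depend only on the factory; swapping in an encrypting
	// implementation (as in Example #3) changes what lands on disk,
	// not the calling code.
	var factory SnapFactory = plainSnap{}
	ss := factory.New(dir)

	_, err = ss.Load()
	fmt.Println(err == snap.ErrNoSnapshot) // true: nothing has been saved yet
}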