예제 #1
0
// TestCandidateToLeader verifies that a server whose peer set contains a
// bare majority of approving peers eventually wins an election and
// reports the Leader state.
func TestCandidateToLeader(t *testing.T) {
	log.SetOutput(&bytes.Buffer{})
	defer log.SetOutput(os.Stdout)
	oldMin, oldMax := raft.ResetElectionTimeoutMs(25, 50)
	defer raft.ResetElectionTimeoutMs(oldMin, oldMax)

	noop := func([]byte) ([]byte, error) { return []byte{}, nil }
	server := raft.NewServer(1, &bytes.Buffer{}, noop)
	server.SetPeers(raft.MakePeers(nonresponsivePeer(1), approvingPeer(2), nonresponsivePeer(3)))
	server.Start()
	defer func() { server.Stop(); t.Logf("server stopped") }()

	// Let at least one full election cycle elapse before polling.
	time.Sleep(raft.MaximumElectionTimeout())

	// Poll with exponential backoff until the server reports Leader,
	// giving up after two maximum election timeouts.
	deadline := time.Now().Add(2 * raft.MaximumElectionTimeout())
	for wait := raft.BroadcastInterval(); ; wait *= 2 {
		if time.Now().After(deadline) {
			t.Fatal("failed to become Leader")
		}
		if server.State() == raft.Leader {
			t.Logf("became Leader")
			break
		}
		time.Sleep(wait)
	}
}
예제 #2
0
// testServers spins up an n-node cluster where every node is a Raft
// protocol server bridged to its own HTTP server, wires all nodes into a
// shared peer set, starts them, and lets them run for two election cycles.
func testServers(t *testing.T, n int) {
	log.SetFlags(log.Lmicroseconds)
	oldMin, oldMax := raft.ResetElectionTimeoutMs(50, 100)
	defer raft.ResetElectionTimeoutMs(oldMin, oldMax)

	// node = Raft protocol server + an HTTP server + a transport bridge
	protocolServers := make([]*raft.Server, n)
	webServers := make([]*http.Server, n)
	bridges := make([]*rafthttp.Server, n)

	// Build each node individually.
	for i := 0; i < n; i++ {
		// Raft protocol server.
		protocolServers[i] = raft.NewServer(uint64(i+1), &bytes.Buffer{}, noop)

		// HTTP transport wrapping the protocol server.
		bridges[i] = rafthttp.NewServer(protocolServers[i])

		// A unique HTTP server for this node's transport.
		mux := http.NewServeMux()
		webServers[i] = &http.Server{
			Addr:    fmt.Sprintf("%s:%d", listenHost, basePort+i+1),
			Handler: mux,
		}
		bridges[i].Install(mux)

		// The HTTP server must be listening so the NewHTTPPeer ID check
		// succeeds (the Raft protocol server itself need not be started yet).
		go webServers[i].ListenAndServe()
		t.Logf("Server id=%d @ %s", protocolServers[i].Id(), webServers[i].Addr)
	}

	// Assemble the peer set shared by every node in the network.
	peers := raft.Peers{}
	for i := 0; i < n; i++ {
		u, err := url.Parse(fmt.Sprintf("http://%s:%d", listenHost, basePort+i+1))
		if err != nil {
			t.Fatal(err)
		}
		p, err := rafthttp.NewPeer(*u)
		if err != nil {
			t.Fatal(err)
		}
		peers[p.Id()] = p
	}

	// Hand every Raft protocol server the complete peer set.
	for _, s := range protocolServers {
		s.SetPeers(peers)
	}

	// Boot the protocol servers; all of them stop when the test returns.
	for _, s := range protocolServers {
		s.Start()
		defer s.Stop()
	}

	// Give the cluster time to elect and stabilize.
	time.Sleep(2 * raft.MaximumElectionTimeout())
}
예제 #3
0
파일: main.go 프로젝트: patrickToca/progol
// main discovers peers via Progol multicast discovery, builds a
// Raft-replicated state machine around a data file, claims a segment of
// the file, processes that segment, and prints the result.
func main() {
	// Gather statistics about the data file.
	filesz, checksum, err := stat(*datafile)
	if err != nil {
		log.Fatal(err)
	}

	// Set up this process to be discovered. Choose a random port.
	discovery, port, err := newMulticastDiscovery(*host, *discoveryGroup, *discoveryLifetime)
	if err != nil {
		log.Fatal(err)
	}

	// Build a state machine around the data file statistics. ID is the port.
	machine, err := newStateMachine(uint64(port), filesz, checksum, *maxmem)
	if err != nil {
		log.Fatal(err)
	}

	// Build a Raft server, applying into our state machine. ID is the port.
	raftServer := raft.NewServer(uint64(port), &bytes.Buffer{}, machine.Apply)

	// HTTP server, for Progol discovery/validation, and Raft communication.
	http.HandleFunc(progol.ValidationPath, okHandler)
	raft.HTTPTransport(http.DefaultServeMux, raftServer)
	go http.ListenAndServe(net.JoinHostPort(*host, fmt.Sprint(port)), nil)

	// When we discover new peers at the Progol level,
	// we'll want to propagate that topology to Raft.
	substrate := make(chan []progol.Peer)
	ready := make(chan struct{})
	go propagate(substrate, raftServer, *minimum, ready)

	// Start a validator, to publish Progol peers to our propagator.
	validator := progol.NewValidator(discovery, *validationInterval)
	validator.Subscribe(substrate)
	defer validator.Unsubscribe(substrate)

	// Wait for some minimum number of nodes before starting Raft.
	<-ready

	// Then, start the Raft server, silencing its logging from here on.
	log.SetOutput(&bytes.Buffer{})
	raftServer.Start()

	// After some random amount of time, make a claim on a segment.
	// The response is discarded via a buffered channel nobody reads.
	time.Sleep(time.Duration(1000+rand.Intn(1000)) * time.Millisecond)
	raftServer.Command(machine.MakeClaim(), make(chan []byte, 1))

	// Wait until our state machine reports the offset we own.
	offset, length := machine.Segment() // blocking

	// Process that segment and emit the result.
	output := process(*datafile, offset, length)
	fmt.Printf("%s\n", output)

	// Block until interrupted.
	<-signalHandler()
}
예제 #4
0
파일: main.go 프로젝트: sevki/osquery
// start constructs the Raft server, exposes it over HTTP, joins the
// cluster after a fixed settling delay, and then loops forever encoding
// parsed queries with gob and replicating them as commands, printing each
// response. It panics on any unrecoverable error.
func start() {
	rand.Seed(42) // deterministic seed so all nodes agree

	// Construct the server.
	s = raft.NewServer(uint64(*id), &bytes.Buffer{}, apply)

	// Expose the server using an HTTP transport.
	raft.HTTPTransport(http.DefaultServeMux, s)
	http.HandleFunc("/response", response)
	go func() {
		// ListenAndServe only ever returns a non-nil error, so the
		// previous "contd" success branch was unreachable; treat any
		// return as fatal.
		if err := http.ListenAndServe(*httpAddr, nil); err != nil {
			panic(err)
		}
	}()

	// Give every node's HTTP listener time to come up before peering.
	time.Sleep(time.Second * 5)

	peers := mustNewPeers()

	// Set the initial server configuration.
	s.SetConfiguration(peers...)

	// Start the server.
	s.Start()

	fmt.Println("READY!")

	// Silence logging from here on; if /dev/null can't be opened we keep
	// the default output rather than handing log a nil writer.
	if dn, err := os.Open("/dev/null"); err == nil {
		log.SetOutput(dn)
	}

	for {
		// Encode the next query for replication through the cluster.
		buf := &bytes.Buffer{}
		if err := gob.NewEncoder(buf).Encode(parseQuery()); err != nil {
			panic(err)
		}

		// Submit it and block until the cluster responds.
		cmd := make(chan []byte)
		if err := s.Command(buf.Bytes(), cmd); err != nil {
			panic(err)
		}
		fmt.Println(string(<-cmd))
	}
}
예제 #5
0
// ExampleServer_Command shows how to issue a command into a Raft network
// and read back the replicated response.
func ExampleServer_Command() {
	// An ApplyFunc that ignores its input and always answers "PONG".
	pong := func(uint64, []byte) []byte { return []byte(`PONG`) }

	// Assume the server has been started elsewhere.
	s := raft.NewServer(1, &bytes.Buffer{}, pong)

	// Submit a command to the network.
	reply := make(chan []byte)
	if err := s.Command([]byte(`PING`), reply); err != nil {
		panic(err) // command not accepted
	}

	// Once the command has been replicated, the response arrives here.
	fmt.Printf("%s\n", <-reply)
}
예제 #6
0
// TestFailedElection verifies that a server facing one disapproving and
// one unreachable peer can never assemble a majority, and therefore never
// becomes Leader.
func TestFailedElection(t *testing.T) {
	log.SetOutput(&bytes.Buffer{})
	defer log.SetOutput(os.Stdout)
	oldMin, oldMax := raft.ResetElectionTimeoutMs(25, 50)
	defer raft.ResetElectionTimeoutMs(oldMin, oldMax)

	apply := func([]byte) ([]byte, error) { return []byte{}, nil }
	server := raft.NewServer(1, &bytes.Buffer{}, apply)
	server.SetPeers(raft.MakePeers(disapprovingPeer(2), nonresponsivePeer(3)))
	server.Start()
	defer func() { server.Stop(); t.Logf("server stopped") }()

	// Allow a couple of election cycles, then confirm it never won.
	time.Sleep(2 * raft.ElectionTimeout())
	if server.State() == raft.Leader {
		t.Fatalf("erroneously became Leader")
	}
	t.Logf("remained %s", server.State())
}
예제 #7
0
// ExampleNewServer_hTTP shows how to expose a Raft server over an HTTP
// transport and configure a five-node cluster.
func ExampleNewServer_hTTP() {
	// An ApplyFunc that does nothing.
	apply := func(uint64, []byte) []byte { return []byte{} }

	// parse builds a *url.URL with an empty path, panicking on bad input.
	parse := func(rawurl string) *url.URL {
		u, err := url.Parse(rawurl)
		if err != nil {
			panic(err)
		}
		u.Path = ""
		return u
	}

	// httpPeer wraps a URL in an HTTP Peer, panicking on failure.
	httpPeer := func(u *url.URL) raft.Peer {
		p, err := raft.NewHTTPPeer(u)
		if err != nil {
			panic(err)
		}
		return p
	}

	// Construct the server.
	s := raft.NewServer(1, &bytes.Buffer{}, apply)

	// Expose it over an HTTP transport.
	raft.HTTPTransport(http.DefaultServeMux, s)
	go http.ListenAndServe(":8080", nil)

	// Declare the initial cluster membership.
	s.SetConfiguration(
		httpPeer(parse("http://127.0.0.1:8080")), // this server
		httpPeer(parse("http://10.1.1.11:8080")),
		httpPeer(parse("http://10.1.1.12:8080")),
		httpPeer(parse("http://10.1.1.13:8080")),
		httpPeer(parse("http://10.1.1.14:8080")),
	)

	// Start the server.
	s.Start()
}
예제 #8
0
// testOrder replicates a random sequence of commands through an
// nServers-node in-process cluster and asserts that every server's state
// machine applied them in the identical order.
func testOrder(t *testing.T, nServers int) {
	// Between 8 and 23 distinct values, in random order.
	values := rand.Perm(8 + rand.Intn(16))

	// command and response
	type send struct {
		Send int `json:"send"`
	}
	type recv struct {
		Recv int `json:"recv"`
	}
	// do builds an ApplyFunc that records each raw command into sb and
	// echoes the sent value back as the response.
	do := func(sb *synchronizedBuffer) func(buf []byte) ([]byte, error) {
		return func(buf []byte) ([]byte, error) {
			sb.Write(buf)                           // write incoming message
			var s send                              // decode incoming message
			json.Unmarshal(buf, &s)                 // ...
			return json.Marshal(recv{Recv: s.Send}) // write outgoing message
		}
	}

	// set up the cluster
	servers := []*raft.Server{}        // server components
	storage := []*bytes.Buffer{}       // persistent log storage
	buffers := []*synchronizedBuffer{} // the "state machine" for each server
	for i := 0; i < nServers; i++ {
		buffers = append(buffers, &synchronizedBuffer{})
		storage = append(storage, &bytes.Buffer{})
		servers = append(servers, raft.NewServer(uint64(i+1), storage[i], do(buffers[i])))
	}
	peers := raft.Peers{}
	for _, server := range servers {
		peers[server.Id()] = raft.NewLocalPeer(server)
	}
	for _, server := range servers {
		server.SetPeers(peers)
	}

	// define cmds
	cmds := []send{}
	for _, v := range values {
		cmds = append(cmds, send{v})
	}

	// the expected "state-machine" output of applying each command
	expectedBuffer := &synchronizedBuffer{}
	for _, cmd := range cmds {
		buf, _ := json.Marshal(cmd)
		expectedBuffer.Write(buf)
	}

	// boot up the cluster; every server stops when the test returns.
	for _, server := range servers {
		server.Start()
		defer func(server0 *raft.Server) {
			log.Printf("issuing stop command to server %d", server0.Id())
			server0.Stop()
		}(server)
	}

	// send commands, each to a randomly chosen peer
	for i, cmd := range cmds {
		id := uint64(rand.Intn(nServers)) + 1
		peer := peers[id]
		buf, _ := json.Marshal(cmd)
		response := make(chan []byte, 1)
		// The retry label is both the target of the labeled break below
		// and of the backward goto used when the response channel is
		// closed before delivering a value.
	retry:
		for {
			log.Printf("command=%d/%d peer=%d: sending %s", i+1, len(cmds), id, buf)
			switch err := peer.Command(buf, response); err {
			case nil:
				log.Printf("command=%d/%d peer=%d: OK", i+1, len(cmds), id)
				break retry
			case raft.ErrUnknownLeader, raft.ErrDeposed:
				// Transient: wait out an election and resend.
				log.Printf("command=%d/%d peer=%d: failed (%s) -- will retry", i+1, len(cmds), id, err)
				time.Sleep(raft.ElectionTimeout())
				continue
			case raft.ErrTimeout:
				// The command may still have been replicated; don't resend,
				// or it could be applied twice.
				log.Printf("command=%d/%d peer=%d: timed out -- assume it went through", i+1, len(cmds), id)
				break retry
			default:
				t.Fatalf("command=%d/%d peer=%d: failed (%s) -- fatal", i+1, len(cmds), id, err)
			}
		}
		r, ok := <-response
		if !ok {
			log.Printf("command=%d/%d peer=%d: truncated, will retry", i+1, len(cmds), id)
			response = make(chan []byte, 1) // channel was closed, must re-make
			goto retry
		}
		log.Printf("command=%d/%d peer=%d: OK, got response %s", i+1, len(cmds), id, string(r))
	}

	// done sending
	log.Printf("testOrder done sending %d command(s) to network", len(cmds))

	// check the buffers (state machines); poll until each has caught up,
	// then require its contents to match the expected sequence exactly.
	for i, sb := range buffers {
		for {
			expected, got := expectedBuffer.String(), sb.String()
			if len(got) < len(expected) {
				t.Logf("server %d: not yet fully replicated, will check again", i+1)
				time.Sleep(raft.BroadcastInterval())
				continue // retry
			}
			if expected != got {
				t.Errorf("server %d: fully replicated, expected\n\t%s, got\n\t%s", i+1, expected, got)
				break
			}
			t.Logf("server %d: %s OK", i+1, got)
			break
		}
	}
}
예제 #9
0
// TestSimpleConsensus replicates a single SetValue command through a
// three-node in-process cluster and verifies that all three state
// machines converge on the same value within one second.
func TestSimpleConsensus(t *testing.T) {
	// Capture raft's log output; dump it only if the test fails.
	logBuffer := &bytes.Buffer{}
	log.SetOutput(logBuffer)
	defer log.SetOutput(os.Stdout)
	defer printOnFailure(t, logBuffer)
	oldMin, oldMax := raft.ResetElectionTimeoutMs(25, 50)
	defer raft.ResetElectionTimeoutMs(oldMin, oldMax)

	// SetValue is the command payload replicated through the cluster.
	type SetValue struct {
		Value int32 `json:"value"`
	}

	// One int32 per server; each server's ApplyFunc stores into its own.
	var i1, i2, i3 int32

	// applyValue builds an ApplyFunc that atomically stores the decoded
	// value into i and reports which server applied what.
	applyValue := func(id uint64, i *int32) func([]byte) ([]byte, error) {
		return func(cmd []byte) ([]byte, error) {
			var sv SetValue
			if err := json.Unmarshal(cmd, &sv); err != nil {
				return []byte{}, err
			}
			atomic.StoreInt32(i, sv.Value)
			return json.Marshal(map[string]interface{}{"applied_to_server": id, "applied_value": sv.Value})
		}
	}

	s1 := raft.NewServer(1, &bytes.Buffer{}, applyValue(1, &i1))
	s2 := raft.NewServer(2, &bytes.Buffer{}, applyValue(2, &i2))
	s3 := raft.NewServer(3, &bytes.Buffer{}, applyValue(3, &i3))

	s1Responses := &synchronizedBuffer{}
	s2Responses := &synchronizedBuffer{}
	s3Responses := &synchronizedBuffer{}
	defer func(sb *synchronizedBuffer) { t.Logf("s1 responses: %s", sb.String()) }(s1Responses)
	defer func(sb *synchronizedBuffer) { t.Logf("s2 responses: %s", sb.String()) }(s2Responses)
	defer func(sb *synchronizedBuffer) { t.Logf("s3 responses: %s", sb.String()) }(s3Responses)

	peers := raft.MakePeers(
		raft.NewLocalPeer(s1),
		raft.NewLocalPeer(s2),
		raft.NewLocalPeer(s3),
	)

	s1.SetPeers(peers)
	s2.SetPeers(peers)
	s3.SetPeers(peers)

	s1.Start()
	s2.Start()
	s3.Start()
	defer s1.Stop()
	defer s2.Stop()
	defer s3.Stop()

	var v int32 = 42
	cmd, _ := json.Marshal(SetValue{v})

	// Issue the command via s1, retrying until a leader has been elected.
	response := make(chan []byte, 1)
	func() {
		for {
			switch err := s1.Command(cmd, response); err {
			case nil:
				return
			case raft.ErrUnknownLeader:
				time.Sleep(raft.MinimumElectionTimeout())
			default:
				t.Fatal(err)
			}
		}
	}()

	r, ok := <-response
	if ok {
		s1Responses.Write(r)
	} else {
		t.Logf("didn't receive command response")
	}

	// Poll until every server's state machine holds v, or time out.
	// time.NewTicker (rather than time.Tick) so the ticker is released
	// when the test returns instead of leaking.
	ticker := time.NewTicker(raft.BroadcastInterval())
	defer ticker.Stop()
	timeout := time.After(1 * time.Second)
	for {
		select {
		case <-ticker.C:
			i1l := atomic.LoadInt32(&i1)
			i2l := atomic.LoadInt32(&i2)
			i3l := atomic.LoadInt32(&i3)
			t.Logf("i1=%02d i2=%02d i3=%02d", i1l, i2l, i3l)
			if i1l == v && i2l == v && i3l == v {
				t.Logf("success!")
				return
			}

		case <-timeout:
			t.Fatal("timeout")
		}
	}
}