Example #1
// RunRecoverySimulator is the same as RunSimulator, except that each node's view and log from persistent storage are provided.
func RunRecoverySimulator(nodes int, logs [][]msgs.Entry, views []int) []*msgs.Io {
	ios := make([]*msgs.Io, nodes)

	// setup state
	for id := 0; id < nodes; id++ {
		io := msgs.MakeIo(10, nodes)
		conf := consensus.Config{id, nodes}
		go consensus.Recover(io, conf, views[id], logs[id])
		go io.DumpPersistentStorage()
		ios[id] = io
	}

	// forward traffic
	for to := range ios {
		for from := range ios {
			go ios[to].Incoming.Forward(ios[from].OutgoingUnicast[to])
		}
	}

	return ios
}
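For context, here is a minimal sketch of how RunRecoverySimulator might be driven from a test, assuming it lives in the same package and the standard testing package is imported. The three-node cluster, the empty recovered logs and the zero view numbers are placeholders chosen for illustration, not values taken from the project's own tests.

func TestRunRecoverySimulator(t *testing.T) {
	nodes := 3

	// placeholder persistent state: each node restarts with an empty log in view 0
	logs := make([][]msgs.Entry, nodes)
	views := make([]int, nodes)
	for id := 0; id < nodes; id++ {
		logs[id] = []msgs.Entry{}
		views[id] = 0
	}

	// wire up the recovering cluster and check one io handle is returned per node
	ios := RunRecoverySimulator(nodes, logs, views)
	if len(ios) != nodes {
		t.Errorf("expected %d io handles, got %d", nodes, len(ios))
	}
}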
Example #2
func main() {
	// set up logging
	flag.Parse()
	defer glog.Flush()

	conf := config.ParseServerConfig(*config_file)
	if *id == -1 {
		glog.Fatal("ID is required")
	}

	glog.Info("Starting server ", *id)
	defer glog.Warning("Shutting down server ", *id)

	// set up state machine
	keyval = store.New()
	c = cache.Create()
	// set up IO
	cons_io = msgs.MakeIo(2000, len(conf.Peers.Address))

	notifyclient = make(map[msgs.ClientRequest](chan msgs.ClientResponse))
	notifyclient_mutex = sync.RWMutex{}
	go stateMachine()

	// setting up persistent log
	disk, disk_reader, is_empty := openFile(*disk_path + "/persistent_log_" + strconv.Itoa(*id) + ".temp")
	defer disk.Flush()
	meta_disk, meta_disk_reader, is_new := openFile(*disk_path + "/persistent_data_" + strconv.Itoa(*id) + ".temp")
	defer meta_disk.Flush()

	// check persistent storage for commands
	found := false
	log := make([]msgs.Entry, 10000) //TODO: Fix this

	if !is_empty {
		for {
			b, err := disk_reader.ReadBytes(byte('\n'))
			if err != nil {
				glog.Info("No more commands in persistent storage")
				break
			}
			found = true
			var update msgs.LogUpdate
			err = msgs.Unmarshal(b, &update)
			if err != nil {
				glog.Fatal("Cannot parse log update", err)
			}
			log[update.Index] = update.Entry
			glog.Info("Adding for persistent storage :", update)
		}
	}

	// check persistent storage for view
	view := 0
	if !is_new {
		for {
			b, err := meta_disk_reader.ReadBytes(byte('\n'))
			if err != nil {
				glog.Info("No more view updates in persistent storage")
				break
			}
			found = true
			// b includes the trailing newline delimiter, so strip it before parsing
			view, err = strconv.Atoi(string(b[:len(b)-1]))
			if err != nil {
				glog.Fatal("Cannot parse view update", err)
			}
		}
	}

	// write updates to persistent storage
	go func() {
		for {
			view := <-cons_io.ViewPersist
			glog.Info("Updating view to ", view)
			// write the new view as a single newline-terminated record
			_, err := meta_disk.Write([]byte(strconv.Itoa(view) + "\n"))
			if err != nil {
				glog.Fatal(err)
			}
		}
	}()

	go func() {
		for {
			log := <-cons_io.LogPersist
			glog.Info("Updating log with ", log)
			b, err := msgs.Marshal(log)
			if err != nil {
				glog.Fatal(err)
			}
			// write to persistent storage
			n1, err := disk.Write(b)
			if err != nil {
				glog.Fatal(err)
			}
			n2, err := disk.Write([]byte("\n"))
			if err != nil {
				glog.Fatal(err)
			}
			glog.Info(n1+n2, " bytes written to persistent log")
		}
	}()

	// set up client server
	glog.Info("Starting up client server")
	listeningPort := ":" + strconv.Itoa(*client_port)
	ln, err := net.Listen("tcp", listeningPort)
	if err != nil {
		glog.Fatal(err)
	}

	// handle for incoming clients
	go func() {
		for {
			conn, err := ln.Accept()
			if err != nil {
				glog.Fatal(err)
			}
			go handleConnection(conn)
		}
	}()

	// set up peer state
	peers = make([]Peer, len(conf.Peers.Address))
	for i := range conf.Peers.Address {
		peers[i] = Peer{i, conf.Peers.Address[i], false}
	}
	peers_mutex = sync.RWMutex{}

	// set up peer server
	glog.Info("Starting up peer server")
	listeningPort = ":" + strconv.Itoa(*peer_port)
	lnPeers, err := net.Listen("tcp", listeningPort)
	if err != nil {
		glog.Fatal(err)
	}

	// handle local peer (without sending network traffic)
	peers_mutex.Lock()
	peers[*id].handled = true
	peers_mutex.Unlock()
	from := &(cons_io.Incoming)
	go from.Forward(cons_io.OutgoingUnicast[*id])

	// handle for incoming peers
	go func() {
		for {
			conn, err := lnPeers.Accept()
			if err != nil {
				glog.Fatal(err)
			}
			go handlePeer(conn, false)
		}
	}()

	// regularly check if all peers are connected and retry if not
	go func() {
		for {
			checkPeer()
			time.Sleep(100 * time.Millisecond)
		}
	}()

	// setting up the consensus algorithm
	log_length := 1000
	if conf.Options.Length > 0 {
		log_length = conf.Options.Length
	}
	cons_config := consensus.Config{*id, len(conf.Peers.Address),
		log_length, conf.Options.BatchInterval, conf.Options.MaxBatch}
	if !found {
		glog.Info("Starting fresh consensus instance")
		go consensus.Init(cons_io, cons_config)
	} else {
		glog.Info("Restoring consensus instance")
		go consensus.Recover(cons_io, cons_config, view, log)
	}
	//go cons_io.DumpPersistentStorage()

	glog.Info("Setup complete")

	// wait for a termination signal, then flush everything before exiting
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	sig := <-sigs
	disk.Flush()
	meta_disk.Flush()
	glog.Flush()
	glog.Warning("Shutting down due to ", sig)
}
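The openFile helper called above is not shown in this example. The following is a hypothetical sketch of what it might look like, assuming it opens (or creates) the file for appending and reports whether it was new or empty; the real helper in the codebase may differ, for example by also handling fsync, so treat this only as a reading aid.

// openFile is a hypothetical implementation: it returns a buffered writer and
// reader over the named file, plus a flag that is true when the file did not
// previously exist or held no data.
func openFile(filename string) (*bufio.Writer, *bufio.Reader, bool) {
	// check whether the file already holds data
	isNew := true
	if info, err := os.Stat(filename); err == nil && info.Size() > 0 {
		isNew = false
	}

	// open for reading and appending, creating the file if necessary
	file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		glog.Fatal(err)
	}

	return bufio.NewWriter(file), bufio.NewReader(file), isNew
}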