Code example #1
File: server.go Project: heidi-ann/ios
func handleConnection(cn net.Conn) {
	glog.Info("Incoming client connection from ",
		cn.RemoteAddr().String())

	reader := bufio.NewReader(cn)
	writer := bufio.NewWriter(cn)

	for {

		// read request
		glog.Info("Ready for Reading")
		text, err := reader.ReadBytes(byte('\n'))
		if err != nil {
			if err == io.EOF {
				break
			}
			glog.Warning(err)
			break
		}
		glog.Info("--------------------New request----------------------")
		glog.Info("Request: ", string(text))
		req := new(msgs.ClientRequest)
		err = msgs.Unmarshal(text, req)
		if err != nil {
			glog.Fatal(err)
		}

		// construct reply
		reply := handleRequest(*req)
		b, err := msgs.Marshal(reply)
		if err != nil {
			glog.Fatal("error:", err)
		}
		glog.Info(string(b))

		// send reply
		// TODO: fix: currently all servers send back replies
		glog.Info("Sending ", string(b))
		n, err := writer.Write(b)
		if err != nil {
			glog.Fatal(err)
		}
		if _, err = writer.Write([]byte("\n")); err != nil {
			glog.Fatal(err)
		}

		// tidy up: flush the buffered writer and check the error
		if err = writer.Flush(); err != nil {
			glog.Fatal(err)
		}
		glog.Info("Finished sending ", n, " bytes")

	}

	cn.Close()
}
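
The server frames each request and reply as a single '\n'-terminated payload. For illustration, here is a minimal standalone client for that framing. This is a sketch, not code from the project: encoding/json stands in for the msgs package, and the address and the ClientRequest field names are assumptions.

package main

import (
	"bufio"
	"encoding/json"
	"log"
	"net"
)

// ClientRequest mirrors the fields the project's client appears to build
// (client ID, request ID, replicate flag, request text); names are assumed.
type ClientRequest struct {
	ClientID  int
	RequestID int
	Replicate bool
	Request   string
}

func main() {
	// hypothetical address; the real client reads it from its config file
	conn, err := net.Dial("tcp", "localhost:8080")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// one JSON payload terminated by '\n', as reader.ReadBytes('\n') expects
	b, err := json.Marshal(ClientRequest{1, 1, true, "update A 3"})
	if err != nil {
		log.Fatal(err)
	}
	writer := bufio.NewWriter(conn)
	if _, err := writer.Write(append(b, '\n')); err != nil {
		log.Fatal(err)
	}
	if err := writer.Flush(); err != nil {
		log.Fatal(err)
	}

	// the reply uses the same newline framing
	reply, err := bufio.NewReader(conn).ReadBytes('\n')
	if err != nil {
		log.Fatal(err)
	}
	log.Print("reply: ", string(reply))
}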
Code example #2
File: client.go Project: heidi-ann/ios
func main() {
	// set up logging
	flag.Parse()
	defer glog.Flush()

	// always flush (whatever happens)
	sigs := make(chan os.Signal, 1)
	finish := make(chan bool, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)

	// parse config files
	conf := config.ParseClientConfig(*config_file)
	timeout := time.Millisecond * time.Duration(conf.Parameters.Timeout)
	// TODO: find a better way to handle required flags
	if *id == -1 {
		glog.Fatal("ID must be provided")
	}

	glog.Info("Starting up client ", *id)
	defer glog.Info("Shutting down client ", *id)

	// set up stats collection
	filename := *stat_file
	glog.Info("Opening file: ", filename)
	file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0777)
	if err != nil {
		glog.Fatal(err)
	}
	stats := csv.NewWriter(file)
	defer stats.Flush()

	// set up request id
	// TODO: write this value to disk
	requestID := 1

	// connecting to server
	conn, leader, err := connect(conf.Addresses.Address, 1, 0)
	if err != nil {
		glog.Fatal(err)
	}
	rd := bufio.NewReader(conn)

	// set up API
	var ioapi API
	switch *mode {
	case "interactive":
		ioapi = interactive.Create()
	case "test":
		ioapi = test.Generate(test.ParseAuto(*auto_file))
	case "rest":
		ioapi = rest.Create()
	default:
		glog.Fatal("Invalid mode: ", mode)
	}

	glog.Info("Client is ready to start processing incoming requests")
	go func() {
		for {
			// get next command
			text, replicate, ok := ioapi.Next()
			if !ok {
				finish <- true
				break
			}
			glog.Info("Request ", requestID, " is: ", text)

			// encode as request
			req := msgs.ClientRequest{
				*id, requestID, replicate, text}
			b, err := msgs.Marshal(req)
			if err != nil {
				glog.Fatal(err)
			}
			glog.Info(string(b))

			startTime := time.Now()
			tries := 0

			// dispatch request until successful
			var reply *msgs.ClientResponse
			for {
				tries++
				replyBytes, err := dispatcher(b, conn, rd, timeout)
				if err == nil {

					// handle reply
					reply = new(msgs.ClientResponse)
					err = msgs.Unmarshal(replyBytes, reply)

					if err == nil {
						break
					}
				}
				glog.Warning("Request ", requestID, " failed due to: ", err)

				// try to establish a new connection
				for {
					conn, leader, err = connect(conf.Addresses.Address, leader+1, conf.Parameters.Retries)
					if err == nil {
						break
					}
					glog.Warning("Serious connectivity issues")
					time.Sleep(time.Second)
				}

				rd = bufio.NewReader(conn)

			}

			// check that the reply is not the zero value
			if *reply == (msgs.ClientResponse{}) {
				glog.Fatal("Response is empty")
			}

			// check the reply is as expected
			if reply.ClientID != *id {
				glog.Fatal("Response received has wrong ClientID: expected ",
					*id, ", received ", reply.ClientID)
			}
			if reply.RequestID != requestID {
				glog.Fatal("Response received has wrong RequestID: expected ",
					requestID, ", received ", reply.RequestID)
			}

			// write latency to the stats log
			latency := strconv.FormatInt(time.Since(startTime).Nanoseconds(), 10)
			err = stats.Write([]string{startTime.String(), strconv.Itoa(requestID), latency, strconv.Itoa(tries)})
			if err != nil {
				glog.Fatal(err)
			}
			stats.Flush()
			// TODO: call stats.Error() after Flush to check the write succeeded

			requestID++
			// return the result to the user
			ioapi.Return(reply.Response)

		}
	}()

	select {
	case sig := <-sigs:
		glog.Warning("Termination due to: ", sig)
	case <-finish:
		glog.Info("No more commands")
	}
	glog.Flush()

}
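
The retry loop above depends on a dispatcher(b, conn, rd, timeout) helper defined elsewhere in the project. As a sketch of the shape such a helper could take (an assumption, not the project's actual implementation), it would write one newline-framed request and wait for one newline-framed reply under a read deadline; the usual bufio, net, and time imports are assumed:

// dispatcher: a hypothetical sketch of the helper used in the client's
// retry loop, assuming the same '\n' framing as the server.
func dispatcher(b []byte, conn net.Conn, rd *bufio.Reader, timeout time.Duration) ([]byte, error) {
	// send one '\n'-terminated request
	if _, err := conn.Write(append(b, '\n')); err != nil {
		return nil, err
	}
	// bound the wait for the reply so the caller can retry on a new connection
	if err := conn.SetReadDeadline(time.Now().Add(timeout)); err != nil {
		return nil, err
	}
	return rd.ReadBytes('\n')
}

On timeout, ReadBytes returns a network error, which the loop above treats as a failed request and answers by reconnecting, possibly to a different server.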
Code example #3
File: server.go Project: heidi-ann/ios
func main() {
	// set up logging
	flag.Parse()
	defer glog.Flush()

	conf := config.ParseServerConfig(*config_file)
	if *id == -1 {
		glog.Fatal("ID is required")
	}

	glog.Info("Starting server ", *id)
	defer glog.Warning("Shutting down server ", *id)

	// set up state machine
	keyval = store.New()
	c = cache.Create()
	// set up IO
	cons_io = msgs.MakeIo(2000, len(conf.Peers.Address))

	notifyclient = make(map[msgs.ClientRequest](chan msgs.ClientResponse))
	notifyclient_mutex = sync.RWMutex{}
	go stateMachine()

	// setting up persistent log
	disk, disk_reader, is_empty := openFile(*disk_path + "/persistent_log_" + strconv.Itoa(*id) + ".temp")
	defer disk.Flush()
	meta_disk, meta_disk_reader, is_new := openFile(*disk_path + "/persistent_data_" + strconv.Itoa(*id) + ".temp")
	defer meta_disk.Flush()

	// check persistent storage for commands
	found := false
	log := make([]msgs.Entry, 10000) //TODO: Fix this

	if !is_empty {
		for {
			b, err := disk_reader.ReadBytes(byte('\n'))
			if err != nil {
				glog.Info("No more commands in persistent storage")
				break
			}
			found = true
			var update msgs.LogUpdate
			err = msgs.Unmarshal(b, &update)
			if err != nil {
				glog.Fatal("Cannot parse log update", err)
			}
			log[update.Index] = update.Entry
			glog.Info("Adding for persistent storage :", update)
		}
	}

	// check persistent storage for view
	view := 0
	if !is_new {
		for {
			b, err := meta_disk_reader.ReadBytes(byte('\n'))
			if err != nil {
				glog.Info("No more view updates in persistent storage")
				break
			}
			found = true
			// strip the trailing '\n' before parsing, otherwise Atoi fails
			view, err = strconv.Atoi(string(b[:len(b)-1]))
			if err != nil {
				glog.Fatal("Cannot parse view update: ", err)
			}
		}
	}

	// write updates to persistent storage
	go func() {
		for {
			view := <-cons_io.ViewPersist
			glog.Info("Updating view to ", view)
			if _, err := meta_disk.Write([]byte(strconv.Itoa(view))); err != nil {
				glog.Fatal(err)
			}
			if _, err := meta_disk.Write([]byte("\n")); err != nil {
				glog.Fatal(err)
			}
		}
	}()

	go func() {
		for {
			log := <-cons_io.LogPersist
			glog.Info("Updating log with ", log)
			b, err := msgs.Marshal(log)
			if err != nil {
				glog.Fatal(err)
			}
			// write to persistent storage, checking each write separately
			n1, err := disk.Write(b)
			if err != nil {
				glog.Fatal(err)
			}
			n2, err := disk.Write([]byte("\n"))
			if err != nil {
				glog.Fatal(err)
			}
			glog.Info(n1+n2, " bytes written to persistent log")
		}
	}()

	// set up client server
	glog.Info("Starting up client server")
	listeningPort := ":" + strconv.Itoa(*client_port)
	ln, err := net.Listen("tcp", listeningPort)
	if err != nil {
		glog.Fatal(err)
	}

	// handle for incoming clients
	go func() {
		for {
			conn, err := ln.Accept()
			if err != nil {
				glog.Fatal(err)
			}
			go handleConnection(conn)
		}
	}()

	// set up peer state
	peers = make([]Peer, len(conf.Peers.Address))
	for i := range conf.Peers.Address {
		peers[i] = Peer{
			i, conf.Peers.Address[i], false}
	}
	peers_mutex = sync.RWMutex{}

	// set up peer server
	glog.Info("Starting up peer server")
	listeningPort = ":" + strconv.Itoa(*peer_port)
	lnPeers, err := net.Listen("tcp", listeningPort)
	if err != nil {
		glog.Fatal(err)
	}

	// handle local peer (without sending network traffic)
	peers_mutex.Lock()
	peers[*id].handled = true
	peers_mutex.Unlock()
	from := &(cons_io.Incoming)
	go from.Forward(cons_io.OutgoingUnicast[*id])

	// handle for incoming peers
	go func() {
		for {
			conn, err := lnPeers.Accept()
			if err != nil {
				glog.Fatal(err)
			}
			go handlePeer(conn, false)
		}
	}()

	// regularly check if all peers are connected and retry if not
	go func() {
		for {
			checkPeer()
			time.Sleep(100 * time.Millisecond)
		}
	}()

	// setting up the consensus algorithm
	log_length := 1000
	if conf.Options.Length > 0 {
		log_length = conf.Options.Length
	}
	cons_config := consensus.Config{*id, len(conf.Peers.Address),
		log_length, conf.Options.BatchInterval, conf.Options.MaxBatch}
	if !found {
		glog.Info("Starting fresh consensus instance")
		go consensus.Init(cons_io, cons_config)
	} else {
		glog.Info("Restoring consensus instance")
		go consensus.Recover(cons_io, cons_config, view, log)
	}
	//go cons_io.DumpPersistentStorage()

	// tidy up
	glog.Info("Setup complete")

	// waiting for exit
	// always flush (whatever happens)
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	sig := <-sigs
	disk.Flush()
	meta_disk.Flush()
	glog.Flush()
	glog.Warning("Shutting down due to ", sig)
}
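
The persistent log and metadata files are opened via an openFile helper whose definition is not shown. Judging from how its three return values are used above (a writer with Flush, a buffered reader, and an is-empty flag), a minimal sketch could look like the following; the exact signature is an assumption, and glog plus the standard bufio and os imports are taken as given:

// openFile: a hypothetical sketch of the helper used above. It opens (or
// creates) the file in append mode and reports whether it started empty.
func openFile(filename string) (*bufio.Writer, *bufio.Reader, bool) {
	file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		glog.Fatal(err)
	}
	info, err := file.Stat()
	if err != nil {
		glog.Fatal(err)
	}
	// an empty file means there is no previous state to recover
	return bufio.NewWriter(file), bufio.NewReader(file), info.Size() == 0
}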