Example #1
// iAppendToList appends newitem to the list stored under key on the
// responsible storage server, returning any RPC or status error.
func (ls *Libstore) iAppendToList(key, newitem string) error {
	var cli *rpc.Client
	var args storageproto.PutArgs = storageproto.PutArgs{key, newitem}
	var reply storageproto.PutReply
	var err error

	cli, err = ls.GetServer(key)
	if lsplog.CheckReport(1, err) {
		return err
	}

	//lsplog.Vlogf(0, "AppendToList args %v\n", args)

	err = cli.Call("StorageRPC.AppendToList", &args, &reply)
	if lsplog.CheckReport(1, err) {
		return err
	}

	//lsplog.Vlogf(0, "AppendToList reply %v\n", reply)

	if reply.Status != storageproto.OK {
		return MakeErr("AppendToList()", reply.Status)
	}

	return nil
}
Example #2
// NewXZRPCClient takes a net/rpc Client that should point to an instance...
func NewXZRPCClient(c *rpc.Client) func(method string) endpoint.Endpoint {
	return func(method string) endpoint.Endpoint {
		return func(ctx context.Context, request interface{}) (interface{}, error) {
			var (
				errs      = make(chan error, 1)
				responses = make(chan interface{}, 1)
			)
			go func() {
				var response XZResponse
				if err := c.Call(method, request, &response); err != nil {
					errs <- err
					return
				}
				responses <- response
			}()
			select {
			case <-ctx.Done():
				return nil, context.DeadlineExceeded
			case err := <-errs:
				return nil, err
			case response := <-responses:
				return response, nil
			}
		}
	}
}
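
A minimal wiring sketch for the factory above. The "XZService.Process" method name, the XZRequest type, and the address are placeholders; only NewXZRPCClient and XZResponse come from the example itself.

// Hypothetical request type and wiring; method name and address are
// illustrative only.
type XZRequest struct{ Payload string }

func callXZ() {
	conn, err := rpc.DialHTTP("tcp", "localhost:8080")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	endpointFor := NewXZRPCClient(conn)
	process := endpointFor("XZService.Process")

	resp, err := process(context.Background(), XZRequest{Payload: "hello"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", resp.(XZResponse))
}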
Example #3
// NewStorageServer creates and starts a new StorageServer. masterServerHostPort
// is the master storage server's host:port address. If empty, then this server
// is the master; otherwise, this server is a slave. numNodes is the total number of
// servers in the ring. port is the port number that this server should listen on.
// nodeID is a random, unsigned 32-bit ID identifying this server.
//
// This function should return only once all storage servers have joined the ring,
// and should return a non-nil error if the storage server could not be started.
func NewStorageServer(masterServerHostPort string, numNodes, port int, nodeID uint32) (StorageServer, error) {

	// Set up this server's info
	serverInfo := storagerpc.Node{HostPort: fmt.Sprintf("localhost:%d", port), NodeID: nodeID}
	var ss storageServer

	if masterServerHostPort == "" {

		// If this is the master server, set up a list of servers
		var servers = make([]storagerpc.Node, numNodes)
		servers[0] = serverInfo

		// Create the master server
		ss = storageServer{topMap: make(map[string]interface{}), nodeID: nodeID,
			servers: servers, count: 1, countLock: sync.Mutex{}, keyLocks: make(map[string]chan int)}

	} else {
		// Try to connect to the master at most five times
		args := storagerpc.RegisterArgs{ServerInfo: serverInfo}
		var reply storagerpc.RegisterReply
		var err error
		var master *rpc.Client
		for try := 1; try <= 5; try++ {
			master, err = rpc.DialHTTP("tcp", masterServerHostPort)
			if err == nil {
				break
			}
			if try == 5 {
				return nil, err
			}
			time.Sleep(time.Millisecond * 20)
		}
		for i := 1; i <= 5; i++ {
			master.Call("StorageServer.RegisterServer", args, &reply)
			if reply.Status == storagerpc.OK {
				// All servers are connected, create this slave server
				ss = storageServer{topMap: make(map[string]interface{}), nodeID: nodeID,
					servers: reply.Servers, count: numNodes, countLock: sync.Mutex{}, keyLocks: make(map[string]chan int)}
				break
			}
			// Wait briefly, then try to register with the master again
			if i == 5 {
				return nil, errors.New("couldn't connect to master")
			}
			time.Sleep(time.Millisecond * 20)

		}
	}

	// Start listening for connections from other storageServers and libstores
	rpc.RegisterName("StorageServer", &ss)
	rpc.HandleHTTP()
	l, e := net.Listen("tcp", serverInfo.HostPort)
	if e != nil {
		return nil, errors.New("Storage server couldn't start listening")
	}
	go http.Serve(l, nil)

	return &ss, nil
}
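
A usage sketch derived only from the doc comment above: a two-node ring with one master and one slave. The ports and node IDs are arbitrary placeholders.

// Hypothetical two-node ring; ports and node IDs are placeholders.
func startRing() {
	go func() {
		// Master: empty masterServerHostPort, two nodes expected.
		if _, err := NewStorageServer("", 2, 9009, 1); err != nil {
			log.Fatalln("master failed to start:", err)
		}
	}()

	// Slave: registers with the master and returns once the ring is full.
	if _, err := NewStorageServer("localhost:9009", 2, 9010, 2); err != nil {
		log.Fatalln("slave failed to start:", err)
	}
}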
Example #4
// MakeRemoteCall calls a function at a remote peer 'callee' synchronously. The usage of the three
// last arguments is identical to that of net/rpc's '(client *Client) Call' function.
func (rpcServ *RPCService) MakeRemoteCall(callee *Peer, call string, args interface{},
	result interface{}) error {
	if callee == nil {
		return nil
	}
	// Check if there is already a connection
	var client *rpc.Client
	var err error

	rpcServ.RLock()
	client = rpcServ.clientMap[callee.Address]
	rpcServ.RUnlock()

	// Open if not
	if client == nil {
		client, err = rpcServ.rpcConnect(callee)
		if err != nil {
			fmt.Println("RPC Connect failed!")
			return err
		}
	}
	err = client.Call("Node."+call, args, result)
	if err != nil {
		log.Print("RPC call failed, client "+callee.Address+" down? ", err)
		if err == rpc.ErrShutdown || reflect.TypeOf(err) == reflect.TypeOf((*rpc.ServerError)(nil)).Elem() {
			rpcServ.rpcClose(callee)
		}
	}

	return err
}
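
A hedged caller sketch for the wrapper above; the "Ping" method and its argument/reply types are assumptions, while MakeRemoteCall and Peer.Address come from the example.

// Hypothetical message types and caller; only MakeRemoteCall and
// Peer.Address come from the example above.
type PingArgs struct{ From string }
type PingReply struct{ OK bool }

func pingPeer(rpcServ *RPCService, peer *Peer) {
	args := PingArgs{From: "node-1"}
	var reply PingReply
	// The call is dispatched to the remote method "Node.Ping".
	if err := rpcServ.MakeRemoteCall(peer, "Ping", args, &reply); err != nil {
		log.Printf("ping to %s failed: %v", peer.Address, err)
	}
}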
Example #5
func (srv *ServerNode) SendPeerMessage(peerName string, api string, args interface{}, reply interface{}) error {
	srv.lock.RLock()
	// Find the peer
	var peer *ServerPeer
	peer = srv.peers[peerName]
	srv.lock.RUnlock()

	if peer == nil {
		return ERR_PEER_NOT_FOUND
	}
	// Attempt to get a connection from the pool
	//srv_log("Looking for connection in pool for peer %v\n", peerName)
	var client *rpc.Client
	var ok bool
	client, ok = <-peer.connections
	if !ok {
		srv_log("Peer %v connection in shutdown - unable to send to peer\n", peerName)
		return ERR_PEER_NOT_FOUND
	}

	//srv_log("Found connection - sending api call %v\n", api)
	err := client.Call(api, args, reply)
	if err != nil {
		srv_log("Error in outbound call to %v - closing client and asking for a new connection: %v\n", peerName, err)
		client.Close()
		peer.broken_connections <- struct{}{}
	} else {
		//srv_log("Call worked - returning connection to the pool\n")
		// It worked - restore the connection to the pool
		peer.connections <- client
	}
	return err
}
Example #6
func (ps *server) RPCAddDoc(client *rpc.Client, docId, myHostPort string) error {
	// Pear Server -> Pear Central: Requesting Add Fresh New Document
	tries := maxTries
	for tries > 0 {
		// Make RPC Call to Master
		args := &centralrpc.AddDocArgs{
			DocId:    docId,
			HostPort: myHostPort,
		}
		var reply centralrpc.AddDocReply
		if err := client.Call("PearCentral.AddDoc", args, &reply); err != nil {
			return err
		}
		// common.LOGV.Println("$Call AddDoc:",reply)
		// Check reply from Master
		if reply.Status == centralrpc.OK {
			_, ok := ps.docToServerMap[docId]
			if !ok {
				ps.docToServerMap[docId] = make(map[string]bool)
			}
			ps.docToServerMap[docId] = reply.Teammates
			return nil
		} else if reply.Status == centralrpc.DocExist {
			common.LOGE.Println("Doc ", docId, " already Exist")
			return nil
		}
		time.Sleep(time.Second)
		tries--
	}
	return errors.New("RPCAddDoc Failed.")
}
Example #7
func statusRpcCall(client *rpc.Client, args interface{}) {
	var reply []*report.App
	err := client.Call("Rpc.Status", args, &reply)
	if err != nil {
		log.Fatal("error:", err)
	}

	tabWriter := tabwriter.NewWriter(os.Stdout, 2, 2, 1, ' ', 0)
	for _, appReport := range reply {
		fmt.Fprintf(tabWriter, "[%s/%s:%d]\n", appReport.Name, appReport.Host, appReport.Port)

		for _, instanceReport := range appReport.Instances {
			if instanceReport.Active {
				fmt.Fprint(tabWriter, "*\t")
			} else {
				fmt.Fprint(tabWriter, "\t")
			}

			fmt.Fprintf(tabWriter, "%d/%s:%d\t", instanceReport.Id, instanceReport.Host, instanceReport.Port)

			fmt.Fprintf(tabWriter, "%s\t", instanceReport.Status)

			fmt.Fprintf(tabWriter, "%s\t", time.Duration(instanceReport.SinceStatusChange)*time.Second)

			fmt.Fprintf(tabWriter, "%s\n", instanceReport.Error)
		}
	}

	tabWriter.Flush()
}
Example #8
func testEchoService(t *testing.T, client *rpc.Client) {
	var args EchoRequest
	var reply EchoResponse
	var err error

	// EchoService.EchoTwice
	args.Msg = proto.String(echoRequest)
	err = client.Call("EchoService.EchoTwice", &args, &reply)
	if err != nil {
		t.Fatalf(`EchoService.EchoTwice: %v`, err)
	}
	if reply.GetMsg() != echoResponse {
		t.Fatalf(
			`EchoService.EchoTwice: expected = "%s", got = "%s"`,
			echoResponse, reply.GetMsg(),
		)
	}

	// EchoService.EchoTwice (Massive)
	args.Msg = proto.String(echoMassiveRequest)
	err = client.Call("EchoService.EchoTwice", &args, &reply)
	if err != nil {
		t.Fatalf(`EchoService.EchoTwice: %v`, err)
	}
	if reply.GetMsg() != echoMassiveResponse {
		got := reply.GetMsg()
		if len(got) > 8 {
			got = got[:8] + "..."
		}
		t.Fatalf(`EchoService.EchoTwice: len = %d, got = %v`,
			len(reply.GetMsg()), got,
		)
	}
}
Example #9
// VerifyToken authenticates the zistcl request to zistd
func VerifyToken(client *rpc.Client, token string) (bool, error) {
	var valid bool
	if err := client.Call("Communicator.VerifyToken", token, &valid); err != nil {
		return false, err
	}
	return valid, nil
}
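
A short usage sketch, assuming zistd serves its Communicator type over a plain TCP net/rpc listener; the address and token handling are placeholders.

// Hypothetical client-side check; the address is a placeholder.
func checkToken(token string) {
	client, err := rpc.Dial("tcp", "127.0.0.1:4243")
	if err != nil {
		log.Fatal("dialing zistd:", err)
	}
	defer client.Close()

	valid, err := VerifyToken(client, token)
	if err != nil {
		log.Fatal("VerifyToken:", err)
	}
	if !valid {
		log.Fatal("token rejected by zistd")
	}
}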
Example #10
func run_argstring(client *rpc.Client) {

	var reply int
	arg := &myrpc.Sarg{Na: "aaaa", Id: 10, Va: "bbbbbbb"}
	fmt.Printf("mseed: key:%s value:%s\n", arg.Na, arg.Va)

	arg.List = make([]int, 5)
	for i := 0; i < len(arg.List); i++ {
		arg.List[i] = i
	}
	fmt.Printf("len:%d\n", len(arg.List))

	err := client.Call("MSeed.AddString", arg, &reply)
	if err != nil {
		log.Fatal("arith error:", err)
	}

	/*
		var arg1 string;

		err = client.Call("MSeed.GetString",arg.Na, &arg1)
		if err != nil {
			log.Fatal("arith error:", err)
		}

		fmt.Printf("mseed: key:%s value:%s\n",arg.Na,arg1)
	*/
	//sed := new(myrpc.Args)
	var sed myrpc.Sarg
	err = client.Call("MSeed.GetSeed", "aaaa", &sed)
	if err != nil {
		log.Fatal("arith error:", err)
	}
	fmt.Println(sed)
}
Example #11
func cmdExit(c *rpc.Client) {
	var req ExitRequest
	var res ExitReply
	if err := c.Call("Server.Exit", &req, &res); err != nil {
		panic(err)
	}
}
Example #12
func Request(c *rpc.Client, req *clientRequest, res string) (string, error) {
	err := c.Call(req.Method, req.Params, &res)
	if err != nil {
		fmt.Printf("ERROR(call): %v\n", err)
	}
	return res, err
}
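
An illustrative call from within the same package. The method name and params are placeholders, and Params is assumed to hold an arbitrary interface{} value; only Request and the Method/Params fields come from the example.

// Hypothetical caller; "Service.Status" and the params map are placeholders.
func requestStatus(c *rpc.Client) {
	req := &clientRequest{
		Method: "Service.Status",
		Params: map[string]string{"verbose": "true"},
	}
	out, err := Request(c, req, "")
	if err != nil {
		return
	}
	fmt.Println(out)
}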
Example #13
func benchmarkClient(client *rpc.Client, b *testing.B) {
	// Synchronous calls
	args := &Args{7, 8}
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N)
	var wg sync.WaitGroup
	wg.Add(procs)
	b.StartTimer()

	for p := 0; p < procs; p++ {
		go func() {
			reply := new(Reply)
			for atomic.AddInt32(&N, -1) >= 0 {
				err := client.Call("Arith.Mul", args, reply)
				if err != nil {
					b.Fatalf("rpc error: Mul: expected no error but got string %q", err.Error())
				}
				if reply.C != args.A*args.B {
					b.Fatalf("rpc error: Mul: expected %d got %d", reply.C, args.A*args.B)
				}
			}
			wg.Done()
		}()
	}
	wg.Wait()
	b.StopTimer()
}
Example #14
func mGocodeCmdComplete(c *rpc.Client, fn string, src []byte, pos int) (res M, e string) {
	args := struct {
		Arg0 []byte
		Arg1 string
		Arg2 int
	}{src, fn, pos}

	reply := struct {
		Arg0 []candidate
		Arg1 int
	}{}

	if err := c.Call("RPC.RPC_auto_complete", &args, &reply); err != nil {
		e = "RPC error: " + err.Error()
	}

	completions := []M{}
	for _, d := range reply.Arg0 {
		completions = append(completions, M{
			"class": d.Class.String(),
			"type":  d.Type,
			"name":  d.Name,
		})
	}
	res = M{"completions": completions}

	return
}
Example #15
func cmd_cursor_type_pkg(c *rpc.Client) {
	var args, reply int
	var err error
	args = 0
	err = c.Call("RPC.RPC_setid", &args, &reply)
	compl_id = reply
	compl_win, err = acme.Open(compl_id, nil)
	if err != nil {
		compl_win, _ = acme.New()
		args = compl_win.GetId()
		err = c.Call("RPC.RPC_setid", &args, &reply)
	}
	//for acme

	var src []byte
	var searchpos int
	var fname string

	if afile, err = acmeCurrentFile(); err != nil {
		fmt.Printf("%v", err)
	}
	fname, src, searchpos = afile.name, afile.body, afile.offset
	//for acme
	typ, pkg := client_cursor_type_pkg(c, src, fname, searchpos)
	fmt.Printf("%s,,%s\n", typ, pkg)
}
Example #16
func homeHandler(stationdb *db.StationDB, m *rpc.Client,
	w http.ResponseWriter, r *http.Request, user userView) {
	log.Printf("userid: %s", user.Id)

	hc := homeContext{}

	n, err := stationdb.NumStations()
	if err != nil {
		n = -1
	}
	hc.NumStations = n

	hc.Stations, err = stationdb.UserStations(user.Id)
	if err != nil {
		log.Printf("Error getting user stations: %s", err.Error())
		// Continue rendering since it's not a critical error.
	}

	var args mux.StationCountArgs
	var count mux.StationCountResult
	err = m.Call("Coordinator.StationCount", args, &count)
	if err != nil {
		count.Count = -1
	}
	hc.NumOnlineStations = count.Count

	c := NewRenderContext(user, hc)
	err = homeTemplate.Get().ExecuteTemplate(w, "home.html", c)
	if err != nil {
		log.Printf("Error rendering home page: %s", err.Error())
		http.Error(w, "", http.StatusInternalServerError)
		return
	}
}
Example #17
func Markdown(rc *rpc.Client, in, out *([]byte)) error {
	err := rc.Call("RPC.Markdown", in, out)
	if goutils.CheckErr(err) {
		return err
	}
	return nil
}
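
A small helper sketch showing how the wrapper above could be called with an already-dialed client; the helper name is illustrative.

// Illustrative helper around the wrapper above; it passes pointers to the
// input and output byte slices, as Markdown expects.
func renderMarkdown(rc *rpc.Client, source []byte) ([]byte, error) {
	var rendered []byte
	if err := Markdown(rc, &source, &rendered); err != nil {
		return nil, err
	}
	return rendered, nil
}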
Example #18
func Client() {
	var err error
	var c net.Conn
	c, err = net.DialTimeout("tcp", "127.0.0.1:9999", 30*time.Second)
	if err != nil {
		log.Fatal("dialing:", err)
	}

	var client *rpc.Client
	client = jsonrpc.NewClient(c)

	// Synchronous calls issued from concurrent goroutines. Each goroutine
	// uses its own reply value to avoid a data race on a shared result.
	var a *PlusA = &PlusA{7, 8}

	results := make(chan *PlusR, 10000)
	for i := 0; i < 10000; i++ {
		go func() {
			r := new(PlusR)
			if err := client.Call("Test.Plus", a, r); err != nil {
				log.Println("Test.Plus:", err)
			}
			results <- r
		}()
	}

	for j := 0; j < 10000; j++ {
		fmt.Println(<-results)
	}

	client.Close()
	c.Close()
}
Example #19
func (ps *server) RPCRemoveDoc(client *rpc.Client, docId, myHostPort string) error {
	// Pear Server -> Pear Central: Removing Existing Client/Hostport combo
	tries := maxTries
	for tries > 0 {
		// Make RPC Call to Master
		args := &centralrpc.RemoveDocArgs{
			DocId:    docId,
			HostPort: myHostPort,
		}
		var reply centralrpc.RemoveDocReply
		if err := client.Call("PearCentral.RemoveDoc", args, &reply); err != nil {
			return err
		}
		// common.LOGV.Println("$Call Remove:",reply)
		// Check reply from Master
		if reply.Status == centralrpc.OK {
			delete(ps.docToServerMap, docId)
			return nil
		} else if reply.Status == centralrpc.DocNotExist {
			common.LOGE.Println("DocNotExist ", docId, " error")
			return nil
		}
		time.Sleep(time.Second)
		tries--
	}
	return errors.New("RPCRemoveDoc Failed.")
}
Example #20
func main() {
	flag.Parse()

	var reply string
	var err error
	var client *rpc.Client

	client, err = rpc.Dial("tcp", fmt.Sprintf("%s:%d", *hostFlag, *portFlag))
	if err != nil {
		panic(err)
	}

	args := &control.ChannelArgs{"localhost", "#test"}

	err = client.Call("ControlServer.JoinChannel", args, &reply)
	if err != nil {
		panic(err)
	}

	<-time.After(time.Second * 10)

	err = client.Call("ControlServer.PartChannel", args, &reply)
	if err != nil {
		panic(err)
	}

	log.Println("Made RPC call")
}
Example #21
// basicCall sends 1 byte to the server and expects the same byte in return
func basicCall(c *rpc.Client) {
	args := BasicArg{1}
	var reply BasicArg
	err := c.Call("Arith.Echo", args, &reply)
	log.Printf("Arith: %d, %d", args.A, reply.A)
	checkError(err)
}
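
For context, a sketch of what the matching server-side method could look like under net/rpc conventions; the Arith receiver shown here is an assumption, only the "Arith.Echo" name and BasicArg come from the example.

// Hypothetical server-side counterpart of "Arith.Echo": it copies the
// request value into the reply unchanged.
type Arith struct{}

func (a *Arith) Echo(args BasicArg, reply *BasicArg) error {
	reply.A = args.A
	return nil
}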
Example #22
// pushCommands reads the command file and pushes its lines to the dispatcher
func pushCommands(dispatcher *rpc.Client) {

	infile, err := os.Open(*inpath)
	if err != nil {
		log.Fatalf("failed to open %s: %s\n", *inpath, err)
	}
	defer infile.Close()

	commands := make([]string, 0)

	scanner := bufio.NewScanner(infile)
	for scanner.Scan() {
		commands = append(commands, scanner.Text())
	}

	if *terminate {
		commands = append(commands, distribute.RemoteTerminationToken)
	}

	var res struct{}
	if *atomic {
		err = dispatcher.Call("JobAdder.PushCommandsAtomic", &commands, &res)
	} else {
		dispatcher.Call("JobAdder.PushCommands", &commands, &res)
	}

	if err != nil {
		log.Fatal(err)
	}
}
Example #23
func doCatCmd(args []string, rpcClient *rpc.Client, user *user.User) {
	// parse flags
	flagSet := flag.NewFlagSet("cat", flag.ExitOnError)
	flagSet.Usage = subcmdUsage("cat", flagSet)
	var help_p *bool = flagSet.Bool("h", false, "help")
	var jobUser_p *string = flagSet.String("u", user.Username, "user")
	flagSet.Parse(args)

	if *help_p {
		flagSet.Usage()
		os.Exit(0)
	} else {
		// get job to cat
		if len(flagSet.Args()) == 0 {
			fmt.Fprintf(os.Stderr, "You must specify a job.\n")
			os.Exit(1)
		}
		var job string = flagSet.Args()[0]

		// check "-u" opt
		if *jobUser_p == "" {
			fmt.Fprintf(os.Stderr, "Option requires an argument: \"-u\"\n")
			os.Exit(1)
		}

		var result string
		arg := jobber.IpcArg{User: user.Username, Job: job, JobUser: *jobUser_p}
		err := rpcClient.Call("RealIpcServer.Cat", arg, &result)
		if err != nil {
			fmt.Fprintf(os.Stderr, "%v\n", err)
			os.Exit(1)
		}
		fmt.Printf("%v\n", result)
	}
}
Example #24
func WaitForConnClose(client *rpc.Client) {
	var in, out bool
	err := client.Call("SchedRemote.WaitForConnClose", &in, &out)
	if err != nil {
		log.Panic("ERROR: Connection closed, exiting...")
	}
}
Example #25
func revoke(client *rpc.Client, args *storagerpc.RevokeLeaseArgs, reply *storagerpc.RevokeLeaseReply, done chan int) {

	client.Call("LeaseCallbacks.RevokeLease", args, reply)
	if len(done) < 1 {
		done <- 1
	}
}
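
A fan-out sketch for the helper above: one revoke per lease holder sharing a single buffered done channel, so at most one completion signal is delivered (that is what the len check guards). The holders list and per-holder dialing are assumptions.

// Hypothetical fan-out; the holders slice and dialing are placeholders for
// however the caller tracks lease holders.
func revokeAll(holders []string, args *storagerpc.RevokeLeaseArgs) {
	done := make(chan int, 1)
	replies := make([]storagerpc.RevokeLeaseReply, len(holders))
	for i, hostPort := range holders {
		client, err := rpc.DialHTTP("tcp", hostPort)
		if err != nil {
			continue
		}
		go revoke(client, args, &replies[i], done)
	}
	// Wait for the first revocation to finish, or give up after a second.
	select {
	case <-done:
	case <-time.After(time.Second):
	}
}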
Example #26
func benchmarkEchoProtoRPC(b *testing.B, size int) {
	var client *rpc.Client
	benchmarkEcho(b, size, listenAndServeProtoRPC,
		func(addr net.Addr) {
			conn, err := tls.Dial(addr.Network(), addr.String(), clientTLSConfig)
			if err != nil {
				b.Fatal(err)
			}
			client = rpc.NewClientWithCodec(NewClientCodec(conn))
		},
		func() {
			if err := client.Close(); err != nil {
				b.Fatal(err)
			}
		},
		func(echoMsg string) string {
			args := EchoRequest{Msg: echoMsg}
			reply := EchoResponse{}
			if err := client.Call("Echo.Echo", &args, &reply); err != nil {
				b.Fatal(err)
			}
			return reply.Msg
		},
	)
}
Example #27
func (c *RpcCommandBackup) Execute(client *rpc.Client) (err error) {
	if c.path == "" {
		return errors.New("Missing path argument")
	}
	err = client.Call(c.RpcName(), c.path, &c.result)
	return
}
Example #28
func cmd_auto_complete(c *rpc.Client) {
	var env gocode_env
	env.get()
	var args, reply int
	var err error
	args = 0
	err = c.Call("RPC.RPC_setid", &args, &reply)
	compl_id = reply
	compl_win, err = acme.Open(compl_id, nil)
	if err != nil {
		compl_win, _ = acme.New()
		args = compl_win.GetId()
		err = c.Call("RPC.RPC_setid", &args, &reply)
	}
	//for acme

	var src []byte
	var searchpos int
	var fname string

	if afile, err = acmeCurrentFile(); err != nil {
		fmt.Printf("%v", err)
	}
	fname, src, searchpos = afile.name, afile.body, afile.offset
	compl_win.Name("%v+completions", fname)
	compl_win.Addr(",")
	compl_win.Write("data", nil)
	//for acme

	write_candidates(client_auto_complete(c, src, fname, searchpos, env))
}
Example #29
func (ls *Libstore) getServers(masterConnection *rpc.Client) error {
	lsplog.Vlogf(1, "Getting nodes from masters")
	args := new(storageproto.GetServersArgs)
	var reply storageproto.RegisterReply
	e := masterConnection.Call("StorageRPC.GetServers", args, &reply)
	if e != nil {
		return e
	}
	numberOfTries := 0
	for !reply.Ready {
		time.Sleep(WAIT_FOR_STORAGE_SLEEP * time.Second)
		if numberOfTries < RETRY_THRESH {
			e := masterConnection.Call("StorageRPC.GetServers", args, &reply)
			if e != nil {
				lsplog.Vlogf(3, "Connection to master failed")
				return e
			}
			numberOfTries++
		} else {
			return lsplog.MakeErr("Waited too long for storage server to be ready")
		}
	}
	lsplog.Vlogf(1, "servers %+v", reply.Servers)
	ls.servers = reply.Servers
	return nil
}
Example #30
// iPut stores the key-value pair on the backend storage server,
// returning any RPC or status error.
func (ls *Libstore) iPut(key, value string) error {
	var cli *rpc.Client
	var args storageproto.PutArgs = storageproto.PutArgs{key, value}
	var reply storageproto.PutReply
	var err error

	//lsplog.Vlogf(0, "libstore put %s->%s!", key, value)

	cli, err = ls.GetServer(key)
	if lsplog.CheckReport(1, err) {
		return err
	}

	//lsplog.Vlogf(0, "libstore getserver complete!")
	//lsplog.Vlogf(0, "put args %v\n", args)
	/*
	  fmt.Printf("put args %v\n", args)
	  fmt.Printf("here2\n")
	*/

	err = cli.Call("StorageRPC.Put", &args, &reply)
	if lsplog.CheckReport(1, err) {
		return err
	}

	//fmt.Printf("put reply %v\n", reply)
	//lsplog.Vlogf(0, "put reply %v\n", reply)

	if reply.Status != storageproto.OK {
		return MakeErr("Put()", reply.Status)
	}

	return nil
}