Example #1
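Connection.connect dials the underlying transport, builds an RPC client/server pair over it, and gives the OnConnect handler a chance to do its setup (e.g. authenticate) before the connection is published to other callers under the mutex.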
// connect performs the actual connect() and rpc setup.
func (c *Connection) connect(ctx context.Context) error {
	c.log.Debug("Connection: dialing transport")

	// connect
	transport, err := c.transport.Dial(ctx)
	if err != nil {
		c.log.Warning("Connection: error dialing transport: %v", err)
		return err
	}

	client := rpc.NewClient(transport, c.errorUnwrapper)
	server := rpc.NewServer(transport, libkb.WrapError)

	// call the connect handler
	err = c.handler.OnConnect(ctx, c, client, server)
	if err != nil {
		c.log.Warning("Connection: error calling OnConnect handler: %v", err)
		return err
	}

	// set the client for other callers.
	// we wait to do this so the handler has time to do
	// any setup required, e.g. authenticate.
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.client = client
	c.server = server
	c.transport.Finalize()

	c.log.Debug("Connection: connected")
	return nil
}
Example #2
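An early version of the service's per-connection handler: each incoming net.Conn gets its own transport and RPC server, is registered with the NotifyRouter, and, when running as a daemon, forwards log output to an external logger for as long as the server runs.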
func (d *Service) Handle(c net.Conn) {
	xp := rpc.NewTransport(c, libkb.NewRPCLogFactory(), libkb.WrapError)

	server := rpc.NewServer(xp, libkb.WrapError)

	cl := make(chan error)
	server.AddCloseListener(cl)
	connID := d.G().NotifyRouter.AddConnection(xp, cl)

	if err := d.RegisterProtocols(server, xp, connID, d.G()); err != nil {
		d.G().Log.Warning("RegisterProtocols error: %s", err)
		return
	}

	if d.isDaemon {
		baseHandler := NewBaseHandler(xp)
		logUI := LogUI{sessionID: 0, cli: baseHandler.getLogUICli()}
		handle := d.G().Log.AddExternalLogger(&logUI)
		defer d.G().Log.RemoveExternalLogger(handle)
	}

	if err := server.Run(false /* bg */); err != nil {
		if err != io.EOF {
			d.G().Log.Warning("Run error: %s", err)
		}
	}
}
Example #3
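A later revision of Service.Handle in which log forwarding is managed by a logRegister object that is passed into RegisterProtocols and unregistered when the handler returns.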
func (d *Service) Handle(c net.Conn) {
	xp := rpc.NewTransport(c, libkb.NewRPCLogFactory(), libkb.WrapError)

	server := rpc.NewServer(xp, libkb.WrapError)

	cl := make(chan error)
	server.AddCloseListener(cl)
	connID := d.G().NotifyRouter.AddConnection(xp, cl)

	var logReg *logRegister
	if d.isDaemon {
		// Create a new log register object that the Log handler can use to
		// register a logger.  When this function finishes, the logger
		// will be removed.
		logReg = newLogRegister(d.logForwarder, d.G().Log)
		defer logReg.UnregisterLogger()
	}

	if err := d.RegisterProtocols(server, xp, connID, logReg, d.G()); err != nil {
		d.G().Log.Warning("RegisterProtocols error: %s", err)
		return
	}

	if err := server.Run(false /* bg */); err != nil {
		if err != io.EOF {
			d.G().Log.Warning("Run error: %s", err)
		}
	}

	d.G().Log.Debug("Handle() complete for connection %d", connID)
}
Example #4
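GetRPCServer builds an RPC server over the client-side service socket, diagnosing the socket error for the user if the dial fails.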
func GetRPCServer(g *libkb.GlobalContext) (ret *rpc.Server, xp rpc.Transporter, err error) {
	if _, xp, err = g.GetSocket(false); err == nil {
		ret = rpc.NewServer(xp, libkb.WrapError)
	}
	if err != nil {
		DiagnoseSocketError(g.UI, err)
	}
	return
}
Example #5
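In kex2 device provisioning, the provisioner may race two connections: if it already has a secret it opens its own channel and serves the Kex2Provisioner protocol on it, and the select then keeps whichever connection becomes usable first, or gives up on cancellation or timeout.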
func (p *provisioner) pickFirstConnection() (err error) {

	// This connection is auto-closed at the end of this function, so if
	// you don't want it to close, then set it to nil.  See the first
	// case in the select below.
	var conn net.Conn
	var xp rpc.Transporter

	defer func() {
		if conn != nil {
			conn.Close()
		}
	}()

	// Only make a channel if we were provided a secret to start it with.
	// If not, we'll just have to wait for a message on p.arg.SecretChannel
	// and use the provisionee's channel.
	if len(p.arg.Secret) != 0 {
		if conn, err = NewConn(p.arg.Mr, p.arg.Secret, p.deviceID, p.arg.Timeout); err != nil {
			return err
		}
		prot := keybase1.Kex2ProvisionerProtocol(p)
		xp = rpc.NewTransport(conn, p.arg.Provisioner.GetLogFactory(), nil)
		srv := rpc.NewServer(xp, nil)
		if err = srv.Register(prot); err != nil {
			return err
		}
		if err = srv.Run(true); err != nil {
			return err
		}
	}

	select {
	case <-p.start:
		p.conn = conn
		conn = nil // so it's not closed in the defer()'ed close
		p.xp = xp
	case sec := <-p.arg.SecretChannel:
		if len(sec) != SecretLen {
			return ErrBadSecret
		}
		if p.conn, err = NewConn(p.arg.Mr, sec, p.deviceID, p.arg.Timeout); err != nil {
			return err
		}
		p.xp = rpc.NewTransport(p.conn, p.arg.Provisioner.GetLogFactory(), nil)
	case <-p.arg.Ctx.Done():
		err = ErrCanceled
	case <-time.After(p.arg.Timeout):
		err = ErrTimedOut
	}
	return
}
Example #6
File: main.go  Project: qbit/client
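The newest variant of Service.Handle shown here: the close-listener channel is buffered and fed manually, protocol handlers return Shutdown hooks that run exactly once (whether the connection closes or the whole service shuts down), and server.Run now returns a channel to wait on instead of taking a background flag.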
func (d *Service) Handle(c net.Conn) {
	xp := rpc.NewTransport(c, libkb.NewRPCLogFactory(d.G()), libkb.WrapError)

	server := rpc.NewServer(xp, libkb.WrapError)

	cl := make(chan error, 1)
	connID := d.G().NotifyRouter.AddConnection(xp, cl)

	var logReg *logRegister
	if d.isDaemon {
		// Create a new log register object that the Log handler can use to
		// register a logger.  When this function finishes, the logger
		// will be removed.
		logReg = newLogRegister(d.logForwarder, d.G().Log)
		defer logReg.UnregisterLogger()
	}
	shutdowners, err := d.RegisterProtocols(server, xp, connID, logReg, d.G())

	var shutdownOnce sync.Once
	shutdown := func() error {
		shutdownOnce.Do(func() {
			for _, shutdowner := range shutdowners {
				shutdowner.Shutdown()
			}
		})
		return nil
	}

	// Clean up handlers when the connection closes.
	defer shutdown()

	// Make sure shutdown is called when service shuts down but the connection
	// isn't closed yet.
	d.G().PushShutdownHook(shutdown)

	if err != nil {
		d.G().Log.Warning("RegisterProtocols error: %s", err)
		return
	}

	// Run the server and wait for it to finish.
	<-server.Run()
	// err is always non-nil.
	err = server.Err()
	cl <- err
	if err != io.EOF {
		d.G().Log.Warning("Run error: %s", err)
	}

	d.G().Log.Debug("Handle() complete for connection %d", connID)
}
Example #7
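The provisionee side of kex2: it opens the connection, registers the Kex2Provisionee protocol, and keeps the server's done channel so the caller can wait for the server to finish.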
func (p *provisionee) startServer(s Secret) (err error) {
	if p.conn, err = NewConn(p.arg.Ctx, p.arg.Mr, s, p.deviceID, p.arg.Timeout); err != nil {
		return err
	}
	prot := keybase1.Kex2ProvisioneeProtocol(p)
	p.xp = rpc.NewTransport(p.conn, p.arg.Provisionee.GetLogFactory(), nil)
	srv := rpc.NewServer(p.xp, nil)
	if err = srv.Register(prot); err != nil {
		return err
	}

	p.server = srv
	p.serverDoneCh = srv.Run()
	return nil
}
Example #8
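An integration test that runs a full service in-process, signs a user up, tracks t_alice, and asserts that two tracking-changed notifications (one for t_alice, one for the tracking user) arrive at a notification handler registered over an RPC server against the service.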
func TestTrackingNotifications(t *testing.T) {
	tc := setupTest(t, "signup")
	tc2 := cloneContext(tc)
	tc5 := cloneContext(tc)

	libkb.G.LocalDb = nil

	// Hack the various portions of the service that aren't
	// properly contextified.

	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

	userInfo := randomUser("sgnup")

	tui := trackingUI{
		signupUI: signupUI{
			info:         userInfo,
			Contextified: libkb.NewContextified(tc2.G),
		},
	}
	tc2.G.SetUI(&tui)
	signup := client.NewCmdSignupRunner(tc2.G)
	signup.SetTest()

	<-startCh

	if err := signup.Run(); err != nil {
		t.Fatal(err)
	}
	tc2.G.Log.Debug("Login State: %v", tc2.G.LoginState())

	nh := newTrackingNotifyHandler()

	// Launch the server that will listen for tracking notifications.
	launchServer := func(nh *trackingNotifyHandler) error {
		cli, xp, err := client.GetRPCClientWithContext(tc5.G)
		if err != nil {
			return err
		}
		srv := rpc.NewServer(xp, nil)
		if err = srv.Register(keybase1.NotifyTrackingProtocol(nh)); err != nil {
			return err
		}
		ncli := keybase1.NotifyCtlClient{Cli: cli}
		if err = ncli.SetNotifications(context.TODO(), keybase1.NotificationChannels{
			Tracking: true,
		}); err != nil {
			return err
		}
		return nil
	}

	// Actually launch it in the background
	go func() {
		err := launchServer(nh)
		if err != nil {
			nh.errCh <- err
		}
	}()

	// Have our test user track t_alice.
	trackCmd := client.NewCmdTrackRunner(tc2.G)
	trackCmd.SetUser("t_alice")
	trackCmd.SetOptions(keybase1.TrackOptions{BypassConfirm: true})
	err := trackCmd.Run()
	if err != nil {
		t.Fatal(err)
	}

	// Do a check for new tracking statements that should fire off a
	// notification. Currently the track command above does not fetch the new
	// chain link from the server, so this call is required. It's possible that
	// TrackEngine (or our signature caching code) might change in the future,
	// making this call unnecessary.
	checkTrackingCmd := client.NewCmdCheckTrackingRunner(tc2.G)
	err = checkTrackingCmd.Run()
	if err != nil {
		t.Fatal(err)
	}

	// Wait to get a notification back as we expect.
	// NOTE: If this test ever starts deadlocking here, it's possible that
	// we've changed how we cache signatures that we make on the local client,
	// in such a way that the fetch done by CheckTracking above doesn't find
	// any "isOwnNewLinkFromServer" links. If so, one way to fix this test
	// would be to blow away the local db before calling CheckTracking.
	tc.G.Log.Debug("Waiting for two tracking notifications.")
	for i := 0; i < 2; i++ {
		select {
		case err := <-nh.errCh:
			t.Fatalf("Error before notify: %v", err)
		case arg := <-nh.trackingCh:
			tAliceUID := keybase1.UID("295a7eea607af32040647123732bc819")
			tc.G.Log.Debug("Got tracking changed notification (%#v)", arg)
			if "t_alice" == arg.Username {
				if !tAliceUID.Equal(arg.Uid) {
					t.Fatalf("Bad UID back: %s != %s", tAliceUID, arg.Uid)
				}
			} else if userInfo.username == arg.Username {
				if !tc.G.Env.GetUID().Equal(arg.Uid) {
					t.Fatalf("Bad UID back: %s != %s", tc.G.Env.GetUID(), arg.Uid)
				}
			} else {
				t.Fatalf("Bad username back: %s != %s || %s", arg.Username, "t_alice", userInfo.username)
			}
		}
	}

	if err := client.CtlServiceStop(tc2.G); err != nil {
		t.Fatal(err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}
Example #9
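FixVersionClash talks to a possibly older running service over a hand-rolled RPC stack (to avoid incompatibilities with earlier releases), compares the client and service semantic versions, and restarts the service when the client has been upgraded, via launchd, the Stop RPC, or a pidfile-kill fallback.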
func FixVersionClash(g *libkb.GlobalContext, cl libkb.CommandLine) (err error) {
	var cli keybase1.ConfigClient
	var ctlCli keybase1.CtlClient
	var serviceConfig keybase1.Config
	var socket net.Conn

	g.Log.Debug("+ FixVersionClash")
	defer func() {
		if socket != nil {
			socket.Close()
			socket = nil
		}
		g.Log.Debug("- FixVersionClash -> %v", err)
	}()

	// Make our own stack here, circumventing all of our libraries, so
	// as not to introduce any incompatibilities with earlier services
	// (like 1.0.8)
	socket, err = g.SocketInfo.DialSocket()
	if err != nil {
		g.Log.Debug("| Failed to DialSocket, but ignoring error: %s\n", err)
		return nil
	}
	xp := libkb.NewTransportFromSocket(g, socket)
	srv := rpc.NewServer(xp, libkb.WrapError)
	gcli := rpc.NewClient(xp, libkb.ErrorUnwrapper{})
	cli = keybase1.ConfigClient{Cli: gcli}
	if err = srv.Register(NewLogUIProtocol()); err != nil {
		return err
	}

	serviceConfig, err = cli.GetConfig(context.TODO(), 0)
	if err != nil {
		return err
	}
	g.Log.Debug("| Contacted service; got version: %s", serviceConfig.Version)

	// We'll check and restart the service if there is a new version.
	var semverClient, semverService semver.Version

	cliVersion := libkb.VersionString()
	if g.Env.GetRunMode() == libkb.DevelRunMode {
		tmp := os.Getenv("KEYBASE_SET_VERSION")
		if len(tmp) > 0 {
			cliVersion = tmp
		}
	}

	semverClient, err = semver.Make(cliVersion)
	if err != nil {
		return err
	}
	semverService, err = semver.Make(serviceConfig.Version)
	if err != nil {
		return err
	}

	g.Log.Debug("| version check %s v %s", semverClient, semverService)
	if semverClient.EQ(semverService) {
		g.Log.Debug("| versions check out")
		return nil
	} else if semverClient.LT(semverService) && semverClient.Major < semverService.Major {
		return fmt.Errorf("Unexpected version clash; client is at v%s, which is significantly *less than* server at v%s",
			semverClient, semverService)
	}

	g.Log.Warning("Restarting after upgrade; service is running v%s, while v%s is available",
		semverService, semverClient)

	origPid, err := getPid(g)
	if err != nil {
		g.Log.Warning("Failed to find pid for service: %v\n", err)
	}

	if serviceConfig.ForkType == keybase1.ForkType_LAUNCHD {
		return restartLaunchdService(g, serviceConfig.Label, g.Env.GetServiceInfoPath())
	}

	ctlCli = keybase1.CtlClient{Cli: gcli}
	err = ctlCli.Stop(context.TODO(), keybase1.StopArg{})
	if err != nil && origPid >= 0 {
		// A fallback approach. I haven't seen a need for it, but it can't really hurt.
		// If we fail to restart via Stop() then revert to kill techniques.

		g.Log.Warning("Error in Stopping %d via RPC: %v; trying fallback (kill via pidfile)", origPid, err)
		time.Sleep(time.Second)
		var newPid int
		newPid, err = getPid(g)
		if err != nil {
			g.Log.Warning("No pid; shutdown must have worked (%v)", err)
		} else if newPid != origPid {
			g.Log.Warning("New service found with pid=%d; assuming restart", newPid)
			return nil
		} else {
			if err = killPid(origPid); err != nil {
				g.Log.Warning("Kill via pidfile failed: %v\n", err)
				return err
			}
			g.Log.Warning("Successful kill() on pid=%d", origPid)
		}
	}

	socket.Close()
	socket = nil

	time.Sleep(10 * time.Millisecond)
	g.Log.Debug("Waiting for shutdown...")
	time.Sleep(1 * time.Second)

	if serviceConfig.ForkType == keybase1.ForkType_AUTO {
		g.Log.Info("Restarting service...")
		_, err = AutoForkServer(g, cl)
	}

	return err
}
Example #10
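A signup/logout integration test: it registers NotifySession and NotifyUsers handlers against the running service and checks that exactly one notification arrives for the login, for the user change (after setting a BTC address), and for the logout.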
func TestSignupLogout(t *testing.T) {
	tc := setupTest(t, "signup")
	tc2 := cloneContext(tc)
	tc5 := cloneContext(tc)

	libkb.G.LocalDb = nil

	// Hack the various portions of the service that aren't
	// properly contextified.

	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

	userInfo := randomUser("sgnup")

	sui := signupUI{
		info:         userInfo,
		Contextified: libkb.NewContextified(tc2.G),
	}
	tc2.G.SetUI(&sui)
	signup := client.NewCmdSignupRunner(tc2.G)
	signup.SetTest()

	logout := client.NewCmdLogoutRunner(tc2.G)

	<-startCh

	nh := newNotifyHandler()

	// Launch the server that will listen for notifications on updates, such as logout
	launchServer := func(nh *notifyHandler) error {
		cli, xp, err := client.GetRPCClientWithContext(tc5.G)
		if err != nil {
			return err
		}
		srv := rpc.NewServer(xp, nil)
		if err = srv.Register(keybase1.NotifySessionProtocol(nh)); err != nil {
			return err
		}
		if err = srv.Register(keybase1.NotifyUsersProtocol(nh)); err != nil {
			return err
		}
		ncli := keybase1.NotifyCtlClient{Cli: cli}
		if err = ncli.SetNotifications(context.TODO(), keybase1.NotificationChannels{
			Session: true,
			Users:   true,
		}); err != nil {
			return err
		}
		return nil
	}

	// Actually launch it in the background
	go func() {
		err := launchServer(nh)
		if err != nil {
			nh.errCh <- err
		}
	}()

	if err := signup.Run(); err != nil {
		t.Fatal(err)
	}
	tc2.G.Log.Debug("Login State: %v", tc2.G.LoginState())
	select {
	case err := <-nh.errCh:
		t.Fatalf("Error before notify: %v", err)
	case u := <-nh.loginCh:
		if u != userInfo.username {
			t.Fatalf("bad username in login notifcation: %q != %q", u, userInfo.username)
		}
		tc.G.Log.Debug("Got notification of login for %q", u)
	}

	btc := client.NewCmdBTCRunner(tc2.G)
	btc.SetAddress("1HUCBSJeHnkhzrVKVjaVmWg2QtZS1mdfaz")
	if err := btc.Run(); err != nil {
		t.Fatal(err)
	}

	// Now let's be sure that we get a notification back as we expect.
	select {
	case err := <-nh.errCh:
		t.Fatalf("Error before notify: %v", err)
	case uid := <-nh.userCh:
		tc.G.Log.Debug("Got notification from user changed handled (%s)", uid)
		if e := libkb.CheckUIDAgainstUsername(uid, userInfo.username); e != nil {
			t.Fatalf("Bad UID back: %s != %s (%s)", uid, userInfo.username, e)
		}
	}

	// Fire a logout
	if err := logout.Run(); err != nil {
		t.Fatal(err)
	}

	// Now let's be sure that we get a notification back as we expect.
	select {
	case err := <-nh.errCh:
		t.Fatalf("Error before notify: %v", err)
	case <-nh.logoutCh:
		tc.G.Log.Debug("Got notification from logout handler")
	}

	if err := client.CtlServiceStop(tc2.G); err != nil {
		t.Fatal(err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}

	// Check that we only get one notification, not two
	select {
	case _, ok := <-nh.logoutCh:
		if ok {
			t.Fatal("Received an extra logout notification!")
		}
	default:
	}

}
Example #11
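This test delegates the secret UI to an external process: it registers a SecretUiProtocol handler via DelegateUiCtl, runs the login command (which is expected to fail), and verifies that the delegated GetKeybasePassphrase was actually invoked.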
func TestSecretUI(t *testing.T) {
	tc := setupTest(t, "secret_ui")
	tc1 := cloneContext(tc)
	tc2 := cloneContext(tc)

	// Make sure we're not using G anywhere in our tests.
	libkb.G.LocalDb = nil

	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

	// Wait for the server to start up
	<-startCh

	var err error
	check := func() {
		if err != nil {
			t.Fatal(err)
		}
	}

	sui := newSecretUI()
	cli, xp, err := client.GetRPCClientWithContext(tc2.G)
	check()
	srv := rpc.NewServer(xp, nil)
	err = srv.Register(keybase1.SecretUiProtocol(sui))
	check()
	ncli := keybase1.DelegateUiCtlClient{Cli: cli}
	err = ncli.RegisterSecretUI(context.TODO())
	check()

	// run login command
	loginCmdUI := &loginCmdUI{
		Contextified: libkb.NewContextified(tc2.G),
	}
	tc2.G.SetUI(loginCmdUI)
	cmd := client.NewCmdLoginRunner(tc2.G)
	err = cmd.Run()
	if err == nil {
		t.Fatal("login worked, when it should have failed")
	}

	// check that delegate ui was called:
	if !sui.getKeybasePassphrase {
		t.Logf("secret ui: %+v", sui)
		t.Error("delegate secret UI GetKeybasePassphrase was not called during login cmd")
	}

	stopper := client.NewCmdCtlStopRunner(tc1.G)
	if err := stopper.Run(); err != nil {
		t.Errorf("Error in stopping service: %v", err)
	}

	// If the server failed, it's also an error
	err = <-stopCh
	check()
}
Example #12
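Tests the gregor firehose forwarded to Electron: a mock registers the GregorUIProtocol, and the test checks the pushed state after the initial reconnect, after injecting in-band and out-of-band messages, and after a simulated gregor crash.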
func TestGregorForwardToElectron(t *testing.T) {
	tc := setupTest(t, "gregor")
	defer tc.Cleanup()
	tc1 := cloneContext(tc)

	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	stopCh := make(chan error)
	go func() {
		tc.G.Log.Debug("+ Service.Run")
		err := svc.Run()
		tc.G.Log.Debug("- Service.Run")
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

	userInfo := randomUser("grgr")
	sui := signupUI{
		info:         userInfo,
		Contextified: libkb.NewContextified(tc.G),
	}
	tc.G.SetUI(&sui)
	signup := client.NewCmdSignupRunner(tc.G)
	signup.SetTest()

	// Wait for the server to start up
	<-startCh

	if err := signup.Run(); err != nil {
		t.Fatal(err)
	}
	tc.G.Log.Debug("Login State: %v", tc.G.LoginState())

	var err error
	check := func() {
		if err != nil {
			t.Fatal(err)
		}
	}
	cli, xp, err := client.GetRPCClientWithContext(tc1.G)
	check()
	srv := rpc.NewServer(xp, nil)
	em := newElectronMock(tc.G)
	err = srv.Register(keybase1.GregorUIProtocol(em))
	check()
	ncli := keybase1.DelegateUiCtlClient{Cli: cli}

	// Spin until gregor comes up; it should come up after signup
	var ok bool
	for i := 0; !ok && i < 40; i++ {
		if ok = svc.HasGregor(); !ok {
			time.Sleep(50 * time.Millisecond)
		} else {
			tc.G.Log.Debug("spinning, waiting for gregor to come up (attempt %d)", i)
		}
	}
	if !ok {
		t.Fatal("Gregor never came up after we signed up")
	}

	svc.SetGregorPushStateFilter(func(m gregor.Message) bool {
		cat := m.ToInBandMessage().ToStateUpdateMessage().Creation().Category()
		return cat.String() != "user.identity_change" && cat.String() != "user.key_change"
	})
	err = ncli.RegisterGregorFirehose(context.TODO())
	check()

	select {
	case a := <-em.stateCh:
		if a.Reason != keybase1.PushReason_RECONNECTED {
			t.Fatal(fmt.Sprintf("got wrong reason: %v", a.Reason))
		}
		if d := len(filterPubsubdItems(a.State.Items_)); d != 0 {
			t.Fatal(fmt.Sprintf("Wrong number of items in state -- should have 0, but got %d", d))
		}
	case <-time.After(3 * time.Second):
		t.Fatalf("never got a reconnect message")
	}

	msgID, err := svc.GregorInject("foo", []byte("bar"))
	check()
	err = svc.GregorInjectOutOfBandMessage("baz", []byte("bip"))
	check()

	checkState := func(s gregor1.State) {
		items := filterPubsubdItems(s.Items_)
		if n := len(items); n != 1 {
			t.Errorf("Expected one item back; got %d", n)
			return
		}
		i := items[0]
		if !bytes.Equal(i.Md_.MsgID_.Bytes(), msgID.Bytes()) {
			t.Error("Wrong gregor message ID received")
		}
		if i.Item_.Category_.String() != "foo" {
			t.Error("Wrong gregor category")
		}
		if string(i.Item_.Body_.Bytes()) != "bar" {
			t.Error("Wrong gregor body")
		}
	}

	select {
	case pushArg := <-em.stateCh:
		checkState(pushArg.State)
		if pushArg.Reason != keybase1.PushReason_NEW_DATA {
			t.Errorf("wrong reason for push: %v", pushArg.Reason)
		}
	case <-time.After(3 * time.Second):
		t.Fatalf("never got an IBM")
	}

	select {
	case oobm := <-em.oobmCh:
		if oobm.System_ != "baz" {
			t.Fatalf("Got wrong OOBM system: %s", oobm.System_)
		}
		if s := string(oobm.Body_); s != "bip" {
			t.Fatalf("Got wrong OOBM body: %s", s)
		}
	case <-time.After(3 * time.Second):
		t.Fatalf("never got an OOBM")
	}

	svc.SimulateGregorCrashForTesting()
	select {
	case pushArg := <-em.stateCh:
		checkState(pushArg.State)
		if pushArg.Reason != keybase1.PushReason_RECONNECTED {
			t.Errorf("wrong reason for push: %v", pushArg.Reason)
		}
	case <-time.After(3 * time.Second):
		t.Fatalf("never got an IBM")
	}

	gcli := keybase1.GregorClient{Cli: cli}
	state, err := gcli.GetState(context.TODO())
	check()
	checkState(state)

	if err := client.CtlServiceStop(tc.G); err != nil {
		t.Fatal(err)
	}
	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}

}
Example #13
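A similar delegation test for the identify UI: a delegate IdentifyUiProtocol handler is registered, the id command is run against t_alice with the delegate UI, and the test waits for the delegate to report success and close its channel cleanly.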
func TestDelegateUI(t *testing.T) {
	tc := setupTest(t, "delegate_ui")
	tc1 := cloneContext(tc)
	tc2 := cloneContext(tc)

	// Make sure we're not using G anywhere in our tests.
	libkb.G.LocalDb = nil

	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

	// Wait for the server to start up
	<-startCh

	dui := newDelegateUI()

	launchDelegateUI := func(dui *delegateUI) error {
		cli, xp, err := client.GetRPCClientWithContext(tc2.G)
		if err != nil {
			return err
		}
		srv := rpc.NewServer(xp, nil)
		if err = srv.Register(keybase1.IdentifyUiProtocol(dui)); err != nil {
			return err
		}
		ncli := keybase1.DelegateUiCtlClient{Cli: cli}
		if err = ncli.RegisterIdentifyUI(context.TODO()); err != nil {
			return err
		}
		return nil
	}

	// Launch the delegate UI
	if err := launchDelegateUI(dui); err != nil {
		t.Fatal(err)
	}

	id := client.NewCmdIDRunner(tc1.G)
	id.SetUser("t_alice")
	id.UseDelegateUI()
	if err := id.Run(); err != nil {
		t.Errorf("Error in Run: %v", err)
	}

	// We should get either a 'done' or an 'error' from the delegateUI.
	err, ok := <-dui.ch
	if err != nil {
		t.Errorf("Error with delegate UI: %v", err)
	} else if ok {
		t.Errorf("Delegate UI didn't close the channel properly")
	} else if err = dui.checkSuccess(); err != nil {
		t.Error(err)
	}

	stopper := client.NewCmdCtlStopRunner(tc1.G)
	if err := stopper.Run(); err != nil {
		t.Errorf("Error in stopping service: %v", err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}