// TestTrackingNotifications signs up a fresh user, registers an RPC server
// for the NotifyTracking protocol, tracks t_alice, and verifies that two
// TrackingChanged notifications arrive: one for t_alice and one for the
// newly signed-up user, each carrying the matching UID.
func TestTrackingNotifications(t *testing.T) {
	tc := setupTest(t, "signup")
	tc2 := cloneContext(tc)
	tc5 := cloneContext(tc)

	// Hack the various portions of the service that aren't
	// properly contextified.
	libkb.G.LocalDb = nil

	defer tc.Cleanup()

	// Run the service in the background; its exit error is delivered on
	// stopCh and checked at the end of the test.
	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

	userInfo := randomUser("sgnup")

	tui := trackingUI{
		signupUI: signupUI{
			info:         userInfo,
			Contextified: libkb.NewContextified(tc2.G),
		},
	}
	tc2.G.SetUI(&tui)
	signup := client.NewCmdSignupRunner(tc2.G)
	signup.SetTest()

	// Wait for the service to finish starting before running any commands.
	<-startCh

	if err := signup.Run(); err != nil {
		t.Fatal(err)
	}
	tc2.G.Log.Debug("Login State: %v", tc2.G.LoginState())

	nh := newTrackingNotifyHandler()

	// Launch the server that will listen for tracking notifications.
	launchServer := func(nh *trackingNotifyHandler) error {
		cli, xp, err := client.GetRPCClientWithContext(tc5.G)
		if err != nil {
			return err
		}
		srv := rpc.NewServer(xp, nil)
		if err = srv.Register(keybase1.NotifyTrackingProtocol(nh)); err != nil {
			return err
		}
		ncli := keybase1.NotifyCtlClient{Cli: cli}
		// Subscribe only to the Tracking notification channel.
		if err = ncli.SetNotifications(context.TODO(), keybase1.NotificationChannels{
			Tracking: true,
		}); err != nil {
			return err
		}
		return nil
	}

	// Actually launch it in the background; failures are forwarded to the
	// handler's error channel so the select loop below can report them.
	go func() {
		err := launchServer(nh)
		if err != nil {
			nh.errCh <- err
		}
	}()

	// Have our test user track t_alice.
	trackCmd := client.NewCmdTrackRunner(tc2.G)
	trackCmd.SetUser("t_alice")
	trackCmd.SetOptions(keybase1.TrackOptions{BypassConfirm: true})
	err := trackCmd.Run()
	if err != nil {
		t.Fatal(err)
	}

	// Do a check for new tracking statements that should fire off a
	// notification. Currently the track command above does not fetch the new
	// chain link from the server, so this call is required. It's possible that
	// TrackEngine (or our signature caching code) might change in the future,
	// making this call unnecessary.
	checkTrackingCmd := client.NewCmdCheckTrackingRunner(tc2.G)
	err = checkTrackingCmd.Run()
	if err != nil {
		t.Fatal(err)
	}

	// Wait to get a notification back as we expect.
	// NOTE: If this test ever starts deadlocking here, it's possible that
	// we've changed how we cache signatures that we make on the local client,
	// in such a way that the fetch done by CheckTracking above doesn't find
	// any "isOwnNewLinkFromServer" links. If so, one way to fix this test
	// would be to blow away the local db before calling CheckTracking.
	tc.G.Log.Debug("Waiting for two tracking notifications.")
	for i := 0; i < 2; i++ {
		select {
		case err := <-nh.errCh:
			t.Fatalf("Error before notify: %v", err)
		case arg := <-nh.trackingCh:
			// The two notifications may arrive in either order, so match on
			// the username and verify the corresponding UID.
			tAliceUID := keybase1.UID("295a7eea607af32040647123732bc819")
			tc.G.Log.Debug("Got tracking changed notification (%#v)", arg)
			if "t_alice" == arg.Username {
				if !tAliceUID.Equal(arg.Uid) {
					t.Fatalf("Bad UID back: %s != %s", tAliceUID, arg.Uid)
				}
			} else if userInfo.username == arg.Username {
				if !tc.G.Env.GetUID().Equal(arg.Uid) {
					t.Fatalf("Bad UID back: %s != %s", tc.G.Env.GetUID(), arg.Uid)
				}
			} else {
				t.Fatalf("Bad username back: %s != %s || %s", arg.Username, "t_alice", userInfo.username)
			}
		}
	}

	if err := client.CtlServiceStop(tc2.G); err != nil {
		t.Fatal(err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}
func TestSignupLogout(t *testing.T) { tc := setupTest(t, "signup") tc2 := cloneContext(tc) tc5 := cloneContext(tc) libkb.G.LocalDb = nil // Hack the various portions of the service that aren't // properly contextified. defer tc.Cleanup() stopCh := make(chan error) svc := service.NewService(tc.G, false) startCh := svc.GetStartChannel() go func() { err := svc.Run() if err != nil { t.Logf("Running the service produced an error: %v", err) } stopCh <- err }() userInfo := randomUser("sgnup") sui := signupUI{ info: userInfo, Contextified: libkb.NewContextified(tc2.G), } tc2.G.SetUI(&sui) signup := client.NewCmdSignupRunner(tc2.G) signup.SetTest() logout := client.NewCmdLogoutRunner(tc2.G) <-startCh nh := newNotifyHandler() // Launch the server that will listen for notifications on updates, such as logout launchServer := func(nh *notifyHandler) error { cli, xp, err := client.GetRPCClientWithContext(tc5.G) if err != nil { return err } srv := rpc.NewServer(xp, nil) if err = srv.Register(keybase1.NotifySessionProtocol(nh)); err != nil { return err } if err = srv.Register(keybase1.NotifyUsersProtocol(nh)); err != nil { return err } ncli := keybase1.NotifyCtlClient{Cli: cli} if err = ncli.SetNotifications(context.TODO(), keybase1.NotificationChannels{ Session: true, Users: true, }); err != nil { return err } return nil } // Actually launch it in the background go func() { err := launchServer(nh) if err != nil { nh.errCh <- err } }() if err := signup.Run(); err != nil { t.Fatal(err) } tc2.G.Log.Debug("Login State: %v", tc2.G.LoginState()) select { case err := <-nh.errCh: t.Fatalf("Error before notify: %v", err) case u := <-nh.loginCh: if u != userInfo.username { t.Fatalf("bad username in login notifcation: %q != %q", u, userInfo.username) } tc.G.Log.Debug("Got notification of login for %q", u) } btc := client.NewCmdBTCRunner(tc2.G) btc.SetAddress("1HUCBSJeHnkhzrVKVjaVmWg2QtZS1mdfaz") if err := btc.Run(); err != nil { t.Fatal(err) } // Now let's be sure that we get a notification 
back as we expect. select { case err := <-nh.errCh: t.Fatalf("Error before notify: %v", err) case uid := <-nh.userCh: tc.G.Log.Debug("Got notification from user changed handled (%s)", uid) if e := libkb.CheckUIDAgainstUsername(uid, userInfo.username); e != nil { t.Fatalf("Bad UID back: %s != %s (%s)", uid, userInfo.username, e) } } // Fire a logout if err := logout.Run(); err != nil { t.Fatal(err) } // Now let's be sure that we get a notification back as we expect. select { case err := <-nh.errCh: t.Fatalf("Error before notify: %v", err) case <-nh.logoutCh: tc.G.Log.Debug("Got notification from logout handler") } if err := client.CtlServiceStop(tc2.G); err != nil { t.Fatal(err) } // If the server failed, it's also an error if err := <-stopCh; err != nil { t.Fatal(err) } // Check that we only get one notification, not two select { case _, ok := <-nh.logoutCh: if ok { t.Fatal("Received an extra logout notification!") } default: } }
func TestGregorForwardToElectron(t *testing.T) { tc := setupTest(t, "gregor") defer tc.Cleanup() tc1 := cloneContext(tc) svc := service.NewService(tc.G, false) startCh := svc.GetStartChannel() stopCh := make(chan error) go func() { tc.G.Log.Debug("+ Service.Run") err := svc.Run() tc.G.Log.Debug("- Service.Run") if err != nil { t.Logf("Running the service produced an error: %v", err) } stopCh <- err }() userInfo := randomUser("grgr") sui := signupUI{ info: userInfo, Contextified: libkb.NewContextified(tc.G), } tc.G.SetUI(&sui) signup := client.NewCmdSignupRunner(tc.G) signup.SetTest() // Wait for the server to start up <-startCh if err := signup.Run(); err != nil { t.Fatal(err) } tc.G.Log.Debug("Login State: %v", tc.G.LoginState()) var err error check := func() { if err != nil { t.Fatal(err) } } cli, xp, err := client.GetRPCClientWithContext(tc1.G) srv := rpc.NewServer(xp, nil) em := newElectronMock(tc.G) err = srv.Register(keybase1.GregorUIProtocol(em)) check() ncli := keybase1.DelegateUiCtlClient{Cli: cli} // Spin until gregor comes up; it should come up after signup var ok bool for i := 0; !ok && i < 40; i++ { if ok = svc.HasGregor(); !ok { time.Sleep(50 * time.Millisecond) } else { tc.G.Log.Debug("spinning, waiting for gregor to come up (attempt %d)", i) } } if !ok { t.Fatal("Gregor never came up after we signed up") } svc.SetGregorPushStateFilter(func(m gregor.Message) bool { cat := m.ToInBandMessage().ToStateUpdateMessage().Creation().Category() return cat.String() != "user.identity_change" && cat.String() != "user.key_change" }) err = ncli.RegisterGregorFirehose(context.TODO()) check() select { case a := <-em.stateCh: if a.Reason != keybase1.PushReason_RECONNECTED { t.Fatal(fmt.Sprintf("got wrong reason: %v", a.Reason)) } if d := len(filterPubsubdItems(a.State.Items_)); d != 0 { t.Fatal(fmt.Sprintf("Wrong number of items in state -- should have 0, but got %d", d)) } case <-time.After(3 * time.Second): t.Fatalf("never got a reconnect message") } msgID, err := 
svc.GregorInject("foo", []byte("bar")) check() err = svc.GregorInjectOutOfBandMessage("baz", []byte("bip")) check() checkState := func(s gregor1.State) { items := filterPubsubdItems(s.Items_) if n := len(items); n != 1 { t.Errorf("Expected one item back; got %d", n) return } i := items[0] if !bytes.Equal(i.Md_.MsgID_.Bytes(), msgID.Bytes()) { t.Error("Wrong gregor message ID received") } if i.Item_.Category_.String() != "foo" { t.Error("Wrong gregor category") } if string(i.Item_.Body_.Bytes()) != "bar" { t.Error("Wrong gregor body") } } select { case pushArg := <-em.stateCh: checkState(pushArg.State) if pushArg.Reason != keybase1.PushReason_NEW_DATA { t.Errorf("wrong reason for push: %v", pushArg.Reason) } case <-time.After(3 * time.Second): t.Fatalf("never got an IBM") } select { case oobm := <-em.oobmCh: if oobm.System_ != "baz" { t.Fatalf("Got wrong OOBM system: %s", oobm.System_) } if s := string(oobm.Body_); s != "bip" { t.Fatalf("Got wrong OOBM body: %s", s) } case <-time.After(3 * time.Second): t.Fatalf("never got an OOBM") } svc.SimulateGregorCrashForTesting() select { case pushArg := <-em.stateCh: checkState(pushArg.State) if pushArg.Reason != keybase1.PushReason_RECONNECTED { t.Errorf("wrong reason for push: %v", pushArg.Reason) } case <-time.After(3 * time.Second): t.Fatalf("never got an IBM") } gcli := keybase1.GregorClient{Cli: cli} state, err := gcli.GetState(context.TODO()) check() checkState(state) if err := client.CtlServiceStop(tc.G); err != nil { t.Fatal(err) } // If the server failed, it's also an error if err := <-stopCh; err != nil { t.Fatal(err) } }
func TestSecretUI(t *testing.T) { tc := setupTest(t, "secret_ui") tc1 := cloneContext(tc) tc2 := cloneContext(tc) // Make sure we're not using G anywhere in our tests. libkb.G.LocalDb = nil defer tc.Cleanup() stopCh := make(chan error) svc := service.NewService(tc.G, false) startCh := svc.GetStartChannel() go func() { err := svc.Run() if err != nil { t.Logf("Running the service produced an error: %v", err) } stopCh <- err }() // Wait for the server to start up <-startCh var err error check := func() { if err != nil { t.Fatal(err) } } sui := newSecretUI() cli, xp, err := client.GetRPCClientWithContext(tc2.G) check() srv := rpc.NewServer(xp, nil) err = srv.Register(keybase1.SecretUiProtocol(sui)) check() ncli := keybase1.DelegateUiCtlClient{Cli: cli} err = ncli.RegisterSecretUI(context.TODO()) check() // run login command loginCmdUI := &loginCmdUI{ Contextified: libkb.NewContextified(tc2.G), } tc2.G.SetUI(loginCmdUI) cmd := client.NewCmdLoginRunner(tc2.G) err = cmd.Run() if err == nil { t.Fatal("login worked, when it should have failed") } // check that delegate ui was called: if !sui.getKeybasePassphrase { t.Logf("secret ui: %+v", sui) t.Error("delegate secret UI GetKeybasePassphrase was not called during login cmd") } stopper := client.NewCmdCtlStopRunner(tc1.G) if err := stopper.Run(); err != nil { t.Errorf("Error in stopping service: %v", err) } // If the server failed, it's also an error err = <-stopCh check() }
// TestDelegateUI verifies identify-UI delegation: it registers a mock
// IdentifyUI with the service over RPC, runs an identify of t_alice with
// delegation enabled, and confirms the delegate UI received the expected
// calls and closed its channel cleanly.
func TestDelegateUI(t *testing.T) {
	tc := setupTest(t, "delegate_ui")
	tc1 := cloneContext(tc)
	tc2 := cloneContext(tc)

	// Make sure we're not using G anywhere in our tests.
	libkb.G.LocalDb = nil

	defer tc.Cleanup()

	// Run the service in the background; its exit error is delivered on
	// stopCh and checked at the end of the test.
	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

	// Wait for the server to start up
	<-startCh

	dui := newDelegateUI()

	// launchDelegateUI registers the mock identify UI with the service via
	// the delegate-UI control protocol.
	launchDelegateUI := func(dui *delegateUI) error {
		cli, xp, err := client.GetRPCClientWithContext(tc2.G)
		if err != nil {
			return err
		}
		srv := rpc.NewServer(xp, nil)
		if err = srv.Register(keybase1.IdentifyUiProtocol(dui)); err != nil {
			return err
		}
		ncli := keybase1.DelegateUiCtlClient{Cli: cli}
		if err = ncli.RegisterIdentifyUI(context.TODO()); err != nil {
			return err
		}
		return nil
	}

	// Launch the delegate UI
	if err := launchDelegateUI(dui); err != nil {
		t.Fatal(err)
	}

	id := client.NewCmdIDRunner(tc1.G)
	id.SetUser("t_alice")
	id.UseDelegateUI()
	if err := id.Run(); err != nil {
		t.Errorf("Error in Run: %v", err)
	}

	// We should get either a 'done' or an 'error' from the delegateUI.
	// A non-nil err means the UI reported a failure; ok==true means the UI
	// sent a value without closing the channel; a clean close (nil, false)
	// is the success path, after which checkSuccess validates the calls.
	err, ok := <-dui.ch
	if err != nil {
		t.Errorf("Error with delegate UI: %v", err)
	} else if ok {
		t.Errorf("Delegate UI didn't close the channel properly")
	} else if err = dui.checkSuccess(); err != nil {
		t.Error(err)
	}

	stopper := client.NewCmdCtlStopRunner(tc1.G)
	if err := stopper.Run(); err != nil {
		t.Errorf("Error in stopping service: %v", err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}