func TestRPCs(t *testing.T) {
	tc := setupTest(t, "resolve2")
	tc2 := cloneContext(tc)
	libkb.G.LocalDb = nil
	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

	stopper := client.NewCmdCtlStopRunner(tc2.G)

	<-startCh

	// Add test RPC methods here.
	testIdentifyResolve2(t, tc2.G)

	if err := stopper.Run(); err != nil {
		t.Fatal(err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}
func TestStop(t *testing.T) {
	tc := setupTest(t, "stop")
	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("hit an error in Run, which might be masked: %v", err)
		}
		stopCh <- err
	}()

	tc2 := cloneContext(tc)

	<-startCh

	stopper := client.NewCmdCtlStopRunner(tc2.G)
	if err := stopper.Run(); err != nil {
		t.Fatal(err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}
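// Each test in this file repeats the same choreography: construct the
// service, run it in a goroutine, wait on its start channel, and forward
// Run's error over a stop channel. A minimal sketch of how that could be
// factored into a helper, assuming setupTest returns a libkb.TestContext
// (the helper name is hypothetical, not part of the existing suite):
func startTestService(t *testing.T, tc libkb.TestContext) <-chan error {
	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		// Run blocks until the service is stopped; its error (nil on a
		// clean shutdown) is checked by the test at the end.
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()
	// Block until the service signals that it is ready for RPCs.
	<-startCh
	return stopCh
}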
func TestVersionAndStop(t *testing.T) {
	tc := setupTest(t, "stop")
	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("hit an error in Run, which might be masked: %v", err)
		}
		stopCh <- err
	}()

	tc2 := cloneContext(tc)

	vui := versionUI{
		Contextified: libkb.NewContextified(tc2.G),
	}
	tc2.G.SetUI(&vui)

	<-startCh

	version := client.NewCmdVersionRunner(tc2.G)
	if err := version.Run(); err != nil {
		t.Fatal(err)
	}
	vui.checkVersionOutput(t)

	stopper := client.NewCmdCtlStopRunner(tc2.G)
	if err := stopper.Run(); err != nil {
		t.Fatal(err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}
func TestSignupLogout(t *testing.T) {
	tc := setupTest(t, "signup")
	tc2 := cloneContext(tc)
	tc5 := cloneContext(tc)

	// Hack the various portions of the service that aren't
	// properly contextified.
	libkb.G.LocalDb = nil

	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		stopCh <- svc.Run()
	}()

	userInfo := randomUser("sgnup")

	sui := signupUI{
		info:         userInfo,
		Contextified: libkb.NewContextified(tc2.G),
	}
	tc2.G.SetUI(&sui)
	signup := client.NewCmdSignupRunner(tc2.G)
	signup.SetTest()

	logout := client.NewCmdLogoutRunner(tc2.G)

	stopper := client.NewCmdCtlStopRunner(tc2.G)

	<-startCh

	if err := signup.Run(); err != nil {
		t.Fatal(err)
	}
	tc2.G.Log.Debug("Login State: %v", tc2.G.LoginState())

	nh := newNotifyHandler()

	// Launch the server that will listen for notifications on updates, such as logout
	launchServer := func(nh *notifyHandler) error {
		cli, xp, err := client.GetRPCClientWithContext(tc5.G)
		if err != nil {
			return err
		}
		srv := rpc.NewServer(xp, nil)
		if err = srv.Register(keybase1.NotifySessionProtocol(nh)); err != nil {
			return err
		}
		if err = srv.Register(keybase1.NotifyUsersProtocol(nh)); err != nil {
			return err
		}
		ncli := keybase1.NotifyCtlClient{Cli: cli}
		if err = ncli.SetNotifications(context.TODO(), keybase1.NotificationChannels{
			Session: true,
			Users:   true,
		}); err != nil {
			return err
		}
		return nil
	}

	// Actually launch it in the background
	go func() {
		err := launchServer(nh)
		if err != nil {
			nh.errCh <- err
		}
	}()

	btc := client.NewCmdBTCRunner(tc2.G)
	btc.SetAddress("1HUCBSJeHnkhzrVKVjaVmWg2QtZS1mdfaz")
	if err := btc.Run(); err != nil {
		t.Fatal(err)
	}

	// Now let's be sure that we get a notification back as we expect.
	select {
	case err := <-nh.errCh:
		t.Fatalf("Error before notify: %v", err)
	case uid := <-nh.userCh:
		tc.G.Log.Debug("Got notification from user changed handler (%s)", uid)
		if e := libkb.CheckUIDAgainstUsername(uid, userInfo.username); e != nil {
			t.Fatalf("Bad UID back: %s != %s (%s)", uid, userInfo.username, e)
		}
	}

	// Fire a logout
	if err := logout.Run(); err != nil {
		t.Fatal(err)
	}

	// Now let's be sure that we get a notification back as we expect.
	select {
	case err := <-nh.errCh:
		t.Fatalf("Error before notify: %v", err)
	case <-nh.logoutCh:
		tc.G.Log.Debug("Got notification from logout handler")
	}

	if err := stopper.Run(); err != nil {
		t.Fatal(err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}
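// newNotifyHandler is defined elsewhere in this package. A minimal sketch
// of the shape TestSignupLogout relies on, inferred from the usage above
// (the type and method names here are assumptions, not the actual
// definitions): each notification is forwarded over a channel so the
// test's select blocks can observe it.
type exampleNotifyHandler struct {
	logoutCh chan struct{}
	userCh   chan keybase1.UID
	errCh    chan error
}

// LoggedOut would handle the session notification fired by the logout
// command above.
func (h *exampleNotifyHandler) LoggedOut(_ context.Context) error {
	h.logoutCh <- struct{}{}
	return nil
}

// UserChanged would handle the user notification fired when the BTC
// address is set.
func (h *exampleNotifyHandler) UserChanged(_ context.Context, uid keybase1.UID) error {
	h.userCh <- uid
	return nil
}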
func TestPassphraseRecover(t *testing.T) {
	tc := setupTest(t, "pp")
	tc2 := cloneContext(tc)
	libkb.G.LocalDb = nil
	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

	<-startCh

	userInfo := randomUser("pp")
	sui := signupUI{
		info:         userInfo,
		Contextified: libkb.NewContextified(tc2.G),
	}
	tc2.G.SetUI(&sui)
	signup := client.NewCmdSignupRunner(tc2.G)
	signup.SetTest()

	stopper := client.NewCmdCtlStopRunner(tc2.G)

	if err := signup.Run(); err != nil {
		t.Fatal(err)
	}

	if _, err := tc.G.LoginState().VerifyPlaintextPassphrase(userInfo.passphrase); err != nil {
		t.Fatal(err)
	}

	// logout before recovering passphrase
	logout := client.NewCmdLogoutRunner(tc2.G)
	if err := logout.Run(); err != nil {
		t.Fatal(err)
	}

	// the paper key displayed during signup is in userInfo now, and it will be used
	// during passphrase recovery
	tc.G.Log.Debug("signup paper key: %s", userInfo.displayedPaperKey)

	oldPassphrase := userInfo.passphrase
	newPassphrase := userInfo.passphrase + userInfo.passphrase
	sui.info.passphrase = newPassphrase

	recoverCmd := client.NewCmdPassphraseRecoverRunner(tc2.G)
	if err := recoverCmd.Run(); err != nil {
		t.Fatal(err)
	}

	if _, err := tc.G.LoginState().VerifyPlaintextPassphrase(newPassphrase); err != nil {
		t.Fatal(err)
	}

	if _, err := tc.G.LoginState().VerifyPlaintextPassphrase(oldPassphrase); err == nil {
		t.Fatal("old passphrase passed verification after passphrase change")
	}

	if err := stopper.Run(); err != nil {
		t.Fatal(err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}
func TestPassphraseChange(t *testing.T) {
	tc := setupTest(t, "pp")
	tc2 := cloneContext(tc)
	libkb.G.LocalDb = nil
	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

	<-startCh

	userInfo := randomUser("pp")
	sui := signupUI{
		info:         userInfo,
		Contextified: libkb.NewContextified(tc2.G),
	}
	tc2.G.SetUI(&sui)
	signup := client.NewCmdSignupRunner(tc2.G)
	signup.SetTest()

	stopper := client.NewCmdCtlStopRunner(tc2.G)

	if err := signup.Run(); err != nil {
		t.Fatal(err)
	}

	if _, err := tc.G.LoginState().VerifyPlaintextPassphrase(userInfo.passphrase); err != nil {
		t.Fatal(err)
	}

	oldPassphrase := userInfo.passphrase
	newPassphrase := userInfo.passphrase + userInfo.passphrase
	sui.info.passphrase = newPassphrase

	change := client.NewCmdPassphraseChangeRunner(tc2.G)
	if err := change.Run(); err != nil {
		t.Fatal(err)
	}

	if _, err := tc.G.LoginState().VerifyPlaintextPassphrase(newPassphrase); err != nil {
		t.Fatal(err)
	}

	if _, err := tc.G.LoginState().VerifyPlaintextPassphrase(oldPassphrase); err == nil {
		t.Fatal("old passphrase passed verification after passphrase change")
	}

	if err := stopper.Run(); err != nil {
		t.Fatal(err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}
func TestSecretUI(t *testing.T) {
	tc := setupTest(t, "secret_ui")
	tc1 := cloneContext(tc)
	tc2 := cloneContext(tc)

	// Make sure we're not using G anywhere in our tests.
	libkb.G.LocalDb = nil

	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

	// Wait for the server to start up
	<-startCh

	var err error
	check := func() {
		if err != nil {
			t.Fatal(err)
		}
	}

	sui := newSecretUI()
	cli, xp, err := client.GetRPCClientWithContext(tc2.G)
	check()
	srv := rpc.NewServer(xp, nil)
	err = srv.Register(keybase1.SecretUiProtocol(sui))
	check()
	ncli := keybase1.DelegateUiCtlClient{Cli: cli}
	err = ncli.RegisterSecretUI(context.TODO())
	check()

	// run login command
	loginCmdUI := &loginCmdUI{
		Contextified: libkb.NewContextified(tc2.G),
	}
	tc2.G.SetUI(loginCmdUI)
	cmd := client.NewCmdLoginRunner(tc2.G)
	err = cmd.Run()
	if err == nil {
		t.Fatal("login worked, when it should have failed")
	}

	// check that delegate ui was called:
	if !sui.getKeybasePassphrase {
		t.Logf("secret ui: %+v", sui)
		t.Error("delegate secret UI GetKeybasePassphrase was not called during login cmd")
	}

	stopper := client.NewCmdCtlStopRunner(tc1.G)
	if err := stopper.Run(); err != nil {
		t.Errorf("Error in stopping service: %v", err)
	}

	// If the server failed, it's also an error
	err = <-stopCh
	check()
}
func TestDelegateUI(t *testing.T) {
	tc := setupTest(t, "delegate_ui")
	tc1 := cloneContext(tc)
	tc2 := cloneContext(tc)

	// Make sure we're not using G anywhere in our tests.
	libkb.G.LocalDb = nil

	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

	// Wait for the server to start up
	<-startCh

	dui := newDelegateUI()

	launchDelegateUI := func(dui *delegateUI) error {
		cli, xp, err := client.GetRPCClientWithContext(tc2.G)
		if err != nil {
			return err
		}
		srv := rpc.NewServer(xp, nil)
		if err = srv.Register(keybase1.IdentifyUiProtocol(dui)); err != nil {
			return err
		}
		ncli := keybase1.DelegateUiCtlClient{Cli: cli}
		if err = ncli.RegisterIdentifyUI(context.TODO()); err != nil {
			return err
		}
		return nil
	}

	// Launch the delegate UI
	if err := launchDelegateUI(dui); err != nil {
		t.Fatal(err)
	}

	id := client.NewCmdIDRunner(tc1.G)
	id.SetUser("t_alice")
	id.UseDelegateUI()
	if err := id.Run(); err != nil {
		t.Errorf("Error in Run: %v", err)
	}

	// We should get either a 'done' or an 'error' from the delegateUI.
	err, ok := <-dui.ch
	if err != nil {
		t.Errorf("Error with delegate UI: %v", err)
	} else if ok {
		t.Errorf("Delegate UI didn't close the channel properly")
	} else if err = dui.checkSuccess(); err != nil {
		t.Error(err)
	}

	stopper := client.NewCmdCtlStopRunner(tc1.G)
	if err := stopper.Run(); err != nil {
		t.Errorf("Error in stopping service: %v", err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}
func TestTrackingNotifications(t *testing.T) {
	tc := setupTest(t, "signup")
	tc2 := cloneContext(tc)
	tc5 := cloneContext(tc)

	// Hack the various portions of the service that aren't
	// properly contextified.
	libkb.G.LocalDb = nil

	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

	userInfo := randomUser("sgnup")

	tui := trackingUI{
		signupUI: signupUI{
			info:         userInfo,
			Contextified: libkb.NewContextified(tc2.G),
		},
	}
	tc2.G.SetUI(&tui)
	signup := client.NewCmdSignupRunner(tc2.G)
	signup.SetTest()

	<-startCh

	if err := signup.Run(); err != nil {
		t.Fatal(err)
	}
	tc2.G.Log.Debug("Login State: %v", tc2.G.LoginState())

	nh := newTrackingNotifyHandler()

	// Launch the server that will listen for tracking notifications.
	launchServer := func(nh *trackingNotifyHandler) error {
		cli, xp, err := client.GetRPCClientWithContext(tc5.G)
		if err != nil {
			return err
		}
		srv := rpc.NewServer(xp, nil)
		if err = srv.Register(keybase1.NotifyTrackingProtocol(nh)); err != nil {
			return err
		}
		ncli := keybase1.NotifyCtlClient{Cli: cli}
		if err = ncli.SetNotifications(context.TODO(), keybase1.NotificationChannels{
			Tracking: true,
		}); err != nil {
			return err
		}
		return nil
	}

	// Actually launch it in the background
	go func() {
		err := launchServer(nh)
		if err != nil {
			nh.errCh <- err
		}
	}()

	// Have our test user track t_alice.
	trackCmd := client.NewCmdTrackRunner(tc2.G)
	trackCmd.SetUser("t_alice")
	trackCmd.SetOptions(keybase1.TrackOptions{BypassConfirm: true})
	err := trackCmd.Run()
	if err != nil {
		t.Fatal(err)
	}

	// Do a check for new tracking statements that should fire off a
	// notification. Currently the track command above does not fetch the new
	// chain link from the server, so this call is required. It's possible that
	// TrackEngine (or our signature caching code) might change in the future,
	// making this call unnecessary.
	checkTrackingCmd := client.NewCmdCheckTrackingRunner(tc2.G)
	err = checkTrackingCmd.Run()
	if err != nil {
		t.Fatal(err)
	}

	// Wait to get a notification back as we expect.
	// NOTE: If this test ever starts deadlocking here, it's possible that
	// we've changed how we cache signatures that we make on the local client,
	// in such a way that the fetch done by CheckTracking above doesn't find
	// any "isOwnNewLinkFromServer" links. If so, one way to fix this test
	// would be to blow away the local db before calling CheckTracking.
	tc.G.Log.Debug("Waiting for tracking notification.")
	select {
	case err := <-nh.errCh:
		t.Fatalf("Error before notify: %v", err)
	case arg := <-nh.trackingCh:
		tAliceUID := keybase1.UID("295a7eea607af32040647123732bc819")
		tc.G.Log.Debug("Got tracking changed notification (%#v)", arg)
		if arg.Username != "t_alice" {
			t.Fatalf("Bad username back: %s != %s", "t_alice", arg.Username)
		}
		if !tAliceUID.Equal(arg.Uid) {
			t.Fatalf("Bad UID back: %s != %s", tAliceUID, arg.Uid)
		}
	}

	stopper := client.NewCmdCtlStopRunner(tc2.G)
	if err := stopper.Run(); err != nil {
		t.Fatal(err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}
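// trackingNotifyHandler is likewise defined elsewhere in this package. A
// minimal sketch of a handler compatible with the select block above
// (the type name and field names are assumptions inferred from usage,
// not the actual definitions):
type exampleTrackingNotifyHandler struct {
	trackingCh chan keybase1.TrackingChangedArg
	errCh      chan error
}

// TrackingChanged forwards the notification so the test can assert on the
// username and UID it carries.
func (h *exampleTrackingNotifyHandler) TrackingChanged(_ context.Context, arg keybase1.TrackingChangedArg) error {
	h.trackingCh <- arg
	return nil
}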