Example #1
func TestConfigGetAndSet(t *testing.T) {
	tc := setupTest(t, "stop")
	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("hit an error in Run, which might be masked: %v", err)
		}
		stopCh <- err
	}()

	tc2 := cloneContext(tc)

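	// Wait for the server to start up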
	<-startCh

	testConfigGetAndSet(t, tc2.G)

	if err := client.CtlServiceStop(tc2.G); err != nil {
		t.Fatal(err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}
Example #2
func TestRPCs(t *testing.T) {
	tc := setupTest(t, "resolve2")
	tc2 := cloneContext(tc)

	libkb.G.LocalDb = nil

	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

	stopper := client.NewCmdCtlStopRunner(tc2.G)

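	// Wait for the server to start up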
	<-startCh

	// Add test RPC methods here.
	testIdentifyResolve2(t, tc2.G)

	if err := stopper.Run(); err != nil {
		t.Fatal(err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}
Example #3
// ForkServer forks a new background Keybase service, and waits until it's
// pingable. It will only do something useful on Unixes; it won't work on
// Windows (probably?). Returns an error if anything bad happens; otherwise,
// assume that the server was successfully started up. Returns (true, nil) if
// the server was actually forked, or (false, nil) if it was previously up.
func ForkServer(g *libkb.GlobalContext, cl libkb.CommandLine, forkType keybase1.ForkType) (bool, error) {
	srv := service.NewService(g, true /* isDaemon */)
	forked := false

	// If we try to get an exclusive lock and succeed, it means we
	// need to relaunch the daemon since it's dead
	g.Log.Debug("Getting flock")
	err := srv.GetExclusiveLockWithoutAutoUnlock()
	if err == nil {
		g.Log.Debug("Flocked! Server must have died")
		srv.ReleaseLock()
		_, err = spawnServer(g, cl, forkType)
		if err != nil {
			g.Log.Errorf("Error in spawning server process: %s", err)
			return false, err
		}
		err = pingLoop(g)
		if err != nil {
			g.Log.Errorf("Ping failure after server fork: %s", err)
			return false, err
		}
		forked = true
	} else {
		g.Log.Debug("The server is still up")
		err = nil
	}

	return forked, err
}
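A minimal caller sketch for the variant above. This is hypothetical glue, not part of the source: the ensureServiceRunning helper is invented here, keybase1.ForkType_AUTO is an assumed fork-type value, and g and cl are supplied by the caller's CLI wiring.

// ensureServiceRunning is a hypothetical helper sketching how ForkServer might
// be called: fork the service if it is down, then return once it answers pings.
func ensureServiceRunning(g *libkb.GlobalContext, cl libkb.CommandLine) error {
	forked, err := ForkServer(g, cl, keybase1.ForkType_AUTO) // ForkType_AUTO is an assumption
	if err != nil {
		return err // the service could be neither reached nor spawned
	}
	g.Log.Debug("service is up (newly forked: %v)", forked)
	return nil
}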
Example #4
// ForkServer forks a new background Keybase service, and waits until it's
// pingable. It will only do something useful on Unixes; it won't work on
// Windows (probably?). Returns an error if anything bad happens; otherwise,
// assume that the server was successfully started up.
func ForkServer(cl libkb.CommandLine, g *libkb.GlobalContext) error {
	srv := service.NewService(true /* isDaemon */, g)

	// If we try to get an exclusive lock and succeed, it means we
	// need to relaunch the daemon since it's dead
	g.Log.Debug("Getting flock")
	err := srv.GetExclusiveLockWithoutAutoUnlock()
	if err == nil {
		g.Log.Debug("Flocked! Server must have died")
		srv.ReleaseLock()
		err = spawnServer(cl)
		if err != nil {
			g.Log.Errorf("Error in spawning server process: %s", err)
			return err
		}
		err = pingLoop()
		if err != nil {
			g.Log.Errorf("Ping failure after server fork: %s", err)
			return err
		}
	} else {
		g.Log.Debug("The server is still up")
		err = nil
	}

	return err
}
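This is an older variant of Example #3's ForkServer: it takes (cl, g) in the opposite order, and returns only an error rather than also reporting, via the boolean, whether a fork actually happened.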
Example #5
// Init runs the Keybase services
func Init(homeDir string, logFile string, runModeStr string, accessGroupOverride bool) error {
	fmt.Println("Go: Initializing")
	fmt.Printf("Go: Using log: %s\n", logFile)
	kbCtx = libkb.G
	kbCtx.Init()
	usage := libkb.Usage{
		Config:    true,
		API:       true,
		KbKeyring: true,
	}
	runMode, err := libkb.StringToRunMode(runModeStr)
	if err != nil {
		return err
	}
	config := libkb.AppConfig{
		HomeDir:                     homeDir,
		LogFile:                     logFile,
		RunMode:                     runMode,
		Debug:                       true,
		LocalRPCDebug:               "Acsvip",
		SecurityAccessGroupOverride: accessGroupOverride,
	}
	err = kbCtx.Configure(config, usage)
	if err != nil {
		return err
	}

	svc := service.NewService(kbCtx, false)
	if err = svc.StartLoopbackServer(); err != nil {
		return err
	}
	kbCtx.SetService()
	kbCtx.SetUIRouter(service.NewUIRouter(kbCtx))

	serviceLog := config.GetLogFile()
	logs := libkb.Logs{
		Service: serviceLog,
	}

	logSendContext = libkb.LogSendContext{
		Contextified: libkb.NewContextified(kbCtx),
		Logs:         logs,
	}

	// FIXME (MBG): This is causing RPC responses to sometimes not be received
	// on iOS. Repro by hooking up getExtendedStatus to a button in the iOS
	// client and watching JS logs. Disabling until we have a root cause / fix.
	kbfsParams := libkbfs.DefaultInitParams(kbCtx)
	kbfsConfig, err = libkbfs.Init(kbCtx, kbfsParams, serviceCn{}, func() {}, kbCtx.Log)
	if err != nil {
		return err
	}

	return Reset()
}
Example #6
func TestVersionAndStop(t *testing.T) {
	tc := setupTest(t, "stop")
	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("hit an error in Run, which might be masked: %v", err)
		}
		stopCh <- err
	}()

	tc2 := cloneContext(tc)

	vui := versionUI{
		Contextified: libkb.NewContextified(tc2.G),
	}
	tc2.G.SetUI(&vui)

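	// Wait for the server to start up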
	<-startCh
	version := client.NewCmdVersionRunner(tc2.G)

	if err := version.Run(); err != nil {
		t.Fatal(err)
	}

	vui.checkVersionOutput(t)

	stopper := client.NewCmdCtlStopRunner(tc2.G)

	if err := stopper.Run(); err != nil {
		t.Fatal(err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}
Example #7
// ServerURI should match run mode environment.
func Init(homeDir string, runModeStr string, serverURI string, accessGroupOverride bool) {
	startOnce.Do(func() {
		g := libkb.G
		g.Init()
		usage := libkb.Usage{
			Config:    true,
			API:       true,
			KbKeyring: true,
		}
		runMode, err := libkb.StringToRunMode(runModeStr)
		if err != nil {
			fmt.Println("Error decoding run mode", err, runModeStr)
		}
		config := libkb.AppConfig{
			HomeDir:                     homeDir,
			RunMode:                     runMode,
			Debug:                       true,
			LocalRPCDebug:               "Acsvip",
			ServerURI:                   serverURI,
			SecurityAccessGroupOverride: accessGroupOverride,
		}
		err = libkb.G.Configure(config, usage)
		if err != nil {
			panic(err)
		}
		if err := service.NewService(g, false).StartLoopbackServer(); err != nil {
			fmt.Println("Error starting loopback server", err)
		}
		Reset()
	})
}
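Unlike the Init in Example #5, this variant only logs a bad run-mode string and panics outright if configuration fails, rather than returning the error to its caller.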
Example #8
func TestRPCs(t *testing.T) {
	tc := setupTest(t, "resolve2")
	tc2 := cloneContext(tc)

	libkb.G.LocalDb = nil

	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

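	// Wait for the server to start up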
	<-startCh

	// Add test RPC methods here.
	testIdentifyResolve2(t, tc2.G)
	testCheckInvitationCode(t, tc2.G)
	testLoadAllPublicKeysUnverified(t, tc2.G)
	testLoadUserWithNoKeys(t, tc2.G)

	if err := client.CtlServiceStop(tc2.G); err != nil {
		t.Fatal(err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}
Example #9
File: main.go Project: moul/client
func mainInner(g *libkb.GlobalContext) error {
	cl := libcmdline.NewCommandLine(true, client.GetExtraFlags())
	cl.AddCommands(client.GetCommands(cl, g))
	cl.AddCommands(service.GetCommands(cl, g))
	cl.AddHelpTopics(client.GetHelpTopics())

	var err error
	cmd, err = cl.Parse(os.Args)
	if err != nil {
		err = fmt.Errorf("Error parsing command line arguments: %s\n", err)
		return err
	}

	if cmd == nil {
		return nil
	}

	if !cl.IsService() {
		client.InitUI()
	}

	if err = g.ConfigureCommand(cl, cmd); err != nil {
		return err
	}
	g.StartupMessage()

	warnNonProd(g.Log, g.Env)

	if cl.IsService() {
		return cmd.Run()
	}

	// Start the server on the other end, possibly.
	// There are two cases in which we do this: (1) we want
	// a local loopback server in standalone mode; (2) we
	// need to "autofork" it. Do at most one of these
	// operations.
	if g.Env.GetStandalone() {
		if cl.IsNoStandalone() {
			return fmt.Errorf("Can't run command in standalone mode")
		}
		if err := service.NewService(false /* isDaemon */, g).StartLoopbackServer(); err != nil {
			if pflerr, ok := err.(libkb.PIDFileLockError); ok {
				err = fmt.Errorf("Can't run in standalone mode with a service running (see %q)",
					pflerr.Filename)
			}
			return err
		}
	} else {
		// If this command warrants an autofork, do it now.
		if fc := cl.GetForkCmd(); fc == libcmdline.ForceFork || (g.Env.GetAutoFork() && fc != libcmdline.NoFork) {
			if err = client.ForkServer(cl, g); err != nil {
				return err
			}
		}
		// Whether or not we autoforked, we're now running in client-server
		// mode (as opposed to standalone). Register a global LogUI so that
		// calls to G.Log() in the daemon can be copied to us. This is
		// something of a hack on the daemon side.
		if !g.Env.GetDoLogForward() {
			g.Log.Debug("Disabling log forwarding")
		} else {
			// TODO This triggers a connection to the RPC server before cmd.Run() is
			// called, so the command has no way to deal with errors on its own.
			// This should probably be moved into RegisterProtocols?
			// Also rpc.RegisterProtocolsWithContext seems to automatically add the
			// LogUIProtocol?
			err := registerGlobalLogUI(g)
			if err != nil {
				return err
			}
		}
	}

	return cmd.Run()
}
Example #10
func TestSignupLogout(t *testing.T) {
	tc := setupTest(t, "signup")
	tc2 := cloneContext(tc)
	tc5 := cloneContext(tc)

	libkb.G.LocalDb = nil

	// Hack the various portions of the service that aren't
	// properly contextified.

	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

	userInfo := randomUser("sgnup")

	sui := signupUI{
		info:         userInfo,
		Contextified: libkb.NewContextified(tc2.G),
	}
	tc2.G.SetUI(&sui)
	signup := client.NewCmdSignupRunner(tc2.G)
	signup.SetTest()

	logout := client.NewCmdLogoutRunner(tc2.G)

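	// Wait for the server to start up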
	<-startCh

	nh := newNotifyHandler()

	// Launch the server that will listen for notifications on updates, such as logout
	launchServer := func(nh *notifyHandler) error {
		cli, xp, err := client.GetRPCClientWithContext(tc5.G)
		if err != nil {
			return err
		}
		srv := rpc.NewServer(xp, nil)
		if err = srv.Register(keybase1.NotifySessionProtocol(nh)); err != nil {
			return err
		}
		if err = srv.Register(keybase1.NotifyUsersProtocol(nh)); err != nil {
			return err
		}
		ncli := keybase1.NotifyCtlClient{Cli: cli}
		if err = ncli.SetNotifications(context.TODO(), keybase1.NotificationChannels{
			Session: true,
			Users:   true,
		}); err != nil {
			return err
		}
		return nil
	}

	// Actually launch it in the background
	go func() {
		err := launchServer(nh)
		if err != nil {
			nh.errCh <- err
		}
	}()

	if err := signup.Run(); err != nil {
		t.Fatal(err)
	}
	tc2.G.Log.Debug("Login State: %v", tc2.G.LoginState())
	select {
	case err := <-nh.errCh:
		t.Fatalf("Error before notify: %v", err)
	case u := <-nh.loginCh:
		if u != userInfo.username {
			t.Fatalf("bad username in login notification: %q != %q", u, userInfo.username)
		}
		tc.G.Log.Debug("Got notification of login for %q", u)
	}

	btc := client.NewCmdBTCRunner(tc2.G)
	btc.SetAddress("1HUCBSJeHnkhzrVKVjaVmWg2QtZS1mdfaz")
	if err := btc.Run(); err != nil {
		t.Fatal(err)
	}

	// Now let's be sure that we get a notification back as we expect.
	select {
	case err := <-nh.errCh:
		t.Fatalf("Error before notify: %v", err)
	case uid := <-nh.userCh:
		tc.G.Log.Debug("Got notification from user changed handler (%s)", uid)
		if e := libkb.CheckUIDAgainstUsername(uid, userInfo.username); e != nil {
			t.Fatalf("Bad UID back: %s != %s (%s)", uid, userInfo.username, e)
		}
	}

	// Fire a logout
	if err := logout.Run(); err != nil {
		t.Fatal(err)
	}

	// Now let's be sure that we get a notification back as we expect.
	select {
	case err := <-nh.errCh:
		t.Fatalf("Error before notify: %v", err)
	case <-nh.logoutCh:
		tc.G.Log.Debug("Got notification from logout handler")
	}

	if err := client.CtlServiceStop(tc2.G); err != nil {
		t.Fatal(err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}

	// Check that we only get one notification, not two
	select {
	case _, ok := <-nh.logoutCh:
		if ok {
			t.Fatal("Received an extra logout notification!")
		}
	default:
	}
}
Example #11
func TestPassphraseChange(t *testing.T) {
	tc := setupTest(t, "pp")
	tc2 := cloneContext(tc)

	libkb.G.LocalDb = nil

	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()
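	// Wait for the server to start up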
	<-startCh

	userInfo := randomUser("pp")

	sui := signupUI{
		info:         userInfo,
		Contextified: libkb.NewContextified(tc2.G),
	}
	tc2.G.SetUI(&sui)
	signup := client.NewCmdSignupRunner(tc2.G)
	signup.SetTest()

	stopper := client.NewCmdCtlStopRunner(tc2.G)

	if err := signup.Run(); err != nil {
		t.Fatal(err)
	}

	if _, err := tc.G.LoginState().VerifyPlaintextPassphrase(userInfo.passphrase); err != nil {
		t.Fatal(err)
	}

	oldPassphrase := userInfo.passphrase
	newPassphrase := userInfo.passphrase + userInfo.passphrase
	sui.info.passphrase = newPassphrase
	change := client.NewCmdPassphraseChangeRunner(tc2.G)

	if err := change.Run(); err != nil {
		t.Fatal(err)
	}

	if _, err := tc.G.LoginState().VerifyPlaintextPassphrase(newPassphrase); err != nil {
		t.Fatal(err)
	}

	if _, err := tc.G.LoginState().VerifyPlaintextPassphrase(oldPassphrase); err == nil {
		t.Fatal("old passphrase passed verification after passphrase change")
	}

	if err := stopper.Run(); err != nil {
		t.Fatal(err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}
Example #12
func TestSecretUI(t *testing.T) {
	tc := setupTest(t, "secret_ui")
	tc1 := cloneContext(tc)
	tc2 := cloneContext(tc)

	// Make sure we're not using G anywhere in our tests.
	libkb.G.LocalDb = nil

	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

	// Wait for the server to start up
	<-startCh

	var err error
	check := func() {
		if err != nil {
			t.Fatal(err)
		}
	}

	sui := newSecretUI()
	cli, xp, err := client.GetRPCClientWithContext(tc2.G)
	check()
	srv := rpc.NewServer(xp, nil)
	err = srv.Register(keybase1.SecretUiProtocol(sui))
	check()
	ncli := keybase1.DelegateUiCtlClient{Cli: cli}
	err = ncli.RegisterSecretUI(context.TODO())
	check()

	// run login command
	loginCmdUI := &loginCmdUI{
		Contextified: libkb.NewContextified(tc2.G),
	}
	tc2.G.SetUI(loginCmdUI)
	cmd := client.NewCmdLoginRunner(tc2.G)
	err = cmd.Run()
	if err == nil {
		t.Fatal("login worked, when it should have failed")
	}

	// check that delegate ui was called:
	if !sui.getKeybasePassphrase {
		t.Logf("secret ui: %+v", sui)
		t.Error("delegate secret UI GetKeybasePassphrase was not called during login cmd")
	}

	stopper := client.NewCmdCtlStopRunner(tc1.G)
	if err := stopper.Run(); err != nil {
		t.Errorf("Error in stopping service: %v", err)
	}

	// If the server failed, it's also an error
	err = <-stopCh
	check()
}
Example #13
func TestGregorForwardToElectron(t *testing.T) {
	tc := setupTest(t, "gregor")
	defer tc.Cleanup()
	tc1 := cloneContext(tc)

	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	stopCh := make(chan error)
	go func() {
		tc.G.Log.Debug("+ Service.Run")
		err := svc.Run()
		tc.G.Log.Debug("- Service.Run")
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

	userInfo := randomUser("grgr")
	sui := signupUI{
		info:         userInfo,
		Contextified: libkb.NewContextified(tc.G),
	}
	tc.G.SetUI(&sui)
	signup := client.NewCmdSignupRunner(tc.G)
	signup.SetTest()

	// Wait for the server to start up
	<-startCh

	if err := signup.Run(); err != nil {
		t.Fatal(err)
	}
	tc.G.Log.Debug("Login State: %v", tc.G.LoginState())

	var err error
	check := func() {
		if err != nil {
			t.Fatal(err)
		}
	}
	cli, xp, err := client.GetRPCClientWithContext(tc1.G)
	check()
	srv := rpc.NewServer(xp, nil)
	em := newElectronMock(tc.G)
	err = srv.Register(keybase1.GregorUIProtocol(em))
	check()
	ncli := keybase1.DelegateUiCtlClient{Cli: cli}

	// Spin until gregor comes up; it should come up after signup
	var ok bool
	for i := 0; !ok && i < 40; i++ {
		if ok = svc.HasGregor(); !ok {
			tc.G.Log.Debug("spinning, waiting for gregor to come up (attempt %d)", i)
			time.Sleep(50 * time.Millisecond)
		}
	}
	if !ok {
		t.Fatal("Gregor never came up after we signed up")
	}

	svc.SetGregorPushStateFilter(func(m gregor.Message) bool {
		cat := m.ToInBandMessage().ToStateUpdateMessage().Creation().Category()
		return cat.String() != "user.identity_change" && cat.String() != "user.key_change"
	})
	err = ncli.RegisterGregorFirehose(context.TODO())
	check()

	select {
	case a := <-em.stateCh:
		if a.Reason != keybase1.PushReason_RECONNECTED {
			t.Fatalf("got wrong reason: %v", a.Reason)
		}
		if d := len(filterPubsubdItems(a.State.Items_)); d != 0 {
			t.Fatalf("Wrong number of items in state -- should have 0, but got %d", d)
		}
	case <-time.After(3 * time.Second):
		t.Fatalf("never got a reconnect message")
	}

	msgID, err := svc.GregorInject("foo", []byte("bar"))
	check()
	err = svc.GregorInjectOutOfBandMessage("baz", []byte("bip"))
	check()

	checkState := func(s gregor1.State) {
		items := filterPubsubdItems(s.Items_)
		if n := len(items); n != 1 {
			t.Errorf("Expected one item back; got %d", n)
			return
		}
		i := items[0]
		if !bytes.Equal(i.Md_.MsgID_.Bytes(), msgID.Bytes()) {
			t.Error("Wrong gregor message ID received")
		}
		if i.Item_.Category_.String() != "foo" {
			t.Error("Wrong gregor category")
		}
		if string(i.Item_.Body_.Bytes()) != "bar" {
			t.Error("Wrong gregor body")
		}
	}

	select {
	case pushArg := <-em.stateCh:
		checkState(pushArg.State)
		if pushArg.Reason != keybase1.PushReason_NEW_DATA {
			t.Errorf("wrong reason for push: %v", pushArg.Reason)
		}
	case <-time.After(3 * time.Second):
		t.Fatalf("never got an IBM")
	}

	select {
	case oobm := <-em.oobmCh:
		if oobm.System_ != "baz" {
			t.Fatalf("Got wrong OOBM system: %s", oobm.System_)
		}
		if s := string(oobm.Body_); s != "bip" {
			t.Fatalf("Got wrong OOBM body: %s", s)
		}
	case <-time.After(3 * time.Second):
		t.Fatalf("never got an OOBM")
	}

	svc.SimulateGregorCrashForTesting()
	select {
	case pushArg := <-em.stateCh:
		checkState(pushArg.State)
		if pushArg.Reason != keybase1.PushReason_RECONNECTED {
			t.Errorf("wrong reason for push: %v", pushArg.Reason)
		}
	case <-time.After(3 * time.Second):
		t.Fatalf("never got an IBM")
	}

	gcli := keybase1.GregorClient{Cli: cli}
	state, err := gcli.GetState(context.TODO())
	check()
	checkState(state)

	if err := client.CtlServiceStop(tc.G); err != nil {
		t.Fatal(err)
	}
	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}
Example #14
File: main.go Project: qbit/client
// AutoFork? Standalone? ClientServer? Brew service?  This function deals with the
// various run configurations that we can run in.
func configureProcesses(g *libkb.GlobalContext, cl *libcmdline.CommandLine, cmd *libcmdline.Command) (err error) {

	g.Log.Debug("+ configureProcesses")
	defer func() {
		g.Log.Debug("- configureProcesses -> %v", err)
	}()

	// On Linux, the service configures its own autostart file. Otherwise, no
	// need to configure if we're a service.
	if cl.IsService() {
		g.Log.Debug("| in configureProcesses, is service")
		if runtime.GOOS == "linux" {
			g.Log.Debug("| calling AutoInstall")
			_, err := install.AutoInstall(g, "", false, g.Log)
			if err != nil {
				return err
			}
		}
		return nil
	}

	// Start the server on the other end, possibly.
	// There are two cases in which we do this: (1) we want
	// a local loopback server in standalone mode; (2) we
	// need to "autofork" it. Do at most one of these
	// operations.
	if g.Env.GetStandalone() {
		if cl.IsNoStandalone() {
			err = fmt.Errorf("Can't run command in standalone mode")
			return err
		}
		err := service.NewService(g, false /* isDaemon */).StartLoopbackServer()
		if err != nil {
			if pflerr, ok := err.(libkb.PIDFileLockError); ok {
				err = fmt.Errorf("Can't run in standalone mode with a service running (see %q)",
					pflerr.Filename)
				return err
			}
		}
		return err
	}

	// After this point, we need to provide a remote logging story if necessary

	// If this command specifically asks not to be forked, then we are done in this
	// function. This sort of thing is true for the `ctl` commands and also the `version`
	// command.
	fc := cl.GetForkCmd()
	if fc == libcmdline.NoFork {
		return configureLogging(g, cl)
	}

	var newProc bool
	if libkb.IsBrewBuild {
		// If we're running in Brew mode, we might need to install ourselves as a persistent
		// service for future invocations of the command.
		newProc, err = install.AutoInstall(g, "", false, g.Log)
		if err != nil {
			return err
		}
	} else {
		// If this command warrants an autofork, do it now.
		if fc == libcmdline.ForceFork || g.Env.GetAutoFork() {
			newProc, err = client.AutoForkServer(g, cl)
			if err != nil {
				return err
			}
		}
	}

	// Restart the service if we see that it's out of date. It's important to do this
	// before we make any RPCs to the service --- for instance, before the logging
	// calls below. See the v1.0.8 update fiasco for more details. Also, only need
	// to do this if we didn't just start a new process.
	if !newProc {
		if err = client.FixVersionClash(g, cl); err != nil {
			return err
		}
	}

	g.Log.Debug("| After forks; newProc=%v", newProc)
	if err = configureLogging(g, cl); err != nil {
		return err
	}

	// This sends the client's PATH to the service so the service can update
	// its PATH if necessary. This is called after FixVersionClash(), which
	// happens above in configureProcesses().
	if err = configurePath(g, cl); err != nil {
		// Further note -- don't die here.  It could be we're calling this method
		// against an earlier version of the service that doesn't support it.
		// It's not critical that it succeed, so continue on.
		g.Log.Debug("Configure path failed: %v", err)
	}

	return nil
}
Example #15
func TestDelegateUI(t *testing.T) {
	tc := setupTest(t, "delegate_ui")
	tc1 := cloneContext(tc)
	tc2 := cloneContext(tc)

	// Make sure we're not using G anywhere in our tests.
	libkb.G.LocalDb = nil

	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

	// Wait for the server to start up
	<-startCh

	dui := newDelegateUI()

	launchDelegateUI := func(dui *delegateUI) error {
		cli, xp, err := client.GetRPCClientWithContext(tc2.G)
		if err != nil {
			return err
		}
		srv := rpc.NewServer(xp, nil)
		if err = srv.Register(keybase1.IdentifyUiProtocol(dui)); err != nil {
			return err
		}
		ncli := keybase1.DelegateUiCtlClient{Cli: cli}
		if err = ncli.RegisterIdentifyUI(context.TODO()); err != nil {
			return err
		}
		return nil
	}

	// Launch the delegate UI
	if err := launchDelegateUI(dui); err != nil {
		t.Fatal(err)
	}

	id := client.NewCmdIDRunner(tc1.G)
	id.SetUser("t_alice")
	id.UseDelegateUI()
	if err := id.Run(); err != nil {
		t.Errorf("Error in Run: %v", err)
	}

	// We should get either a 'done' or an 'error' from the delegateUI.
	err, ok := <-dui.ch
	if err != nil {
		t.Errorf("Error with delegate UI: %v", err)
	} else if ok {
		t.Errorf("Delegate UI didn't close the channel properly")
	} else if err = dui.checkSuccess(); err != nil {
		t.Error(err)
	}

	stopper := client.NewCmdCtlStopRunner(tc1.G)
	if err := stopper.Run(); err != nil {
		t.Errorf("Error in stopping service: %v", err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}
Example #16
// AutoFork? Standalone? ClientServer? Brew service?  This function deals with the
// various run configurations that we can run in.
func configureProcesses(g *libkb.GlobalContext, cl *libcmdline.CommandLine, cmd *libcmdline.Command) (err error) {

	g.Log.Debug("+ configureProcesses")
	defer func() {
		g.Log.Debug("- configureProcesses -> %v", err)
	}()

	// On Linux, the service configures its own autostart file. Otherwise, no
	// need to configure if we're a service.
	if cl.IsService() {
		g.Log.Debug("| in configureProcesses, is service")
		if runtime.GOOS == "linux" {
			g.Log.Debug("| calling AutoInstall")
			_, err := install.AutoInstall(g, "", false)
			if err != nil {
				return err
			}
		}
		return nil
	}

	// Start the server on the other end, possibly.
	// There are two cases in which we do this: (1) we want
	// a local loopback server in standalone mode; (2) we
	// need to "autofork" it. Do at most one of these
	// operations.
	if g.Env.GetStandalone() {
		if cl.IsNoStandalone() {
			err = fmt.Errorf("Can't run command in standalone mode")
			return err
		}
		err := service.NewService(g, false /* isDaemon */).StartLoopbackServer()
		if err != nil {
			if pflerr, ok := err.(libkb.PIDFileLockError); ok {
				err = fmt.Errorf("Can't run in standalone mode with a service running (see %q)",
					pflerr.Filename)
				return err
			}
		}
		return err
	}

	// After this point, we need to provide a remote logging story if necessary

	// If this command specifically asks not to be forked, then we are done in this
	// function. This sort of thing is true for the `ctl` commands and also the `version`
	// command.
	fc := cl.GetForkCmd()
	if fc == libcmdline.NoFork {
		return configureLogging(g, cl)
	}

	// If this command warrants an autofork, do it now.
	var newProc bool
	if fc == libcmdline.ForceFork || g.Env.GetAutoFork() {
		newProc, err = client.AutoForkServer(g, cl)
		if err != nil {
			return err
		}
	} else if libkb.IsBrewBuild {
		// If we're running in Brew mode, we might need to install ourselves as a persistent
		// service for future invocations of the command.
		newProc, err = install.AutoInstall(g, "", false)
		if err != nil {
			return err
		}
	}

	g.Log.Debug("| After forks; newProc=%v", newProc)
	if err = configureLogging(g, cl); err != nil {
		return err
	}

	// If we have created a new proc, then there's no need to keep going to the
	// final step, which is to check for a version clashes.
	if newProc {
		return nil
	}

	// Finally, we'll restart the service if we see that it's out of date.
	if err = client.FixVersionClash(g, cl); err != nil {
		return err
	}

	return nil
}
Example #17
func TestTrackingNotifications(t *testing.T) {
	tc := setupTest(t, "signup")
	tc2 := cloneContext(tc)
	tc5 := cloneContext(tc)

	libkb.G.LocalDb = nil

	// Hack the various portions of the service that aren't
	// properly contextified.

	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()

	userInfo := randomUser("sgnup")

	tui := trackingUI{
		signupUI: signupUI{
			info:         userInfo,
			Contextified: libkb.NewContextified(tc2.G),
		},
	}
	tc2.G.SetUI(&tui)
	signup := client.NewCmdSignupRunner(tc2.G)
	signup.SetTest()

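	// Wait for the server to start up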
	<-startCh

	if err := signup.Run(); err != nil {
		t.Fatal(err)
	}
	tc2.G.Log.Debug("Login State: %v", tc2.G.LoginState())

	nh := newTrackingNotifyHandler()

	// Launch the server that will listen for tracking notifications.
	launchServer := func(nh *trackingNotifyHandler) error {
		cli, xp, err := client.GetRPCClientWithContext(tc5.G)
		if err != nil {
			return err
		}
		srv := rpc.NewServer(xp, nil)
		if err = srv.Register(keybase1.NotifyTrackingProtocol(nh)); err != nil {
			return err
		}
		ncli := keybase1.NotifyCtlClient{Cli: cli}
		if err = ncli.SetNotifications(context.TODO(), keybase1.NotificationChannels{
			Tracking: true,
		}); err != nil {
			return err
		}
		return nil
	}

	// Actually launch it in the background
	go func() {
		err := launchServer(nh)
		if err != nil {
			nh.errCh <- err
		}
	}()

	// Have our test user track t_alice.
	trackCmd := client.NewCmdTrackRunner(tc2.G)
	trackCmd.SetUser("t_alice")
	trackCmd.SetOptions(keybase1.TrackOptions{BypassConfirm: true})
	err := trackCmd.Run()
	if err != nil {
		t.Fatal(err)
	}

	// Do a check for new tracking statements that should fire off a
	// notification. Currently the track command above does not fetch the new
	// chain link from the server, so this call is required. It's possible that
	// TrackEngine (or our signature caching code) might change in the future,
	// making this call unnecessary.
	checkTrackingCmd := client.NewCmdCheckTrackingRunner(tc2.G)
	err = checkTrackingCmd.Run()
	if err != nil {
		t.Fatal(err)
	}

	// Wait to get a notification back as we expect.
	// NOTE: If this test ever starts deadlocking here, it's possible that
	// we've changed how we cache signatures that we make on the local client,
	// in such a way that the fetch done by CheckTracking above doesn't find
	// any "isOwnNewLinkFromServer" links. If so, one way to fix this test
	// would be to blow away the local db before calling CheckTracking.
	tc.G.Log.Debug("Waiting for two tracking notifications.")
	for i := 0; i < 2; i++ {
		select {
		case err := <-nh.errCh:
			t.Fatalf("Error before notify: %v", err)
		case arg := <-nh.trackingCh:
			tAliceUID := keybase1.UID("295a7eea607af32040647123732bc819")
			tc.G.Log.Debug("Got tracking changed notification (%#v)", arg)
			if arg.Username == "t_alice" {
				if !tAliceUID.Equal(arg.Uid) {
					t.Fatalf("Bad UID back: %s != %s", tAliceUID, arg.Uid)
				}
			} else if userInfo.username == arg.Username {
				if !tc.G.Env.GetUID().Equal(arg.Uid) {
					t.Fatalf("Bad UID back: %s != %s", tc.G.Env.GetUID(), arg.Uid)
				}
			} else {
				t.Fatalf("Bad username back: %s != %s || %s", arg.Username, "t_alice", userInfo.username)
			}
		}
	}

	if err := client.CtlServiceStop(tc2.G); err != nil {
		t.Fatal(err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}
Example #18
func TestPassphraseRecover(t *testing.T) {
	tc := setupTest(t, "pp")
	tc2 := cloneContext(tc)

	libkb.G.LocalDb = nil

	defer tc.Cleanup()

	stopCh := make(chan error)
	svc := service.NewService(tc.G, false)
	startCh := svc.GetStartChannel()
	go func() {
		err := svc.Run()
		if err != nil {
			t.Logf("Running the service produced an error: %v", err)
		}
		stopCh <- err
	}()
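	// Wait for the server to start up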
	<-startCh

	userInfo := randomUser("pp")

	sui := signupUI{
		info:         userInfo,
		Contextified: libkb.NewContextified(tc2.G),
	}
	tc2.G.SetUI(&sui)
	signup := client.NewCmdSignupRunner(tc2.G)
	signup.SetTest()

	stopper := client.NewCmdCtlStopRunner(tc2.G)

	if err := signup.Run(); err != nil {
		t.Fatal(err)
	}

	if _, err := tc.G.LoginState().VerifyPlaintextPassphrase(userInfo.passphrase); err != nil {
		t.Fatal(err)
	}

	// logout before recovering passphrase
	logout := client.NewCmdLogoutRunner(tc2.G)
	if err := logout.Run(); err != nil {
		t.Fatal(err)
	}

	// the paper key displayed during signup is in userInfo now, and it will be used
	// during passphrase recovery
	tc.G.Log.Debug("signup paper key: %s", userInfo.displayedPaperKey)

	oldPassphrase := userInfo.passphrase
	newPassphrase := userInfo.passphrase + userInfo.passphrase
	sui.info.passphrase = newPassphrase
	recoverCmd := client.NewCmdPassphraseRecoverRunner(tc2.G)

	if err := recoverCmd.Run(); err != nil {
		t.Fatal(err)
	}

	if _, err := tc.G.LoginState().VerifyPlaintextPassphrase(newPassphrase); err != nil {
		t.Fatal(err)
	}

	if _, err := tc.G.LoginState().VerifyPlaintextPassphrase(oldPassphrase); err == nil {
		t.Fatal("old passphrase passed verification after passphrase change")
	}

	if err := stopper.Run(); err != nil {
		t.Fatal(err)
	}

	// If the server failed, it's also an error
	if err := <-stopCh; err != nil {
		t.Fatal(err)
	}
}