func AuthLogin(c *cli.Context, log logging.Logger, _ string) (int, error) { kodingURL, err := url.Parse(c.String("baseurl")) if err != nil { return 1, fmt.Errorf("%q is not a valid URL value: %s\n", c.String("koding"), err) } k, ok := configstore.List()[config.ID(kodingURL.String())] if !ok { k = &config.Konfig{ Endpoints: &config.Endpoints{ Koding: config.NewEndpointURL(kodingURL), }, } } if err := configstore.Use(k); err != nil { return 1, err } // We create here a kloud client instead of using kloud.DefaultClient // in order to handle first-time login attempts where configuration // for kloud does not yet exist. kloudClient := &kloud.Client{ Transport: &kloud.KiteTransport{ Konfig: k, Log: log, }, } testKloudHook(kloudClient) authClient := &auth.Client{ Kloud: kloudClient, } teamClient := &team.Client{ Kloud: kloudClient, } // If we already own a valid kite.key, it means we were already // authenticated and we just call kloud using kite.key authentication. err = kloudClient.Transport.(stack.Validator).Valid() log.Debug("auth: transport test: %s", err) opts := &auth.LoginOptions{ Team: c.String("team"), } if err != nil { opts.Username, err = helper.Ask("Username [%s]: ", config.CurrentUser.Username) if err != nil { return 1, err } if opts.Username == "" { opts.Username = config.CurrentUser.Username } for { opts.Password, err = helper.AskSecret("Password [***]: ") if err != nil { return 1, err } if opts.Password != "" { break } } } fmt.Fprintln(os.Stderr, "Logging to", kodingURL, "...") resp, err := authClient.Login(opts) if err != nil { return 1, fmt.Errorf("error logging into your Koding account: %v", err) } if resp.KiteKey != "" { k.KiteKey = resp.KiteKey k.Endpoints = resp.Metadata.Endpoints if err := configstore.Use(k); err != nil { return 1, err } } teamClient.Use(&team.Team{Name: resp.GroupName}) if c.Bool("json") { enc := json.NewEncoder(os.Stdout) enc.SetIndent("", "\t") enc.Encode(resp) } else { fmt.Fprintln(os.Stdout, "Successfully logged in to the 
following team:", resp.GroupName) } return 0, nil }
// NewKlient returns a new Klient instance wired together from the given
// configuration: it builds the underlying kite, terminal, tunnel, uploader,
// vagrant, machine-group and remote subsystems, applies konfig overrides
// coming from command-line flags, and registers the kite methods before
// returning. The returned Klient is ready to be run by the caller.
func NewKlient(conf *KlientConfig) (*Klient, error) {
	// this is our main reference to count and measure metrics for the klient
	// we count only those methods, please add/remove methods here that will
	// reset the timer of a klient.
	usg := usage.NewUsage(map[string]bool{
		"fs.readDirectory":     true,
		"fs.glob":              true,
		"fs.readFile":          true,
		"fs.writeFile":         true,
		"fs.uniquePath":        true,
		"fs.getInfo":           true,
		"fs.setPermissions":    true,
		"fs.remove":            true,
		"fs.rename":            true,
		"fs.createDirectory":   true,
		"fs.move":              true,
		"fs.copy":              true,
		"webterm.getSessions":  true,
		"webterm.connect":      true,
		"webterm.killSession":  true,
		"webterm.killSessions": true,
		"webterm.rename":       true,
		"exec":                 true,
		"klient.share":         true,
		"klient.unshare":       true,
		"klient.shared":        true,
		"sshkeys.List":         true,
		"sshkeys.Add":          true,
		"sshkeys.Delete":       true,
		"storage.Get":          true,
		"storage.Set":          true,
		"storage.Delete":       true,
		"log.upload":           true,
		// "docker.create":       true,
		// "docker.connect":      true,
		// "docker.stop":         true,
		// "docker.start":        true,
		// "docker.remove":       true,
		// "docker.list":         true,
	})

	// TODO(rjeczalik): Once klient installation method is reworked,
	// ensure flags are stored alongside konfig and do not
	// overwrite konfig here.
	if conf.KontrolURL != "" {
		konfig.Konfig.KontrolURL = conf.KontrolURL
	}

	// NOTE(rjeczalik): For backward-compatibility with old klient,
	// remove once not needed. Derives the Koding base endpoint from the
	// kontrol URL by stripping its path.
	if u, err := url.Parse(konfig.Konfig.KontrolURL); err == nil && konfig.Konfig.KontrolURL != "" {
		u.Path = ""
		konfig.Konfig.Endpoints.Koding = cfg.NewEndpointURL(u)
	}

	// A --tunnel-kite-url flag overrides the configured tunnel endpoint.
	if conf.TunnelKiteURL != "" {
		u, err := url.Parse(conf.TunnelKiteURL)
		if err != nil {
			return nil, err
		}

		konfig.Konfig.Endpoints.Tunnel.Public.URL = u
	}

	k := newKite(conf)
	k.Config.VerifyAudienceFunc = verifyAudience

	// Fix up the kontrol URL when it is unset, points at the local dev
	// default, or when konfig overrides the built-in kontrol endpoint.
	if k.Config.KontrolURL == "" || k.Config.KontrolURL == "http://127.0.0.1:3000/kite" ||
		!konfig.Konfig.Endpoints.Kontrol().Equal(konfig.Builtin.Endpoints.Kontrol()) {
		k.Config.KontrolURL = konfig.Konfig.Endpoints.Kontrol().Public.String()
	}

	term := terminal.New(k.Log, conf.ScreenrcPath)
	// Any terminal input counts as activity and resets the usage timer.
	term.InputHook = usg.Reset

	// A failed BoltDB open is non-fatal: db stays nil and consumers below
	// fall back to in-memory storage.
	db, err := openBoltDB(configstore.CacheOptions("klient"))
	if err != nil {
		k.Log.Warning("Couldn't open BoltDB: %s", err)
	}

	up := uploader.New(&uploader.Options{
		KeygenURL: konfig.Konfig.Endpoints.Kloud().Public.String(),
		Kite:      k,
		Bucket:    conf.logBucketName(),
		Region:    conf.logBucketRegion(),
		DB:        db,
		Log:       k.Log,
	})

	vagrantOpts := &vagrant.Options{
		Home:   conf.VagrantHome,
		DB:     db, // nil is ok, fallbacks to in-memory storage
		Log:    k.Log,
		Debug:  conf.Debug,
		Output: up.Output,
	}

	tunOpts := &tunnel.Options{
		DB:            db,
		Log:           k.Log,
		Kite:          k,
		NoProxy:       conf.NoProxy,
		TunnelKiteURL: konfig.Konfig.Endpoints.Tunnel.Public.String(),
	}

	t, err := tunnel.New(tunOpts)
	if err != nil {
		log.Fatal(err)
	}

	// Enforce a sane lower bound for the self-update check interval.
	if conf.UpdateInterval < time.Minute {
		k.Log.Warning("Update interval can't be less than one minute. Setting to one minute.")
		conf.UpdateInterval = time.Minute
	}

	// TODO(rjeczalik): Enable after TMS-848.
	// mountEvents := make(chan *mount.Event)

	remoteOpts := &remote.RemoteOptions{
		Kite:    k,
		Log:     k.Log,
		Storage: storage.New(db),
		// EventSub: mountEvents,
	}

	machinesOpts := &machinegroup.GroupOpts{
		Storage:         storage.NewEncodingStorage(db, []byte("machines")),
		Builder:         mclient.NewKiteBuilder(k),
		DynAddrInterval: 2 * time.Second,
		PingInterval:    15 * time.Second,
	}

	machines, err := machinegroup.New(machinesOpts)
	if err != nil {
		k.Log.Fatal("Cannot initialize machine group: %s", err)
	}

	// Kite client for kloud, authenticated with our own kite.key.
	c := k.NewClient(konfig.Konfig.Endpoints.Kloud().Public.String())
	c.Auth = &kite.Auth{
		Type: "kiteKey",
		Key:  k.Config.KiteKey,
	}

	kloud := &apiutil.LazyKite{
		Client: c,
	}

	// REST client whose transport authenticates via kloud and caches
	// sessions in the "klient" bucket of the BoltDB cache.
	restClient := httputil.DefaultRestClient(konfig.Konfig.Debug)
	restClient.Transport = &api.Transport{
		RoundTripper: restClient.Transport,
		AuthFunc: (&apiutil.KloudAuth{
			Kite: kloud,
			Storage: &apiutil.Storage{
				&cfg.Cache{
					EncodingStorage: storage.NewEncodingStorage(db, []byte("klient")),
				},
			},
		}).Auth,
		Log: k.Log.(logging.Logger),
	}

	kl := &Klient{
		kite:    k,
		collab:  collaboration.New(db), // nil is ok, fallbacks to in memory storage
		storage: storage.New(db),       // nil is ok, fallbacks to in memory storage
		tunnel:  t,
		vagrant: vagrant.NewHandlers(vagrantOpts),
		// docker:   docker.New("unix://var/run/docker.sock", k.Log),
		terminal: term,
		usage:    usg,
		log:      k.Log,
		config:   conf,
		remote:   remote.NewRemote(remoteOpts),
		uploader: up,
		machines: machines,
		updater: &Updater{
			Endpoint:       conf.UpdateURL,
			Interval:       conf.UpdateInterval,
			CurrentVersion: conf.Version,
			KontrolURL:     k.Config.KontrolURL,
			// MountEvents:    mountEvents,
			Log: k.Log,
		},
		logUploadDelay: 3 * time.Minute,
		presence: &presence.Client{
			Endpoint: konfig.Konfig.Endpoints.Social().Public.WithPath("presence").URL,
			Client:   restClient,
		},
		presenceEvery: onceevery.New(1 * time.Hour),
		kloud:         kloud,
	}

	kl.kite.OnRegister(kl.updateKiteKey)

	// Close all active shared sessions of the current user. Do not close
	// them immediately; give users some time so they can safely exit. If
	// the user reconnects again the timer will be stopped, so we don't
	// unshare for network hiccups accidentally.
	kl.collabCloser = NewDeferTime(time.Minute, func() {
		sharedUsers, err := kl.collab.GetAll()
		if err != nil {
			kl.log.Warning("Couldn't unshare users: %s", err)
			return
		}

		if len(sharedUsers) == 0 {
			return
		}

		kl.log.Info("Unsharing users '%s'", sharedUsers)

		for user, option := range sharedUsers {
			// dont touch permanent users
			if option.Permanent {
				kl.log.Info("User is permanent, avoiding it: %q", user)
				continue
			}

			if err := kl.collab.Delete(user); err != nil {
				kl.log.Warning("Couldn't delete user from storage: %s", err)
			}
			kl.terminal.CloseSessions(user)
		}
	})

	// This is important, don't forget it
	kl.RegisterMethods()

	return kl, nil
}
func migrateKonfigBolt(cache *config.Cache) error { var used usedKonfig var oldKonfig config.Konfig var konfigs = make(config.Konfigs) if err := cache.GetValue("konfig", &oldKonfig); isFatal(err) { return err } if err := cache.GetValue("konfigs", &konfigs); isFatal(err) { return err } if err := cache.GetValue("konfigs.used", &used); isFatal(err) { return err } // If old konfig exists, try to migrate it over to konfigs. if oldKonfig.Valid() == nil { id := oldKonfig.ID() if _, ok := konfigs[id]; !ok { if oldKonfig.Endpoints == nil { oldKonfig.Endpoints = &config.Endpoints{} } if u, err := url.Parse(oldKonfig.KontrolURL); err == nil && oldKonfig.KontrolURL != "" { u.Path = "" oldKonfig.Endpoints.Koding = config.NewEndpointURL(u) } if oldKonfig.TunnelURL != "" { oldKonfig.Endpoints.Tunnel = config.NewEndpoint(oldKonfig.TunnelURL) } // Best-effort attemp to ensure /etc/kite/kite.key is stored // in ~/.config/koding/konfig.bolt, so it is possible to // use kd / konfig with koding deployments that sign with // different kontrol keys, e.g. production <-> sandbox or // production <-> self-hosted opensource version. _ = migrateKiteKey(&oldKonfig) konfigs[id] = &oldKonfig _ = cache.SetValue("konfigs", konfigs) } } // If no konfig is in use (e.g. we just migrated one), // try to set to the default one. if used.ID == "" && len(konfigs) == 1 { for id, konfig := range konfigs { if konfig.Valid() == nil { _ = cache.SetValue("konfigs.used", &usedKonfig{ID: id}) } break } } return nil }