// NewTransport gives new resilient transport for the given ClientOptions. func NewTransport(opts *ClientOptions) *aws.Config { cfg := aws.NewConfig().WithHTTPClient(httputil.DefaultRestClient(opts.Debug)) retryer := &transportRetryer{ MaxTries: 3, } if opts.Log != nil { retryer.Log = opts.Log.New("transport") } return request.WithRetryer(cfg, retryer) }
// RegisterCommand displays version information like Environment or Kite Query ID. func RegisterCommand(c *cli.Context, log logging.Logger, _ string) int { rr := initRegisterRequest(c) r, err := checkAndAskRequiredFields(rr) if err != nil { fmt.Fprintln(os.Stderr, "Register failed with error:", err) log.Error("%s", err) return 1 } host := config.Konfig.Endpoints.Koding.Public.String() client := httputil.DefaultRestClient(false) // TODO ~mehmetali // handle --alreadyMember flag with various option. // There might be some situations that errors need to be ignored token, err := doRegisterRequest(r, client, host) if err != nil { fmt.Fprintln(os.Stderr, "Register failed with error:", err) log.Error("%s", err) return 1 } clientID, err := doLoginRequest(client, host, token) if err != nil { // we don't need to inform user about the error after user registered successfully log.Error("%s", err) return 1 } // team cannot be empty (because of required while registering) // otherwise it return error while registering user // store groupName or slug as "team" inside the cache session := &endpointauth.Session{ ClientID: clientID, Team: r.Slug, } // Set clientId and teamname into the kd.bolt endpointauth.Use(session) return 0 }
// New gives a new, registered kloud kite.
//
// If conf contains invalid or missing configuration, it returns a non-nil
// error.
func New(conf *Config) (*Kloud, error) {
	k := kite.New(stack.NAME, stack.VERSION)
	k.Config = kiteconfig.MustGet()
	k.Config.Port = conf.Port
	k.ClientFunc = httputil.ClientFunc(conf.DebugMode)

	if conf.DebugMode {
		k.SetLogLevel(kite.DEBUG)
	}

	// Region/environment overrides are applied only when explicitly set.
	if conf.Region != "" {
		k.Config.Region = conf.Region
	}

	if conf.Environment != "" {
		k.Config.Environment = conf.Environment
	}

	// TODO(rjeczalik): refactor modelhelper methods to not use global DB
	// NOTE(review): must run before newSession below — presumably the
	// session relies on the global DB being initialized; confirm before
	// reordering.
	modelhelper.Initialize(conf.MongoURL)

	sess, err := newSession(conf, k)
	if err != nil {
		return nil, err
	}

	e := newEndpoints(conf)

	sess.Log.Debug("Konfig.Endpoints: %s", util.LazyJSON(e))

	// Workers allowed to authenticate with a shared secret key (see the
	// k.Authenticators loop at the bottom).
	authUsers := map[string]string{
		"kloudctl": conf.KloudSecretKey,
	}

	restClient := httputil.DefaultRestClient(conf.DebugMode)

	// Options shared by the credential store, credential client and the
	// remote-api client below.
	storeOpts := &credential.Options{
		MongoDB: sess.DB,
		Log:     sess.Log.New("stackcred"),
		Client:  restClient,
	}

	if !conf.NoSneaker {
		storeOpts.CredURL = e.Social().WithPath("/credential").Private.URL
	}

	sess.Log.Debug("storeOpts: %+v", storeOpts)

	userPrivateKey, userPublicKey := userMachinesKeys(conf.UserPublicKey, conf.UserPrivateKey)

	// Stacker carries per-provider dependencies; it also serves as the
	// Locker for kloud.Stack further below.
	stacker := &provider.Stacker{
		DB:             sess.DB,
		Log:            sess.Log,
		Kite:           sess.Kite,
		Userdata:       sess.Userdata,
		Debug:          conf.DebugMode,
		KloudSecretKey: conf.KloudSecretKey,
		CredStore:      credential.NewStore(storeOpts),
		TunnelURL:      conf.TunnelURL,
		SSHKey: &publickeys.Keys{
			KeyName:    publickeys.DeployKeyName,
			PrivateKey: userPrivateKey,
			PublicKey:  userPublicKey,
		},
	}

	stats := common.MustInitMetrics(Name)

	kloud := &Kloud{
		Kite:  k,
		Stack: stack.New(),
		Queue: &queue.Queue{
			Interval: 5 * time.Second,
			Log:      sess.Log.New("queue"),
			Kite:     k,
			MongoDB:  sess.DB,
		},
	}

	// authFn resolves (or lazily creates) a session for the given user/team
	// pair; it is wrapped in api.NewCache below so repeated lookups are
	// served from cache.
	authFn := func(opts *api.AuthOptions) (*api.Session, error) {
		s, err := modelhelper.FetchOrCreateSession(opts.User.Username, opts.User.Team)
		if err != nil {
			return nil, err
		}

		return &api.Session{
			ClientID: s.ClientId,
			User: &api.User{
				Username: s.Username,
				Team:     s.GroupName,
			},
		}, nil
	}

	transport := &api.Transport{
		RoundTripper: storeOpts.Client.Transport,
		AuthFunc:     api.NewCache(authFn).Auth,
		Debug:        conf.DebugMode,
	}

	if conf.DebugMode {
		transport.Log = sess.Log
	}

	// Wire all Stack dependencies.
	kloud.Stack.Endpoints = e
	kloud.Stack.Userdata = sess.Userdata
	kloud.Stack.DescribeFunc = provider.Desc
	kloud.Stack.CredClient = credential.NewClient(storeOpts)
	kloud.Stack.MachineClient = machine.NewClient(machine.NewMongoDatabase())
	kloud.Stack.TeamClient = team.NewClient(team.NewMongoDatabase())
	kloud.Stack.PresenceClient = client.NewInternal(e.Social().Private.String())
	kloud.Stack.PresenceClient.HTTPClient = restClient
	kloud.Stack.RemoteClient = &remoteapi.Client{
		Client:    storeOpts.Client,
		Transport: transport,
		Endpoint:  e.Koding.Private.URL,
	}
	kloud.Stack.ContextCreator = func(ctx context.Context) context.Context {
		return session.NewContext(ctx, sess)
	}
	kloud.Stack.Metrics = stats

	// RSA key pair that we add to the newly created machine for
	// provisioning.
	kloud.Stack.PublicKeys = stacker.SSHKey
	kloud.Stack.DomainStorage = sess.DNSStorage
	kloud.Stack.Domainer = sess.DNSClient
	kloud.Stack.Locker = stacker
	kloud.Stack.Log = sess.Log
	kloud.Stack.SecretKey = conf.KloudSecretKey

	// Register every known provider with both the stack and the queue
	// before the queue starts running.
	for _, p := range provider.All() {
		s := stacker.New(p)

		if err = kloud.Stack.AddProvider(p.Name, s); err != nil {
			return nil, err
		}

		kloud.Queue.Register(s)

		sess.Log.Debug("registering %q provider", p.Name)
	}

	go kloud.Queue.Run()

	// The keygen server is optional: it is enabled only when both S3/STS
	// credentials are configured.
	if conf.KeygenAccessKey != "" && conf.KeygenSecretKey != "" {
		cfg := &keygen.Config{
			AccessKey:  conf.KeygenAccessKey,
			SecretKey:  conf.KeygenSecretKey,
			Region:     conf.KeygenRegion,
			Bucket:     conf.KeygenBucket,
			AuthExpire: conf.KeygenTokenTTL,
			AuthFunc:   kloud.Stack.ValidateUser,
			Kite:       k,
		}

		kloud.Keygen = keygen.NewServer(cfg)
	} else {
		k.Log.Warning(`disabling "keygen" methods due to missing S3/STS credentials`)
	}

	// Teams/stack handling methods.
	k.HandleFunc("plan", kloud.Stack.Plan)
	k.HandleFunc("apply", kloud.Stack.Apply)
	k.HandleFunc("describeStack", kloud.Stack.Status)
	k.HandleFunc("authenticate", kloud.Stack.Authenticate)
	k.HandleFunc("bootstrap", kloud.Stack.Bootstrap)
	k.HandleFunc("import", kloud.Stack.Import)

	// Credential handling.
	k.HandleFunc("credential.describe", kloud.Stack.CredentialDescribe)
	k.HandleFunc("credential.list", kloud.Stack.CredentialList)
	k.HandleFunc("credential.add", kloud.Stack.CredentialAdd)

	// Authorization handling.
	k.HandleFunc("auth.login", kloud.Stack.AuthLogin)
	k.HandleFunc("auth.passwordLogin", kloud.Stack.AuthPasswordLogin).DisableAuthentication()

	// Configuration handling.
	k.HandleFunc("config.metadata", kloud.Stack.ConfigMetadata)

	// Team handling.
	k.HandleFunc("team.list", kloud.Stack.TeamList)
	k.HandleFunc("team.whoami", kloud.Stack.TeamWhoami)

	// Machine handling.
	k.HandleFunc("machine.list", kloud.Stack.MachineList)

	// Single machine handling.
	k.HandleFunc("stop", kloud.Stack.Stop)
	k.HandleFunc("start", kloud.Stack.Start)
	k.HandleFunc("info", kloud.Stack.Info)
	k.HandleFunc("event", kloud.Stack.Event)

	// Klient proxy methods.
	k.HandleFunc("admin.add", kloud.Stack.AdminAdd)
	k.HandleFunc("admin.remove", kloud.Stack.AdminRemove)

	k.HandleHTTPFunc("/healthCheck", artifact.HealthCheckHandler(Name))
	k.HandleHTTPFunc("/version", artifact.VersionHandler())

	for worker, key := range authUsers {
		// Shadow the loop variables so each authenticator closure captures
		// its own copy (required before Go 1.22's per-iteration scoping).
		worker, key := worker, key
		k.Authenticators[worker] = func(r *kite.Request) error {
			if r.Auth.Key != key {
				return errors.New("wrong secret key passed, you are not authenticated")
			}
			return nil
		}
	}

	if conf.DebugMode {
		// This should be actually debug level 2. It outputs every single Kite
		// message and enables the kite debugging system. So enable it only if
		// you need it.
		// k.SetLogLevel(kite.DEBUG)
		k.Log.Info("Debug mode enabled")
	}

	if conf.TestMode {
		k.Log.Info("Test mode enabled")
	}

	// Register with kontrol last, once all handlers are installed.
	registerURL := k.RegisterURL(!conf.Public)
	if conf.RegisterURL != "" {
		u, err := url.Parse(conf.RegisterURL)
		if err != nil {
			return nil, fmt.Errorf("Couldn't parse register url: %s", err)
		}

		registerURL = u
	}

	if err := k.RegisterForever(registerURL); err != nil {
		return nil, err
	}

	return kloud, nil
}
// NewKlient returns a new Klient instance built from the given config.
func NewKlient(conf *KlientConfig) (*Klient, error) {
	// this is our main reference to count and measure metrics for the klient
	// we count only those methods, please add/remove methods here that will
	// reset the timer of a klient.
	usg := usage.NewUsage(map[string]bool{
		"fs.readDirectory":     true,
		"fs.glob":              true,
		"fs.readFile":          true,
		"fs.writeFile":         true,
		"fs.uniquePath":        true,
		"fs.getInfo":           true,
		"fs.setPermissions":    true,
		"fs.remove":            true,
		"fs.rename":            true,
		"fs.createDirectory":   true,
		"fs.move":              true,
		"fs.copy":              true,
		"webterm.getSessions":  true,
		"webterm.connect":      true,
		"webterm.killSession":  true,
		"webterm.killSessions": true,
		"webterm.rename":       true,
		"exec":                 true,
		"klient.share":         true,
		"klient.unshare":       true,
		"klient.shared":        true,
		"sshkeys.List":         true,
		"sshkeys.Add":          true,
		"sshkeys.Delete":       true,
		"storage.Get":          true,
		"storage.Set":          true,
		"storage.Delete":       true,
		"log.upload":           true,
		// "docker.create":       true,
		// "docker.connect":      true,
		// "docker.stop":         true,
		// "docker.start":        true,
		// "docker.remove":       true,
		// "docker.list":         true,
	})

	// TODO(rjeczalik): Once klient installation method is reworked,
	// ensure flags are stored alongside konfig and do not
	// overwrite konfig here.
	if conf.KontrolURL != "" {
		konfig.Konfig.KontrolURL = conf.KontrolURL
	}

	// NOTE(rjeczalik): For backward-compatibility with old klient,
	// remove once not needed.
	if u, err := url.Parse(konfig.Konfig.KontrolURL); err == nil && konfig.Konfig.KontrolURL != "" {
		// Strip the path so only scheme://host remains for the Koding endpoint.
		u.Path = ""
		konfig.Konfig.Endpoints.Koding = cfg.NewEndpointURL(u)
	}

	if conf.TunnelKiteURL != "" {
		u, err := url.Parse(conf.TunnelKiteURL)
		if err != nil {
			return nil, err
		}

		konfig.Konfig.Endpoints.Tunnel.Public.URL = u
	}

	k := newKite(conf)
	k.Config.VerifyAudienceFunc = verifyAudience

	// Prefer the kontrol endpoint from konfig when the kite config has no
	// usable URL (empty or the local development default) or when konfig
	// was customized away from the built-in defaults.
	if k.Config.KontrolURL == "" || k.Config.KontrolURL == "http://127.0.0.1:3000/kite" ||
		!konfig.Konfig.Endpoints.Kontrol().Equal(konfig.Builtin.Endpoints.Kontrol()) {
		k.Config.KontrolURL = konfig.Konfig.Endpoints.Kontrol().Public.String()
	}

	term := terminal.New(k.Log, conf.ScreenrcPath)
	// Any terminal input counts as activity and resets the usage timer.
	term.InputHook = usg.Reset

	// A nil db is tolerated below — consumers fall back to in-memory storage.
	db, err := openBoltDB(configstore.CacheOptions("klient"))
	if err != nil {
		k.Log.Warning("Couldn't open BoltDB: %s", err)
	}

	up := uploader.New(&uploader.Options{
		KeygenURL: konfig.Konfig.Endpoints.Kloud().Public.String(),
		Kite:      k,
		Bucket:    conf.logBucketName(),
		Region:    conf.logBucketRegion(),
		DB:        db,
		Log:       k.Log,
	})

	vagrantOpts := &vagrant.Options{
		Home:   conf.VagrantHome,
		DB:     db, // nil is ok, fallbacks to in-memory storage
		Log:    k.Log,
		Debug:  conf.Debug,
		Output: up.Output,
	}

	tunOpts := &tunnel.Options{
		DB:            db,
		Log:           k.Log,
		Kite:          k,
		NoProxy:       conf.NoProxy,
		TunnelKiteURL: konfig.Konfig.Endpoints.Tunnel.Public.String(),
	}

	// NOTE(review): a tunnel failure aborts the whole process via
	// log.Fatal instead of returning an error like the other paths here —
	// looks intentional but worth confirming.
	t, err := tunnel.New(tunOpts)
	if err != nil {
		log.Fatal(err)
	}

	// Clamp the update interval to a sane minimum.
	if conf.UpdateInterval < time.Minute {
		k.Log.Warning("Update interval can't be less than one minute. Setting to one minute.")
		conf.UpdateInterval = time.Minute
	}

	// TODO(rjeczalik): Enable after TMS-848.
	// mountEvents := make(chan *mount.Event)

	remoteOpts := &remote.RemoteOptions{
		Kite:    k,
		Log:     k.Log,
		Storage: storage.New(db),
		// EventSub: mountEvents,
	}

	machinesOpts := &machinegroup.GroupOpts{
		Storage:         storage.NewEncodingStorage(db, []byte("machines")),
		Builder:         mclient.NewKiteBuilder(k),
		DynAddrInterval: 2 * time.Second,
		PingInterval:    15 * time.Second,
	}

	machines, err := machinegroup.New(machinesOpts)
	if err != nil {
		k.Log.Fatal("Cannot initialize machine group: %s", err)
	}

	// Kloud client authenticated with this klient's kite key; wrapped in a
	// LazyKite so the connection is established on first use.
	c := k.NewClient(konfig.Konfig.Endpoints.Kloud().Public.String())
	c.Auth = &kite.Auth{
		Type: "kiteKey",
		Key:  k.Config.KiteKey,
	}

	kloud := &apiutil.LazyKite{
		Client: c,
	}

	// REST client whose transport injects kloud-backed authentication,
	// caching sessions in the "klient" bucket of the BoltDB.
	restClient := httputil.DefaultRestClient(konfig.Konfig.Debug)
	restClient.Transport = &api.Transport{
		RoundTripper: restClient.Transport,
		AuthFunc: (&apiutil.KloudAuth{
			Kite: kloud,
			Storage: &apiutil.Storage{
				&cfg.Cache{
					EncodingStorage: storage.NewEncodingStorage(db, []byte("klient")),
				},
			},
		}).Auth,
		Log: k.Log.(logging.Logger),
	}

	kl := &Klient{
		kite:    k,
		collab:  collaboration.New(db), // nil is ok, fallbacks to in memory storage
		storage: storage.New(db),       // nil is ok, fallbacks to in memory storage
		tunnel:  t,
		vagrant: vagrant.NewHandlers(vagrantOpts),
		// docker:   docker.New("unix://var/run/docker.sock", k.Log),
		terminal: term,
		usage:    usg,
		log:      k.Log,
		config:   conf,
		remote:   remote.NewRemote(remoteOpts),
		uploader: up,
		machines: machines,
		updater: &Updater{
			Endpoint:       conf.UpdateURL,
			Interval:       conf.UpdateInterval,
			CurrentVersion: conf.Version,
			KontrolURL:     k.Config.KontrolURL,
			// MountEvents:    mountEvents,
			Log: k.Log,
		},
		logUploadDelay: 3 * time.Minute,
		presence: &presence.Client{
			Endpoint: konfig.Konfig.Endpoints.Social().Public.WithPath("presence").URL,
			Client:   restClient,
		},
		presenceEvery: onceevery.New(1 * time.Hour),
		kloud:         kloud,
	}

	kl.kite.OnRegister(kl.updateKiteKey)

	// Close all active sessions of the current user. Do not close them
	// immediately; instead give some time so users can safely exit. If the
	// user reconnects again the timer will be stopped, so we don't unshare
	// accidentally because of network hiccups.
	kl.collabCloser = NewDeferTime(time.Minute, func() {
		sharedUsers, err := kl.collab.GetAll()
		if err != nil {
			kl.log.Warning("Couldn't unshare users: %s", err)
			return
		}

		if len(sharedUsers) == 0 {
			return
		}

		kl.log.Info("Unsharing users '%s'", sharedUsers)

		for user, option := range sharedUsers {
			// dont touch permanent users
			if option.Permanent {
				kl.log.Info("User is permanent, avoiding it: %q", user)
				continue
			}

			if err := kl.collab.Delete(user); err != nil {
				kl.log.Warning("Couldn't delete user from storage: %s", err)
			}

			kl.terminal.CloseSessions(user)
		}
	})

	// This is important, don't forget it
	kl.RegisterMethods()

	return kl, nil
}