// NewMultiInstances fetches the EC2 instance list from each region.
func NewMultiInstances(clients *amazon.Clients, log logging.Logger) *MultiInstances {
	if log == nil {
		log = defaultLogger
	}

	m := newMultiInstances()

	var wg sync.WaitGroup
	var mu sync.Mutex // protects m.m

	for region, client := range clients.Regions() {
		wg.Add(1)
		go func(region string, client *amazon.Client) {
			defer wg.Done()

			instances, err := client.Instances()
			if err != nil {
				log.Error("[%s] fetching instances error: %s", region, err)
				return
			}

			log.Info("[%s] fetched %d instances", region, len(instances))

			i := make(Instances, len(instances))
			for _, instance := range instances {
				i[aws.StringValue(instance.InstanceId)] = instance
			}

			var ok bool
			mu.Lock()
			if _, ok = m.m[client]; !ok {
				m.m[client] = i
			}
			mu.Unlock()

			if ok {
				panic(fmt.Errorf("[%s] duplicated client=%p: %+v", region, client, i))
			}
		}(region, client)
	}
	wg.Wait()

	return m
}
// NewMachine initializes a new Machine struct with any internal vars created.
func NewMachine(meta MachineMeta, log logging.Logger, t Transport) (*Machine, error) {
	log = MachineLogger(meta, log)

	// Create our pingers, to be used in the PingTrackers.
	kitePinger := kitepinger.NewKitePinger(t)
	httpPinger, err := kitepinger.NewKiteHTTPPinger(meta.URL)
	if err != nil {
		log.Error(
			"Unable to create HTTPPinger from meta.URL. url:%s, err:%s",
			meta.URL, err,
		)
		return nil, err
	}

	m := &Machine{
		MachineMeta: meta,
		Log:         log,
		KiteTracker: kitepinger.NewPingTracker(kitePinger),
		HTTPTracker: kitepinger.NewPingTracker(httpPinger),
		Transport:   t,
		discover:    discover.NewClient(),
		mountLocker: util.NewMutexWithState(),
	}

	m.discover.Log = m.Log.New("discover")

	// Start our HTTP pinger, to give online/offline statuses for all machines.
	m.HTTPTracker.Start()

	return m, nil
}
// NewAddresses fetches the EC2 IP address list from each region.
//
// If log is nil, defaultLogger is used instead.
func NewAddresses(clients *amazon.Clients, log logging.Logger) *Addresses {
	if log == nil {
		log = defaultLogger
	}

	a := newAddresses()

	var wg sync.WaitGroup
	var mu sync.Mutex // protects a.m

	for region, client := range clients.Regions() {
		wg.Add(1)
		go func(region string, client *amazon.Client) {
			defer wg.Done()

			addresses, err := client.Addresses()
			if err != nil {
				log.Error("[%s] fetching IP addresses error: %s", region, err)
				return
			}

			log.Info("[%s] fetched %d addresses", region, len(addresses))

			var ok bool
			mu.Lock()
			if _, ok = a.m[client]; !ok {
				a.m[client] = addresses
			}
			mu.Unlock()

			if ok {
				panic(fmt.Errorf("[%s] duplicated client=%p: %+v", region, client, addresses))
			}
		}(region, client)
	}
	wg.Wait()

	return a
}
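// NewMultiInstances and NewAddresses above share the same fan-out pattern: one
// goroutine per region, a sync.WaitGroup to join them, and a mutex guarding the
// shared map. A minimal standalone sketch of that pattern; fetchAll and its
// signature are illustrative, not part of this package:
func fetchAll(fetchers map[string]func() ([]string, error), log logging.Logger) map[string][]string {
	var (
		wg sync.WaitGroup
		mu sync.Mutex // protects results
	)
	results := make(map[string][]string)

	for region, fetch := range fetchers {
		wg.Add(1)
		go func(region string, fetch func() ([]string, error)) {
			defer wg.Done()

			items, err := fetch()
			if err != nil {
				// Skip failed regions, as the constructors above do.
				log.Error("[%s] fetch error: %s", region, err)
				return
			}

			mu.Lock()
			results[region] = items
			mu.Unlock()
		}(region, fetch)
	}
	wg.Wait()

	return results
}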
func CreateMetrics(appName string, log logging.Logger, outputMetrics bool) (*kodingmetrics.Metrics, *kodingmetrics.DogStatsD) {
	metric := kodingmetrics.New(appName)

	// If outputMetrics is set, print metrics to the console.
	if outputMetrics {
		// TODO: make these loggers configurable, see
		// https://github.com/rcrowley/go-metrics/blob/37df06ff62a7d8b4473b48d355008c838da87561/log.go
		// and read the intervals below from config.
		//
		// Output metrics every minute (6e10 ns).
		go metrics.Log(metric.Registry, 6e10, slog.New(os.Stderr, "metrics: ", slog.Lmicroseconds))
	}

	// Syslog path for Mac; left here for future reference.
	syslogPath := "/var/run/syslog"
	if runtime.GOOS != "darwin" {
		// For Linux.
		syslogPath = "/dev/log"
	}

	w, err := syslog.Dial("unixgram", syslogPath, syslog.LOG_INFO, "socialapi-metrics")
	if err != nil {
		log.Error("Error initializing syslog for metrics; metrics won't be sent to syslog: %s", err.Error())
	} else {
		// Write metrics to syslog every 5 minutes (30e10 ns).
		go metrics.Syslog(metric.Registry, 30e10, w)
	}

	statsd, err := kodingmetrics.NewDogStatsD(appName)
	if err == nil {
		// Collect metrics into DogStatsD every 4 minutes (24e10 ns).
		go kodingmetrics.Collect(metric.Registry, statsd, 24e10)
	}

	return metric, statsd
}
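// The raw nanosecond literals passed above (6e10, 30e10, 24e10) are easy to
// misread. A sketch of equivalent, self-documenting values using
// time.Duration; the constant names are illustrative, the values match the
// literals used in CreateMetrics:
const (
	consoleMetricsInterval = 1 * time.Minute // 6e10 ns
	syslogMetricsInterval  = 5 * time.Minute // 30e10 ns
	statsdMetricsInterval  = 4 * time.Minute // 24e10 ns
)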
// Join copies data between local and remote connections.
// It reads from one connection and writes to the other.
// It's a building block for ProxyFunc implementations.
func Join(local, remote net.Conn, log logging.Logger) {
	var wg sync.WaitGroup
	wg.Add(2)

	transfer := func(side string, dst, src net.Conn) {
		log.Debug("proxying %s -> %s", src.RemoteAddr(), dst.RemoteAddr())

		n, err := io.Copy(dst, src)
		if err != nil {
			log.Error("%s: copy error: %s", side, err)
		}

		if err := src.Close(); err != nil {
			log.Debug("%s: close error: %s", side, err)
		}

		// Not needed for yamux streams, but for client to local server connections.
		if d, ok := dst.(*net.TCPConn); ok {
			if err := d.CloseWrite(); err != nil {
				log.Debug("%s: closeWrite error: %s", side, err)
			}
		}

		wg.Done()
		log.Debug("done proxying %s -> %s: %d bytes", src.RemoteAddr(), dst.RemoteAddr(), n)
	}

	go transfer("remote to local", local, remote)
	go transfer("local to remote", remote, local)

	wg.Wait()
}
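// A minimal sketch of how Join might back a ProxyFunc-style handler: accept a
// local connection, dial the remote side, and let Join shuttle bytes until both
// directions finish. proxyOnce and remoteAddr are illustrative names, not part
// of this package's API.
func proxyOnce(l net.Listener, remoteAddr string, log logging.Logger) error {
	local, err := l.Accept()
	if err != nil {
		return err
	}

	remote, err := net.Dial("tcp", remoteAddr)
	if err != nil {
		local.Close()
		return err
	}

	// Join blocks until both copy directions have finished.
	Join(local, remote, log)
	return nil
}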
func New(log logging.Logger, client algoliasearch.Client, indexSuffix string) *Controller {
	// TODO: later on, listen for the channel_participant_added event and
	// remove this koding channel fetch.
	c := models.NewChannel()
	q := request.NewQuery()
	q.GroupName = "koding"
	q.Name = "public"
	q.Type = models.Channel_TYPE_GROUP

	channel, err := c.ByName(q)
	if err != nil {
		log.Error("Could not fetch koding channel: %s", err)
	}

	var channelId string
	if channel.Id != 0 {
		channelId = strconv.FormatInt(channel.Id, 10)
	}

	controller := &Controller{
		log:    log,
		client: client,
		indexes: &IndexSet{
			IndexTopics: &IndexSetItem{
				Index: client.InitIndex(IndexTopics + indexSuffix),
				Settings: &Settings{
					// An empty slice means all properties will be searchable.
					AttributesToIndex: []string{},
				},
			},
			IndexAccounts: &IndexSetItem{
				Index: client.InitIndex(IndexAccounts + indexSuffix),
				Settings: &Settings{
					AttributesToIndex: []string{
						"nick",
						"email",
						"firstName",
						"lastName",
						"_tags",
					},
					UnretrievableAttributes: []string{"email"},
				},
			},
			IndexMessages: &IndexSetItem{
				Index: client.InitIndex(IndexMessages + indexSuffix),
				Settings: &Settings{
					AttributesToIndex: []string{},
				},
			},
		},
		kodingChannelId: channelId,
	}

	return controller
}
func setDefaults(log logging.Logger) {
	group, err := modelhelper.GetGroup(models.Channel_KODING_NAME)
	if err != nil {
		log.Error("err while fetching koding group: %s", err.Error())
		return
	}

	log.Debug("mongo group found")

	setPublicChannel(log, group)
	setChangeLogChannel(log, group)

	log.Info("socialApi defaults are created")
}
// FactoryCompletion implements codegangsta/cli's Command.BashComplete field.
func FactoryCompletion(factory CommandFactory, log logging.Logger, cmdName string) cli.BashCompleteFunc {
	return func(c *cli.Context) {
		cmd := factory(c, log, cmdName)

		// If the command implements AutocompleteCommand, run the autocomplete.
		if aCmd, ok := cmd.(AutocompleteCommand); ok {
			if err := aCmd.Autocomplete(c.Args()...); err != nil {
				log.Error(
					"Autocompletion of a command encountered an error. command:%s, err:%s",
					cmdName, err,
				)
			}
		}
	}
}
// ExitErrAction implements a cli.Command's Action field for an ExitingErrCommand.
func ExitErrAction(f ExitingErrCommand, log logging.Logger, cmdName string) cli.ActionFunc {
	return func(c *cli.Context) error {
		exit, err := f(c, log, cmdName)
		if err != nil {
			log.Error("ExitErrAction encountered error. err:%s", err)

			// Print the error message to the user.
			fmt.Fprintf(os.Stderr, "error executing %q command: %s\n", cmdName, err)
		}

		Close()
		ExitFunc(exit)

		return nil
	}
}
func setChangeLogChannel(log logging.Logger, group *kodingmodels.Group) {
	c := models.NewChannel()
	selector := map[string]interface{}{
		"type_constant": models.Channel_TYPE_ANNOUNCEMENT,
		"group_name":    models.Channel_KODING_NAME,
	}

	// If err is nil, it means we already have that channel.
	err := c.One(bongo.NewQS(selector))
	if err != nil && err != bongo.RecordNotFound {
		log.Error("err while fetching changelog channel: %s", err.Error())
		return
	}

	if err == bongo.RecordNotFound {
		log.Info("postgres changelog channel couldn't be found, creating it")

		acc, err := createChannelOwner(group)
		if err != nil {
			log.Error(err.Error())
			return
		}

		c.Name = "changelog"
		c.CreatorId = acc.Id
		c.GroupName = models.Channel_KODING_NAME
		c.TypeConstant = models.Channel_TYPE_ANNOUNCEMENT
		c.PrivacyConstant = models.Channel_PRIVACY_PRIVATE

		if err := c.Create(); err != nil {
			log.Error("err while creating the changelog channel: %s", err.Error())
			return
		}
	}

	socialApiAnnouncementChannelId := strconv.FormatInt(c.Id, 10)
	if group.SocialApiAnnouncementChannelId == socialApiAnnouncementChannelId {
		log.Info("mongo and postgres socialApiAnnouncementChannel ids are the same")
		return
	}

	log.Debug("mongo and postgres socialApiAnnouncementChannel ids are different, fixing it")

	if err := updateGroupPartially(group.Id, "socialApiAnnouncementChannelId", socialApiAnnouncementChannelId); err != nil {
		log.Error("err while updating socialApiAnnouncementChannelId: %s", err.Error())
		return
	}
}
// RestartCommand stops and starts klient. If klient is not running to begin
// with, it *just* starts klient.
func RestartCommand(c *cli.Context, log logging.Logger, _ string) int {
	if len(c.Args()) != 0 {
		cli.ShowCommandHelp(c, "restart")
		return 1
	}

	log = log.New("cmd:restart")

	s, err := newService(nil)
	if err != nil {
		log.Error("Error creating Service. err:%s", err)
		fmt.Println(GenericInternalNewCodeError)
		return 1
	}

	fmt.Printf("Restarting the %s, this may take a moment...\n", config.KlientName)

	klientWasRunning := IsKlientRunning(config.Konfig.Endpoints.Klient.Private.String())

	if klientWasRunning {
		// If klient is running, stop it, and tell the user if we fail.
		if err := s.Stop(); err != nil {
			log.Error("Error stopping Service. err:%s", err)
			fmt.Println(FailedStopKlient)
			return 1
		}
	} else {
		// If klient appears to not be running, try to stop it anyway. However,
		// because it may not actually be running, don't inform the user if we
		// fail here.
		s.Stop()
	}

	err = WaitUntilStopped(config.Konfig.Endpoints.Klient.Private.String(), CommandAttempts, CommandWaitTime)
	if err != nil {
		log.Error(
			"Timed out while waiting for Klient to stop. attempts:%d, err:%s",
			CommandAttempts, err,
		)
		fmt.Println(FailedStopKlient)
		return 1
	}

	if klientWasRunning {
		fmt.Println("Stopped successfully.")
	}

	// No UX message needed, startKlient will do that itself.
	if err := startKlient(log, s); err != nil {
		log.Error("failed to start klient: %s", err)
		return 1
	}

	fmt.Printf("Successfully restarted %s\n", config.KlientName)
	return 0
}
// startIntervalerIfNeeded starts the given rsync interval, logs any errors, and
// stores the resulting Intervaler on the machine so it can be stopped later.
func startIntervalerIfNeeded(log logging.Logger, remoteMachine *machine.Machine, c *rsync.Client, opts rsync.SyncIntervalOpts) {
	log = log.New("startIntervalerIfNeeded")

	if opts.Interval <= 0 {
		// Using debug, because this is not an error - just informative.
		log.Debug(
			"startIntervalerIfNeeded() called with interval:%d. Cannot start Intervaler",
			opts.Interval,
		)
		return
	}

	log.Info("Creating and starting RSync SyncInterval")
	intervaler, err := c.SyncInterval(opts)
	if err != nil {
		log.Error("rsync SyncInterval returned an error:%s", err)
		return
	}

	remoteMachine.Intervaler = intervaler
}
// StopCommand stops the local klient. Requires sudo.
func StopCommand(c *cli.Context, log logging.Logger, _ string) int {
	if len(c.Args()) != 0 {
		cli.ShowCommandHelp(c, "stop")
		return 1
	}

	s, err := newService(nil)
	if err != nil {
		log.Error("Error creating Service. err:%s", err)
		fmt.Println(GenericInternalError)
		return 1
	}

	if err := s.Stop(); err != nil {
		log.Error("Error stopping Service. err:%s", err)
		fmt.Println(FailedStopKlient)
		return 1
	}

	err = WaitUntilStopped(config.Konfig.Endpoints.Klient.Private.String(), CommandAttempts, CommandWaitTime)
	if err != nil {
		log.Error(
			"Timed out while waiting for Klient to stop. attempts:%d, err:%s",
			CommandAttempts, err,
		)
		fmt.Println(FailedStopKlient)
		return 1
	}

	fmt.Printf("Successfully stopped %s\n", config.KlientName)
	return 0
}
// FactoryAction implements a cli.Command's Action field.
func FactoryAction(factory CommandFactory, log logging.Logger, cmdName string) cli.ActionFunc {
	return func(c *cli.Context) error {
		cmd := factory(c, log, cmdName)
		exit, err := cmd.Run()

		// For API reasons, we may return an error but a zero exit code. So we
		// want to check and log both.
		if exit != 0 || err != nil {
			log.Error(
				"Command encountered error. command:%s, exit:%d, err:%s",
				cmdName, exit, err,
			)

			// Print the error message to the user.
			fmt.Fprintf(os.Stderr, "error executing %q command: %s\n", cmdName, err)
		}

		Close()
		ExitFunc(exit)

		return nil
	}
}
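// A hedged sketch of how FactoryAction and FactoryCompletion (above) might be
// wired into a codegangsta/cli command definition, assuming a cli version whose
// Action field accepts a cli.ActionFunc. listCLICommand and newListCommand are
// illustrative names, not part of this package:
func listCLICommand(log logging.Logger) cli.Command {
	return cli.Command{
		Name:         "list",
		Usage:        "List remote machines.",
		Action:       FactoryAction(newListCommand, log, "list"),
		BashComplete: FactoryCompletion(newListCommand, log, "list"),
	}
}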
func startKlient(log logging.Logger, s service.Service) error {
	// For debug purposes, run a health check before we even attempt to start.
	// This will help give us a sense of what this machine's health check was
	// before klient tried to start.
	if res, ok := defaultHealthChecker.CheckAllExceptRunning(); !ok {
		log.Warning("before attempting to start klient health check returned not-okay. reason: %s", res)
	}

	if err := s.Start(); err != nil {
		log.Error("Error starting Service. err:%s", err)
		fmt.Println(FailedStartKlient)
		return err
	}

	fmt.Println("Starting...")

	err := WaitUntilStarted(config.Konfig.Endpoints.Klient.Private.String(), CommandAttempts, CommandWaitTime)
	if err != nil {
		log.Error(
			"Timed out while waiting for Klient to start. attempts:%d, err:%s",
			CommandAttempts, err,
		)

		if s, ok := defaultHealthChecker.CheckAllExceptRunning(); !ok {
			fmt.Printf(`Failed to start %s in time.

A health check found the following issue:
%s
`, config.KlientName, s)
		} else {
			fmt.Println(FailedStartKlient)
		}

		return err
	}

	return nil
}
// StartCommand starts the local klient. Requires sudo.
func StartCommand(c *cli.Context, log logging.Logger, _ string) int {
	if len(c.Args()) != 0 {
		cli.ShowCommandHelp(c, "start")
		return 1
	}

	log = log.New("cmd:start")

	s, err := newService(nil)
	if err != nil {
		log.Error("Error creating Service. err:%s", err)
		fmt.Println(GenericInternalNewCodeError)
		return 1
	}

	// No UX message needed, startKlient will do that itself.
	if err := startKlient(log, s); err != nil {
		log.Error("failed to start klient: %s", err)
		return 1
	}

	fmt.Printf("Successfully started %s\n", config.KlientName)
	return 0
}
func setPublicChannel(log logging.Logger, group *kodingmodels.Group) {
	c := models.NewChannel()
	selector := map[string]interface{}{
		"type_constant": models.Channel_TYPE_GROUP,
		"group_name":    models.Channel_KODING_NAME,
	}

	err := c.One(bongo.NewQS(selector))
	if err != nil && err != bongo.RecordNotFound {
		log.Error("err while fetching koding channel: %s", err.Error())
		return
	}

	if err == bongo.RecordNotFound {
		log.Debug("postgres public channel couldn't be found, creating it")

		acc, err := createChannelOwner(group)
		if err != nil {
			log.Error(err.Error())
			return
		}

		c.Name = "public"
		c.CreatorId = acc.Id
		c.GroupName = models.Channel_KODING_NAME
		c.TypeConstant = models.Channel_TYPE_GROUP
		c.PrivacyConstant = models.Channel_PRIVACY_PUBLIC

		if err := c.Create(); err != nil {
			log.Error("err while creating the koding channel: %s", err.Error())
			return
		}
	}

	socialApiId := strconv.FormatInt(c.Id, 10)
	if group.SocialApiChannelId == socialApiId {
		log.Debug("mongo and postgres socialApiChannelId ids are the same")
		return
	}

	log.Debug("mongo and postgres socialApiChannelId ids are different, fixing it")

	if err := updateGroupPartially(group.Id, "socialApiChannelId", socialApiId); err != nil {
		log.Error("err while updating socialApiChannelId: %s", err.Error())
		return
	}
}
func (r *Remote) createNewMachineFromKite(c *KodingClient, log logging.Logger) error {
	host, err := r.hostFromClient(c.Client)
	if err != nil {
		log.Error("Unable to extract host from *kite.Client. err:%s", err)
		return err
	}

	// For backwards compatibility, check if the host already has a name
	// in the cache.
	//
	// If the name does not exist in the cache the string will be empty, and
	// Machines.Add() will create a new unique name.
	//
	// If the string *does* exist then we use that, remove it from the map,
	// and save the map to avoid dealing with this next time.
	name, ok := r.machineNamesCache[host]
	if ok {
		log.Info(
			"Using legacy name, and removing it from database. name:%s, host:%s",
			name, host,
		)
		delete(r.machineNamesCache, host)

		// Shouldn't bother exiting here; not a terrible error, but not good
		// either. Log it for knowledge, and move on.
		if err := r.saveMachinesNames(); err != nil {
			log.Error("Failed to save machine names. err:%s", err)
		}
	}

	// Name can be empty here, since Machines.Add() will handle creation
	// of the name.
	machineMeta := machine.MachineMeta{
		URL:          c.URL,
		IP:           host,
		Hostname:     c.Hostname,
		Username:     c.Username,
		Name:         name,
		MachineLabel: c.MachineLabel,
		Teams:        c.Teams,
	}

	newMachine, err := machine.NewMachine(machineMeta, r.log, c.Client)
	if err != nil {
		return err
	}

	if err = r.machines.Add(newMachine); err != nil {
		log.Error("Unable to Add new machine to *machine.Machines. err:%s", err)
		return err
	}

	return nil
}
// RegisterCommand registers a new user and logs them in, storing the resulting
// session in the local cache.
func RegisterCommand(c *cli.Context, log logging.Logger, _ string) int {
	rr := initRegisterRequest(c)
	r, err := checkAndAskRequiredFields(rr)
	if err != nil {
		fmt.Fprintln(os.Stderr, "Register failed with error:", err)
		log.Error("%s", err)
		return 1
	}

	host := config.Konfig.Endpoints.Koding.Public.String()
	client := httputil.DefaultRestClient(false)

	// TODO ~mehmetali
	// Handle the --alreadyMember flag with various options.
	// There might be some situations where errors need to be ignored.
	token, err := doRegisterRequest(r, client, host)
	if err != nil {
		fmt.Fprintln(os.Stderr, "Register failed with error:", err)
		log.Error("%s", err)
		return 1
	}

	clientID, err := doLoginRequest(client, host, token)
	if err != nil {
		// We don't need to inform the user about the error after they have
		// registered successfully.
		log.Error("%s", err)
		return 1
	}

	// Team cannot be empty (it's required while registering), otherwise
	// registration returns an error. Store the group name (slug) as "team"
	// inside the cache.
	session := &endpointauth.Session{
		ClientID: clientID,
		Team:     r.Slug,
	}

	// Set the client ID and team name in kd.bolt.
	endpointauth.Use(session)

	return 0
}
// ListCommand returns the list of remote machines belonging to the user or
// that can be accessed by the user.
func ListCommand(c *cli.Context, log logging.Logger, _ string) int {
	if len(c.Args()) != 0 {
		cli.ShowCommandHelp(c, "list")
		return 1
	}

	showAll := c.Bool("all")

	k, err := klient.CreateKlientWithDefaultOpts()
	if err != nil {
		log.Error("Error creating klient client. err:%s", err)
		fmt.Println(defaultHealthChecker.CheckAllFailureOrMessagef(GenericInternalError))
		return 1
	}

	if err := k.Dial(); err != nil {
		log.Error("Error dialing klient client. err:%s", err)
		fmt.Println(defaultHealthChecker.CheckAllFailureOrMessagef(GenericInternalError))
		return 1
	}

	infos, err := getListOfMachines(k)
	if err != nil {
		log.Error("Error listing machines. err:%s", err)
		fmt.Println(getListErrRes(err, defaultHealthChecker))
		return 1
	}

	// Sort our infos.
	sort.Sort(infos)

	// Filter out infos for listing and json.
	for i := 0; i < len(infos); i++ {
		info := &infos[i]

		onlineRecently := time.Since(info.OnlineAt) <= 24*time.Hour
		hasMounts := len(info.Mounts) > 0

		// Do not show machines that have been offline for more than 24h,
		// but only if the machine doesn't have any mounts and we aren't using
		// the --all flag.
		if !hasMounts && !showAll && !onlineRecently {
			// Remove this element from the slice, because we're not showing it
			// as described above.
			infos = append(infos[:i], infos[i+1:]...)

			// Decrement the index, since we're removing the item from the slice.
			i--
			continue
		}

		// For a clearer UX, replace the team name of the default Koding team
		// with koding.com.
		for j, team := range info.Teams {
			if team == "Koding" {
				info.Teams[j] = "koding.com"
			}
		}

		switch info.MachineStatus {
		case machine.MachineOffline:
			info.MachineStatusName = "offline"
		case machine.MachineOnline:
			info.MachineStatusName = "online"
		case machine.MachineDisconnected:
			info.MachineStatusName = "disconnected"
		case machine.MachineConnected:
			info.MachineStatusName = "connected"
		case machine.MachineError:
			info.MachineStatusName = "error"
		case machine.MachineRemounting:
			info.MachineStatusName = "remounting"
		default:
			info.MachineStatusName = "unknown"
		}
	}

	if c.Bool("json") {
		jsonBytes, err := json.MarshalIndent(infos, "", " ")
		if err != nil {
			log.Error("Marshalling infos to json failed. err:%s", err)
			fmt.Println(GenericInternalError)
			return 1
		}

		fmt.Println(string(jsonBytes))
		return 0
	}

	w := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)
	fmt.Fprintf(w, "\tTEAM\tLABEL\tIP\tALIAS\tSTATUS\tMOUNTED PATHS\n")
	for i, info := range infos {
		// Join multiple teams into a single identifier.
		team := strings.Join(info.Teams, ",")

		var formattedMount string
		if len(info.Mounts) > 0 {
			formattedMount += fmt.Sprintf(
				"%s -> %s",
				shortenPath(info.Mounts[0].LocalPath),
				shortenPath(info.Mounts[0].RemotePath),
			)
		}

		// Currently we display the status message instead of formattedMount,
		// if it exists.
		if info.StatusMessage != "" {
			formattedMount = info.StatusMessage
		}

		fmt.Fprintf(w, " %d.\t%s\t%s\t%s\t%s\t%s\t%s\n",
			i+1, team, info.MachineLabel, info.IP, info.VMName,
			info.MachineStatusName, formattedMount,
		)
	}
	w.Flush()

	return 0
}
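// The filtering loop in ListCommand removes elements from infos while
// iterating, decrementing the index after each removal. A standalone sketch of
// that idiom with a plain int slice; filterInPlace is illustrative, not part of
// this package:
func filterInPlace(xs []int, keep func(int) bool) []int {
	for i := 0; i < len(xs); i++ {
		if !keep(xs[i]) {
			// Remove xs[i] and re-check the element that shifted into slot i.
			xs = append(xs[:i], xs[i+1:]...)
			i--
		}
	}
	return xs
}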
// restoreLoadedMachineFromKite takes an old loaded machine and applies meta
// changes to it, while also creating a valid machine instance with a proper
// transport/etc.
func (r *Remote) restoreLoadedMachineFromKite(c *KodingClient, loadedMachine *machine.Machine, log logging.Logger) (bool, error) {
	// metaChanged is our return value, which lets the caller know if any of the
	// meta for this machine changed from the given kite. If it did, the caller
	// will want to save the machines to the local db.
	var metaChanged bool

	host, err := r.hostFromClient(c.Client)
	if err != nil {
		log.Error("Unable to extract host from *kite.Client. err:%s", err)
		return false, err
	}

	// Get our old machine meta, loaded from the DB.
	machineMeta := loadedMachine.MachineMeta

	if machineMeta.MachineLabel != c.MachineLabel {
		machineMeta.MachineLabel = c.MachineLabel
		metaChanged = true
	}

	if machineMeta.URL == "" && c.URL != "" {
		log.Warning("Machine missing MachineMeta.URL, updating it to %q", c.URL)
		machineMeta.URL = c.URL
		metaChanged = true
	}

	if machineMeta.IP == "" && host != "" {
		log.Warning("Machine missing MachineMeta.IP, updating it to %q", host)
		machineMeta.IP = host
		metaChanged = true
	}

	if machineMeta.Hostname != c.Hostname {
		machineMeta.Hostname = c.Hostname
		metaChanged = true
	}

	if machineMeta.Username != c.Username {
		machineMeta.Username = c.Username
		metaChanged = true
	}

	// Remove the machine, because we're going to be creating a new machine
	// instance below.
	err = r.machines.Remove(loadedMachine)
	if err != nil && err != machine.ErrMachineNotFound {
		log.Error("Unable to Remove old machine from *machine.Machines. err:%s", err)
		return false, err
	}

	restoredMachine, err := machine.NewMachine(machineMeta, r.log, c.Client)
	if err != nil {
		return false, err
	}

	// Add our newly created machine instance.
	if err = r.machines.Add(restoredMachine); err != nil {
		log.Error("Unable to Add new machine to *machine.Machines. err:%s", err)
		return false, err
	}

	return metaChanged, nil
}
// RunCommandFactory is the factory method for RunCommand.
func RunCommandFactory(c *cli.Context, log logging.Logger, _ string) int {
	if len(c.Args()) < 1 {
		cli.ShowCommandHelp(c, "run")
		return 1
	}

	// Get the path where the command was run.
	localPath, err := filepath.Abs(filepath.Dir(os.Args[0]))
	if err != nil {
		log.Error("Failed to create absolute directory. err:%s", err)
		fmt.Println(GenericInternalError)
		return 1
	}

	r, err := NewRunCommand()
	if err != nil {
		log.Error("Failed to initialize command. err:%s", err)
		fmt.Println(GenericInternalError)
		return 1
	}

	var (
		cmdWithArgs    = os.Args[2:]
		cmdWithArgsStr = strings.Join(cmdWithArgs, " ")
	)

	res, err := r.runOnRemote(localPath, cmdWithArgsStr)
	if err != nil && err != mountcli.ErrNoMountPath {
		log.Error("Error running command. err:%s", err)

		// Note that we're printing the error here to the user. This seems
		// reasonable since their own command may have failed, and that
		// information is meaningful to them.
		//
		// Eg, the binary is not executable, or not formatted for their os, etc.
		fmt.Printf("Error running command: '%s'\n", err)
		return 1
	}

	if err == mountcli.ErrNoMountPath {
		fmt.Println("Running on local:", cmdWithArgsStr)
		return r.runOnLocal(cmdWithArgs)
	}

	fmt.Println("Running on remote:", cmdWithArgsStr)

	// Write to the standard out stream.
	// This stream can contain values even if the exit status is not 0.
	if res.Stdout != "" {
		os.Stdout.WriteString(res.Stdout)
	}

	if res.Stderr != "" {
		os.Stderr.WriteString(res.Stderr)
	}

	if res.ExitStatus != 0 {
		return res.ExitStatus
	}

	return 0
}
// UpdateCommand updates this binary if there's an update available.
func UpdateCommand(c *cli.Context, log logging.Logger, _ string) int {
	if len(c.Args()) != 0 {
		cli.ShowCommandHelp(c, "update")
		return 1
	}

	var (
		forceUpdate    = c.Bool("force")
		klientVersion  = c.Int("klient-version")
		klientChannel  = c.String("klient-channel")
		kdVersion      = c.Int("kd-version")
		kdChannel      = c.String("kd-channel")
		continueUpdate = c.Bool("continue")
	)

	if kdChannel == "" {
		kdChannel = config.Environment
	}

	if klientChannel == "" {
		klientChannel = config.Environment
	}

	// Create and open the log file, to be safe in case it's missing.
	f, err := createLogFile(LogFilePath)
	if err != nil {
		fmt.Println(`Error: Unable to open log files.`)
	} else {
		log.SetHandler(logging.NewWriterHandler(f))
		log.Info("Update created log file at %q", LogFilePath)
	}

	if !shouldTryUpdate(kdVersion, klientVersion, forceUpdate) {
		yesUpdate, err := checkUpdate()
		if err != nil {
			log.Error("Error checking if update is available. err:%s", err)
			fmt.Println(FailedCheckingUpdateAvailable)
			return 1
		}

		if !yesUpdate {
			fmt.Println("No update available.")
			return 0
		}

		fmt.Println("An update is available.")
	}

	if kdVersion == 0 {
		var err error

		kdVersion, err = latestVersion(config.Konfig.Endpoints.KDLatest.Public.String())
		if err != nil {
			log.Error("Error fetching klientctl update version. err: %s", err)
			fmt.Println(FailedCheckingUpdateAvailable)
			return 1
		}
	}

	if klientVersion == 0 {
		var err error

		klientVersion, err = latestVersion(config.Konfig.Endpoints.KlientLatest.Public.String())
		if err != nil {
			log.Error("Error fetching klient update version. err: %s", err)
			fmt.Println(FailedCheckingUpdateAvailable)
			return 1
		}
	}

	klientPath := filepath.Join(KlientDirectory, "klient")
	klientctlPath := filepath.Join(KlientctlDirectory, "kd")
	klientUrl := config.S3Klient(klientVersion, klientChannel)
	klientctlUrl := config.S3Klientctl(kdVersion, kdChannel)

	// If --continue is not passed, download kd and then call the new kd binary
	// with `kd update --continue`, so that the new code handles updates to
	// klient.sh, the service, and any migration code needed.
	if !continueUpdate {
		// Only show this message once.
		fmt.Println("Updating...")

		if err := downloadRemoteToLocal(klientctlUrl, klientctlPath); err != nil {
			log.Error("Error downloading klientctl. err:%s", err)
			fmt.Println(FailedDownloadUpdate)
			return 1
		}

		flags := flagsFromContext(c)

		// Very important to pass --continue for the subprocess.
		// --force also helps ensure it updates, since the subprocess is
		// technically the latest KD version.
		flags = append([]string{"update", "--continue=true", "--force=true"}, flags...)
		log.Info("%s", flags)

		cmd := exec.Command(klientctlPath, flags...)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr

		if err := cmd.Run(); err != nil {
			log.Error("error during --continue update. err: %s", err)
			return 1
		}

		return 0
	}

	klientSh := klientSh{
		User:          konfig.CurrentUser.Username,
		KlientBinPath: filepath.Join(KlientDirectory, "klient"),
	}

	// Ensure the klient home dir is writeable by the user.
	if klientSh.User != "" {
		ensureWriteable(KlientctlDirectory, klientSh.User)
	}

	opts := &ServiceOptions{
		Username: klientSh.User,
	}

	s, err := newService(opts)
	if err != nil {
		log.Error("Error creating Service. err:%s", err)
		fmt.Println(GenericInternalNewCodeError)
		return 1
	}

	fmt.Printf("Stopping %s...\n", config.KlientName)

	// Stop klient before we update it.
	if err := s.Stop(); err != nil {
		log.Error("Error stopping Service. err:%s", err)
		fmt.Println(FailedStopKlient)
		return 1
	}

	if err := downloadRemoteToLocal(klientUrl, klientPath); err != nil {
		log.Error("Error downloading klient. err:%s", err)
		fmt.Println(FailedDownloadUpdate)
		return 1
	}

	klientScript := filepath.Join(KlientDirectory, "klient.sh")

	if err := klientSh.Create(klientScript); err != nil {
		log.Error("Error writing klient.sh file. err:%s", err)
		fmt.Println(FailedInstallingKlient)
		return 1
	}

	// Try to migrate from an old managed klient to the new kd-installed klient.
	switch runtime.GOOS {
	case "darwin":
		oldS, err := service.New(&serviceProgram{}, &service.Config{
			Name:       "com.koding.klient",
			Executable: klientScript,
		})
		if err != nil {
			break
		}

		oldS.Stop()
		oldS.Uninstall()
	}

	// Try to uninstall first, otherwise Install may fail if
	// klient.plist or the klient init script already exists.
	s.Uninstall()

	// Install the klient binary as an OS service.
	if err = s.Install(); err != nil {
		log.Error("Error installing Service. err:%s", err)
		fmt.Println(GenericInternalNewCodeError)
		return 1
	}

	// Start klient now that it's done updating.
	if err := s.Start(); err != nil {
		log.Error("Error starting Service. err:%s", err)
		fmt.Println(FailedStartKlient)
		return 1
	}

	// Best-effort attempts at fixing permissions and ownership; ignore any errors.
	_ = uploader.FixPerms()
	_ = configstore.FixOwner()

	fmt.Printf("Successfully updated to latest version of %s.\n", config.Name)
	return 0
}
// InstallCommandFactory is the implementation of the install command, with an
// error return. This allows us to track error metrics.
func InstallCommandFactory(c *cli.Context, log logging.Logger, _ string) (exit int, err error) {
	if len(c.Args()) != 1 {
		cli.ShowCommandHelp(c, "install")
		return 1, errors.New("incorrect cli usage: no args")
	}

	// Create the log file and point our logger handler at it, so that we can
	// log errors during installation.
	f, err := createLogFile(LogFilePath)
	if err != nil {
		fmt.Println(`Error: Unable to open log files.`)
	} else {
		log.SetHandler(logging.NewWriterHandler(f))
		log.Info("Installation created log file at %q", LogFilePath)
	}

	// Track all failed installations.
	defer func() {
		if err != nil {
			log.Error(err.Error())
			metrics.TrackInstallFailed(err.Error(), config.VersionNum())
		}
	}()

	authToken := c.Args().Get(0)

	// We need to check if the authToken is somehow empty, because klient
	// will default to user/pass if there is no auth token (despite setting
	// the token flag).
	if strings.TrimSpace(authToken) == "" {
		cli.ShowCommandHelp(c, "install")
		return 1, errors.New("incorrect cli usage: missing token")
	}

	// Create the installation dir, if needed.
	if err := os.MkdirAll(KlientDirectory, 0755); err != nil {
		log.Error(
			"Error creating klient binary directory(s). path:%s, err:%s",
			KlientDirectory, err,
		)
		fmt.Println(FailedInstallingKlient)
		return 1, fmt.Errorf("failed creating klient binary: %s", err)
	}

	klientBinPath := filepath.Join(KlientDirectory, "klient")

	// TODO: Accept a `kd install --user foo` flag to replace the
	// environ checking.
	klientSh := klientSh{
		User:          konfig.CurrentUser.Username,
		KlientBinPath: klientBinPath,
	}

	if err := klientSh.Create(filepath.Join(KlientDirectory, "klient.sh")); err != nil {
		err = fmt.Errorf("error writing klient.sh file: %s", err)
		fmt.Println(FailedInstallingKlient)
		return 1, err
	}

	fmt.Println("Downloading...")

	version, err := latestVersion(config.Konfig.Endpoints.KlientLatest.Public.String())
	if err != nil {
		fmt.Printf(FailedDownloadingKlient)
		return 1, fmt.Errorf("error getting latest klient version: %s", err)
	}

	if err := downloadRemoteToLocal(config.S3Klient(version, config.Environment), klientBinPath); err != nil {
		fmt.Printf(FailedDownloadingKlient)
		return 1, fmt.Errorf("error downloading klient binary: %s", err)
	}

	fmt.Printf("Created %s\n", klientBinPath)
	fmt.Printf("Authenticating you to the %s\n", config.KlientName)

	cmd := exec.Command(klientBinPath, "-register",
		"-token", authToken,
		"--kontrol-url", strings.TrimSpace(c.String("kontrol")),
	)

	var errBuf bytes.Buffer

	// Note that we are *only* printing to Stdout. This is done because
	// klient logs error messages to Stderr, and we want to control the UX for
	// that interaction.
	//
	// TODO: Log klient's Stderr message on error, if any.
	cmd.Stdout = os.Stdout
	cmd.Stdin = os.Stdin
	cmd.Stderr = &errBuf

	if err := cmd.Run(); err != nil {
		err = fmt.Errorf("error registering klient: %s, klient stderr: %s", err, errBuf.String())
		fmt.Println(FailedRegisteringKlient)
		return 1, err
	}

	// Best-effort attempt at fixing ownership; ignore any errors.
	_ = configstore.FixOwner()

	opts := &ServiceOptions{
		Username: klientSh.User,
	}

	// Create our interface to the OS-specific service.
	s, err := newService(opts)
	if err != nil {
		fmt.Println(GenericInternalNewCodeError)
		return 1, fmt.Errorf("error creating Service: %s", err)
	}

	// Try to uninstall first, otherwise Install may fail if
	// klient.plist or the klient init script already exists.
	s.Uninstall()

	// Install the klient binary as an OS service.
	if err := s.Install(); err != nil {
		fmt.Println(GenericInternalNewCodeError)
		return 1, fmt.Errorf("error installing Service: %s", err)
	}

	// Tell the service to start. Normally it starts automatically, but
	// if the user told the service to stop (previously), it may not
	// start automatically.
	//
	// Note that the service may error if it is already running, so
	// we're ignoring any starting errors here. We will verify the
	// connection below, anyway.
	if err := s.Start(); err != nil {
		fmt.Println(FailedStartKlient)
		return 1, fmt.Errorf("error starting klient service: %s", err)
	}

	fmt.Println("Verifying installation...")

	err = WaitUntilStarted(config.Konfig.Endpoints.Klient.Private.String(), CommandAttempts, CommandWaitTime)

	// After CommandAttempts tries, if err != nil we failed to connect to
	// klient. Inform the user.
	if err != nil {
		fmt.Println(FailedInstallingKlient)
		return 1, fmt.Errorf("error verifying the installation of klient: %s", err)
	}

	// Best-effort attempt at fixing permissions; ignore any errors.
	_ = uploader.FixPerms()

	// Track metrics.
	metrics.TrackInstall(config.VersionNum())

	fmt.Printf("\n\nSuccessfully installed and started the %s!\n", config.KlientName)

	return 0, nil
}
// SSHCommandFactory is the factory method for SSHCommand.
func SSHCommandFactory(c *cli.Context, log logging.Logger, _ string) int {
	if len(c.Args()) != 1 {
		cli.ShowCommandHelp(c, "ssh")
		return 1
	}

	if c.Bool("debug") {
		log.SetLevel(logging.DEBUG)
	}

	opts := ssh.SSHCommandOpts{
		Debug:          c.Bool("debug") || config.Konfig.Debug,
		RemoteUsername: c.String("username"),
		Ask:            true,
	}

	cmd, err := ssh.NewSSHCommand(log, opts)
	mountName := c.Args()[0]

	// TODO: Refactor SSHCommand instance to require no initialization,
	// and thus avoid needing to log an error in a weird place.
	if err != nil {
		log.Error("Error initializing ssh: %s", err)

		switch err {
		case ssh.ErrLocalDialingFailed:
			fmt.Println(
				defaultHealthChecker.CheckAllFailureOrMessagef(KlientIsntRunning),
			)
		default:
			fmt.Println(GenericInternalError)
		}

		metrics.TrackSSHFailed(mountName, err.Error(), config.VersionNum())
		return 1
	}

	now := time.Now()

	// Track metrics.
	go func() {
		metrics.TrackSSH(mountName, config.VersionNum())
	}()

	err = cmd.Run(mountName)
	switch err {
	case nil:
		metrics.TrackSSHEnd(mountName, "", time.Since(now).Minutes(), config.VersionNum())
		return 0
	case ssh.ErrMachineNotFound:
		fmt.Println(MachineNotFound)
	case ssh.ErrCannotFindUser:
		fmt.Println(CannotFindSSHUser)
		metrics.TrackSSHFailed(mountName, err.Error(), config.VersionNum())
	case ssh.ErrFailedToGetSSHKey:
		fmt.Println(FailedGetSSHKey)
		metrics.TrackSSHFailed(mountName, err.Error(), config.VersionNum())
	case ssh.ErrMachineNotValidYet:
		fmt.Println(defaultHealthChecker.CheckAllFailureOrMessagef(MachineNotValidYet))
		metrics.TrackSSHFailed(mountName, err.Error(), config.VersionNum())
	case ssh.ErrRemoteDialingFailed:
		fmt.Println(defaultHealthChecker.CheckAllFailureOrMessagef(FailedDialingRemote))
		metrics.TrackSSHFailed(mountName, err.Error(), config.VersionNum())
	case shortcut.ErrMachineNotFound:
		fmt.Println(MachineNotFound)
		metrics.TrackSSHFailed(mountName, err.Error(), config.VersionNum())
	}

	log.Error("SSHCommand.Run returned err:%s", err)

	// ssh returns `exit status 255` on disconnection, so we also send how long
	// the session has been running, to indicate whether ssh was successful at
	// least once and then failed due to disconnection.
	metrics.TrackSSHEnd(mountName, err.Error(), time.Since(now).Minutes(), config.VersionNum())

	return 1
}