// NewAddresses fetches EC2 IP address list from each region.
//
// If log is nil, defaultLogger is used instead.
func NewAddresses(clients *amazon.Clients, log logging.Logger) *Addresses {
	if log == nil {
		log = defaultLogger
	}

	a := newAddresses()

	var wg sync.WaitGroup
	var mu sync.Mutex // protects a.m

	for region, client := range clients.Regions() {
		wg.Add(1)

		go func(region string, client *amazon.Client) {
			defer wg.Done()

			addresses, err := client.Addresses()
			if err != nil {
				log.Error("[%s] fetching IP addresses error: %s", region, err)
				return
			}

			log.Info("[%s] fetched %d addresses", region, len(addresses))

			var ok bool

			mu.Lock()
			if _, ok = a.m[client]; !ok {
				a.m[client] = addresses
			}
			mu.Unlock()

			if ok {
				panic(fmt.Errorf("[%s] duplicated client=%p: %+v", region, client, addresses))
			}
		}(region, client)
	}

	wg.Wait()

	return a
}
// NewMultiInstances fetches EC2 instance list from each region.
func NewMultiInstances(clients *amazon.Clients, log logging.Logger) *MultiInstances {
	if log == nil {
		log = defaultLogger
	}

	var m = newMultiInstances()

	var wg sync.WaitGroup
	var mu sync.Mutex // protects m.m

	for region, client := range clients.Regions() {
		wg.Add(1)

		go func(region string, client *amazon.Client) {
			defer wg.Done()

			instances, err := client.Instances()
			if err != nil {
				log.Error("[%s] fetching instances error: %s", region, err)
				return
			}

			log.Info("[%s] fetched %d instances", region, len(instances))

			i := make(Instances, len(instances))

			for _, instance := range instances {
				i[aws.StringValue(instance.InstanceId)] = instance
			}

			var ok bool

			mu.Lock()
			if _, ok = m.m[client]; !ok {
				m.m[client] = i
			}
			mu.Unlock()

			if ok {
				panic(fmt.Errorf("[%s] duplicated client=%p: %+v", region, client, i))
			}
		}(region, client)
	}

	wg.Wait()

	return m
}
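// The two constructors above share the same fan-out pattern: one goroutine per
// region writes its result into a shared map guarded by a mutex, and a
// duplicate *amazon.Client key is treated as a programming error. The sketch
// below is illustrative only (its names are not part of this package); it
// isolates that pattern, including the detail that the panic happens outside
// the critical section so the mutex is never held while unwinding.
func fanOutSketch(keys []string, fetch func(string) (int, error)) map[string]int {
	var (
		wg sync.WaitGroup
		mu sync.Mutex // protects results
	)

	results := make(map[string]int)

	for _, key := range keys {
		wg.Add(1)

		go func(key string) {
			defer wg.Done()

			n, err := fetch(key)
			if err != nil {
				return // a real caller would log and skip this key, as above
			}

			var dup bool

			mu.Lock()
			if _, dup = results[key]; !dup {
				results[key] = n
			}
			mu.Unlock()

			if dup {
				panic(fmt.Errorf("duplicated key %q", key))
			}
		}(key)
	}

	wg.Wait()

	return results
}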
func (r *Remote) createNewMachineFromKite(c *KodingClient, log logging.Logger) error {
	var host string
	host, err := r.hostFromClient(c.Client)
	if err != nil {
		log.Error("Unable to extract host from *kite.Client. err:%s", err)
		return err
	}

	// For backwards compatibility, check if the host already has a name
	// in the cache.
	//
	// If the name does not exist for the host, the string will be empty, and
	// Machines.Add() will create a new unique name.
	//
	// If the string *does* exist then we use that, remove it from the map,
	// and save the map to avoid dealing with this next time.
	name, ok := r.machineNamesCache[host]
	if ok {
		log.Info(
			"Using legacy name, and removing it from database. name:%s, host:%s",
			name, host,
		)
		delete(r.machineNamesCache, host)

		// Shouldn't bother exiting here; it's not a terrible error, but not good either.
		// Log it for visibility, and move on.
		if err := r.saveMachinesNames(); err != nil {
			log.Error("Failed to save machine names. err:%s", err)
		}
	}

	// Name can be empty here, since Machines.Add() will handle creation
	// of the name.
	machineMeta := machine.MachineMeta{
		URL:          c.URL,
		IP:           host,
		Hostname:     c.Hostname,
		Username:     c.Username,
		Name:         name,
		MachineLabel: c.MachineLabel,
		Teams:        c.Teams,
	}

	newMachine, err := machine.NewMachine(machineMeta, r.log, c.Client)
	if err != nil {
		return err
	}

	if err = r.machines.Add(newMachine); err != nil {
		log.Error("Unable to Add new machine to *machine.Machines. err:%s", err)
		return err
	}

	return nil
}
func setDefaults(log logging.Logger) {
	group, err := modelhelper.GetGroup(models.Channel_KODING_NAME)
	if err != nil {
		log.Error("err while fetching koding group: %s", err.Error())
		return
	}

	log.Debug("mongo group found")

	setPublicChannel(log, group)
	setChangeLogChannel(log, group)

	log.Info("socialApi defaults are created")
}
// New creates a new terraformer
func New(conf *Config, log logging.Logger) (*Terraformer, error) {
	ls, err := storage.NewFile(conf.LocalStorePath, log)
	if err != nil {
		return nil, fmt.Errorf("error while creating local store: %s", err)
	}

	var rs storage.Interface
	if conf.AWS.Key != "" && conf.AWS.Secret != "" && conf.AWS.Bucket != "" {
		s3, err := storage.NewS3(conf.AWS.Key, conf.AWS.Secret, conf.AWS.Bucket, log)
		if err != nil {
			return nil, fmt.Errorf("error while creating remote store: %s", err)
		}
		rs = s3
	} else {
		remotePath := filepath.Dir(conf.LocalStorePath)
		if conf.AWS.Bucket != "" {
			remotePath = filepath.Join(remotePath, conf.AWS.Bucket)
		} else {
			remotePath = filepath.Join(remotePath, filepath.Base(conf.LocalStorePath)+".remote")
		}

		local, err := storage.NewFile(remotePath, log)
		if err != nil {
			return nil, fmt.Errorf("error while creating remote store on local: %s", err)
		}

		log.Info("no S3 credentials, using local storage: %s", remotePath)

		rs = local
	}

	c, err := kodingcontext.New(ls, rs, log, conf.Debug)
	if err != nil {
		return nil, err
	}

	t := &Terraformer{
		Log:       log,
		Metrics:   common.MustInitMetrics(Name),
		Debug:     conf.Debug,
		Context:   c,
		Config:    conf,
		closeChan: make(chan struct{}),
	}

	t.handleSignals()

	return t, nil
}
func setChangeLogChannel(log logging.Logger, group *kodingmodels.Group) {
	c := models.NewChannel()
	selector := map[string]interface{}{
		"type_constant": models.Channel_TYPE_ANNOUNCEMENT,
		"group_name":    models.Channel_KODING_NAME,
	}

	// if err is nil
	// it means we already have that channel
	err := c.One(bongo.NewQS(selector))
	if err != nil && err != bongo.RecordNotFound {
		log.Error("err while fetching changelog channel: %s", err.Error())
		return
	}

	if err == bongo.RecordNotFound {
		log.Error("postgres changelog channel couldn't be found, creating it")

		acc, err := createChannelOwner(group)
		if err != nil {
			log.Error(err.Error())
			return
		}

		c.Name = "changelog"
		c.CreatorId = acc.Id
		c.GroupName = models.Channel_KODING_NAME
		c.TypeConstant = models.Channel_TYPE_ANNOUNCEMENT
		c.PrivacyConstant = models.Channel_PRIVACY_PRIVATE
		if err := c.Create(); err != nil {
			log.Error("err while creating the changelog channel: %s", err.Error())
			return
		}
	}

	socialApiAnnouncementChannelId := strconv.FormatInt(c.Id, 10)
	if group.SocialApiAnnouncementChannelId == socialApiAnnouncementChannelId {
		log.Info("mongo and postgres socialApiAnnouncementChannel ids are the same")
		return
	}

	log.Debug("mongo and postgres socialApiAnnouncementChannel ids are different, fixing it")

	if err := updateGroupPartially(group.Id, "socialApiAnnouncementChannelId", strconv.FormatInt(c.Id, 10)); err != nil {
		log.Error("err while updating socialApiAnnouncementChannelId: %s", err.Error())
		return
	}
}
func MustInitBongo(
	appName string,
	eventExchangeName string,
	c *Config,
	log logging.Logger,
	metrics *metrics.Metrics,
	debug bool,
) *bongo.Bongo {
	rmqConf := &rabbitmq.Config{
		Host:     c.Mq.Host,
		Port:     c.Mq.Port,
		Username: c.Mq.Login,
		Password: c.Mq.Password,
		Vhost:    c.Mq.Vhost,
	}

	bConf := &broker.Config{
		RMQConfig:    rmqConf,
		ExchangeName: eventExchangeName,
		QOS:          10,
	}

	db := MustInitDB(c, log, debug)

	broker := broker.New(appName, bConf, log)
	// set metrics for broker
	broker.Metrics = metrics

	bongo := bongo.New(broker, db, log)
	err := bongo.Connect()
	if err != nil {
		log.Fatal("Error while starting bongo, exiting. err: %s", err.Error())
	}

	log.Info("Caching disabled: %v", c.DisableCaching)
	if !c.DisableCaching {
		redisConn, err := InitRedisConn(c)
		if err != nil {
			log.Critical("Bongo couldn't connect to redis, caching will not be available. Err: %s", err.Error())
		} else {
			bongo.Cache = redisConn
		}
	}

	return bongo
}
// startIntervalerIfNeeded starts the given rsync interval, logs any errors, and adds the
// resulting Intervaler to the Machine struct so it can be stopped later.
func startIntervalerIfNeeded(log logging.Logger, remoteMachine *machine.Machine, c *rsync.Client, opts rsync.SyncIntervalOpts) {
	log = log.New("startIntervalerIfNeeded")

	if opts.Interval <= 0 {
		// Using debug, because this is not an error - just informative.
		log.Debug(
			"startIntervalerIfNeeded() called with interval:%d. Cannot start Intervaler",
			opts.Interval,
		)
		return
	}

	log.Info("Creating and starting RSync SyncInterval")
	intervaler, err := c.SyncInterval(opts)
	if err != nil {
		log.Error("rsync SyncInterval returned an error:%s", err)
		return
	}

	remoteMachine.Intervaler = intervaler
}
func registerSignalHandler(l *asgd.LifeCycle, log logging.Logger) chan struct{} {
	done := make(chan struct{}, 1)

	go func() {
		signals := make(chan os.Signal, 1)
		signal.Notify(signals)

		signal := <-signals
		switch signal {
		case syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGSTOP, syscall.SIGKILL:
			log.Info("received exit signal, closing...")
			err := l.Close()
			if err != nil {
				log.Critical(err.Error())
			}
			close(done)
		}
	}()

	return done
}
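// A minimal usage sketch (not from the original source): the channel returned
// by registerSignalHandler is closed once an exit signal has been handled, so
// a caller can simply block on it to keep the process alive until shutdown.
// The lifecycle and logger values are assumed to be constructed elsewhere.
func runUntilSignaled(l *asgd.LifeCycle, log logging.Logger) {
	done := registerSignalHandler(l, log)
	<-done // blocks until an exit signal has triggered l.Close()
}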
// UpdateCommand updates this binary if there's an update available.
func UpdateCommand(c *cli.Context, log logging.Logger, _ string) int {
	if len(c.Args()) != 0 {
		cli.ShowCommandHelp(c, "update")
		return 1
	}

	var (
		forceUpdate    = c.Bool("force")
		klientVersion  = c.Int("klient-version")
		klientChannel  = c.String("klient-channel")
		kdVersion      = c.Int("kd-version")
		kdChannel      = c.String("kd-channel")
		continueUpdate = c.Bool("continue")
	)

	if kdChannel == "" {
		kdChannel = config.Environment
	}

	if klientChannel == "" {
		klientChannel = config.Environment
	}

	// Create and open the log file, to be safe in case it's missing.
	f, err := createLogFile(LogFilePath)
	if err != nil {
		fmt.Println(`Error: Unable to open log files.`)
	} else {
		log.SetHandler(logging.NewWriterHandler(f))
		log.Info("Update created log file at %q", LogFilePath)
	}

	if !shouldTryUpdate(kdVersion, klientVersion, forceUpdate) {
		yesUpdate, err := checkUpdate()
		if err != nil {
			log.Error("Error checking if update is available. err:%s", err)
			fmt.Println(FailedCheckingUpdateAvailable)
			return 1
		}

		if !yesUpdate {
			fmt.Println("No update available.")
			return 0
		} else {
			fmt.Println("An update is available.")
		}
	}

	if kdVersion == 0 {
		var err error
		kdVersion, err = latestVersion(config.Konfig.Endpoints.KDLatest.Public.String())
		if err != nil {
			log.Error("Error fetching klientctl update version. err: %s", err)
			fmt.Println(FailedCheckingUpdateAvailable)
			return 1
		}
	}

	if klientVersion == 0 {
		var err error
		klientVersion, err = latestVersion(config.Konfig.Endpoints.KlientLatest.Public.String())
		if err != nil {
			log.Error("Error fetching klient update version. err: %s", err)
			fmt.Println(FailedCheckingUpdateAvailable)
			return 1
		}
	}

	klientPath := filepath.Join(KlientDirectory, "klient")
	klientctlPath := filepath.Join(KlientctlDirectory, "kd")
	klientUrl := config.S3Klient(klientVersion, klientChannel)
	klientctlUrl := config.S3Klientctl(kdVersion, kdChannel)

	// If --continue is not passed, download kd and then call the new kd binary with
	// `kd update --continue`, so that the new code handles updates to klient.sh,
	// service, and any migration code needed.
	if !continueUpdate {
		// Only show this message once.
		fmt.Println("Updating...")

		if err := downloadRemoteToLocal(klientctlUrl, klientctlPath); err != nil {
			log.Error("Error downloading klientctl. err:%s", err)
			fmt.Println(FailedDownloadUpdate)
			return 1
		}

		flags := flagsFromContext(c)

		// Very important to pass --continue for the subprocess.
		// --force also helps ensure it updates, since the subprocess is technically
		// the latest KD version.
		flags = append([]string{"update", "--continue=true", "--force=true"}, flags...)

		log.Info("%s", flags)

		cmd := exec.Command(klientctlPath, flags...)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr

		if err := cmd.Run(); err != nil {
			log.Error("error during --continue update. err: %s", err)
			return 1
		}

		return 0
	}

	klientSh := klientSh{
		User:          konfig.CurrentUser.Username,
		KlientBinPath: filepath.Join(KlientDirectory, "klient"),
	}

	// ensure the klient home dir is writeable by user
	if klientSh.User != "" {
		ensureWriteable(KlientctlDirectory, klientSh.User)
	}

	opts := &ServiceOptions{
		Username: klientSh.User,
	}

	s, err := newService(opts)
	if err != nil {
		log.Error("Error creating Service. err:%s", err)
		fmt.Println(GenericInternalNewCodeError)
		return 1
	}

	fmt.Printf("Stopping %s...\n", config.KlientName)

	// stop klient before we update it
	if err := s.Stop(); err != nil {
		log.Error("Error stopping Service. err:%s", err)
		fmt.Println(FailedStopKlient)
		return 1
	}

	if err := downloadRemoteToLocal(klientUrl, klientPath); err != nil {
		log.Error("Error downloading klient. err:%s", err)
		fmt.Println(FailedDownloadUpdate)
		return 1
	}

	klientScript := filepath.Join(KlientDirectory, "klient.sh")

	if err := klientSh.Create(klientScript); err != nil {
		log.Error("Error writing klient.sh file. err:%s", err)
		fmt.Println(FailedInstallingKlient)
		return 1
	}

	// try to migrate from old managed klient to new kd-installed klient
	switch runtime.GOOS {
	case "darwin":
		oldS, err := service.New(&serviceProgram{}, &service.Config{
			Name:       "com.koding.klient",
			Executable: klientScript,
		})
		if err != nil {
			break
		}

		oldS.Stop()
		oldS.Uninstall()
	}

	// try to uninstall first, otherwise Install may fail if
	// klient.plist or klient init script already exist
	s.Uninstall()

	// Install the klient binary as an OS service
	if err = s.Install(); err != nil {
		log.Error("Error installing Service. err:%s", err)
		fmt.Println(GenericInternalNewCodeError)
		return 1
	}

	// start klient now that it's done updating
	if err := s.Start(); err != nil {
		log.Error("Error starting Service. err:%s", err)
		fmt.Println(FailedStartKlient)
		return 1
	}

	// Best-effort attempts at fixing permissions and ownership, ignore any errors.
	_ = uploader.FixPerms()
	_ = configstore.FixOwner()

	fmt.Printf("Successfully updated to latest version of %s.\n", config.Name)

	return 0
}
// The implementation of InstallCommandFactory, with an error return. This
// allows us to track the error metrics.
func InstallCommandFactory(c *cli.Context, log logging.Logger, _ string) (exit int, err error) {
	if len(c.Args()) != 1 {
		cli.ShowCommandHelp(c, "install")
		return 1, errors.New("incorrect cli usage: no args")
	}

	// Create the log file and point our logger handler at it, so that we can
	// log errors during installation.
	f, err := createLogFile(LogFilePath)
	if err != nil {
		fmt.Println(`Error: Unable to open log files.`)
	} else {
		log.SetHandler(logging.NewWriterHandler(f))
		log.Info("Installation created log file at %q", LogFilePath)
	}

	// Track all failed installations.
	defer func() {
		if err != nil {
			log.Error(err.Error())
			metrics.TrackInstallFailed(err.Error(), config.VersionNum())
		}
	}()

	authToken := c.Args().Get(0)

	// We need to check if the authToken is somehow empty, because klient
	// will default to user/pass if there is no auth token (despite setting
	// the token flag)
	if strings.TrimSpace(authToken) == "" {
		cli.ShowCommandHelp(c, "install")
		return 1, errors.New("incorrect cli usage: missing token")
	}

	// Create the installation dir, if needed.
	if err := os.MkdirAll(KlientDirectory, 0755); err != nil {
		log.Error(
			"Error creating klient binary directory(s). path:%s, err:%s",
			KlientDirectory, err,
		)
		fmt.Println(FailedInstallingKlient)
		return 1, fmt.Errorf("failed creating klient binary: %s", err)
	}

	klientBinPath := filepath.Join(KlientDirectory, "klient")

	// TODO: Accept `kd install --user foo` flag to replace the
	// environ checking.
	klientSh := klientSh{
		User:          konfig.CurrentUser.Username,
		KlientBinPath: klientBinPath,
	}

	if err := klientSh.Create(filepath.Join(KlientDirectory, "klient.sh")); err != nil {
		err = fmt.Errorf("error writing klient.sh file: %s", err)
		fmt.Println(FailedInstallingKlient)
		return 1, err
	}

	fmt.Println("Downloading...")

	version, err := latestVersion(config.Konfig.Endpoints.KlientLatest.Public.String())
	if err != nil {
		fmt.Printf(FailedDownloadingKlient)
		return 1, fmt.Errorf("error getting latest klient version: %s", err)
	}

	if err := downloadRemoteToLocal(config.S3Klient(version, config.Environment), klientBinPath); err != nil {
		fmt.Printf(FailedDownloadingKlient)
		return 1, fmt.Errorf("error downloading klient binary: %s", err)
	}

	fmt.Printf("Created %s\n", klientBinPath)
	fmt.Printf(`Authenticating you to the %s
`, config.KlientName)

	cmd := exec.Command(klientBinPath,
		"-register",
		"-token", authToken,
		"--kontrol-url", strings.TrimSpace(c.String("kontrol")),
	)

	var errBuf bytes.Buffer

	// Note that we are *only* printing to Stdout. This is done because
	// Klient logs error messages to Stderr, and we want to control the UX for
	// that interaction.
	//
	// TODO: Log Klient's Stderr message on error, if any.
	cmd.Stdout = os.Stdout
	cmd.Stdin = os.Stdin
	cmd.Stderr = &errBuf

	if err := cmd.Run(); err != nil {
		err = fmt.Errorf("error registering klient: %s, klient stderr: %s", err, errBuf.String())
		fmt.Println(FailedRegisteringKlient)
		return 1, err
	}

	// Best-effort attempts at fixing permissions and ownership, ignore any errors.
	_ = configstore.FixOwner()

	opts := &ServiceOptions{
		Username: klientSh.User,
	}

	// Create our interface to the OS-specific service
	s, err := newService(opts)
	if err != nil {
		fmt.Println(GenericInternalNewCodeError)
		return 1, fmt.Errorf("error creating Service: %s", err)
	}

	// try to uninstall first, otherwise Install may fail if
	// klient.plist or klient init script already exist
	s.Uninstall()

	// Install the klient binary as an OS service
	if err := s.Install(); err != nil {
		fmt.Println(GenericInternalNewCodeError)
		return 1, fmt.Errorf("error installing Service: %s", err)
	}

	// Tell the service to start. Normally it starts automatically, but
	// if the user told the service to stop (previously), it may not
	// start automatically.
	//
	// Note that the service may error if it is already running, so
	// we're ignoring any starting errors here. We will verify the
	// connection below, anyway.
	if err := s.Start(); err != nil {
		fmt.Println(FailedStartKlient)
		return 1, fmt.Errorf("error starting klient service: %s", err)
	}

	fmt.Println("Verifying installation...")
	err = WaitUntilStarted(config.Konfig.Endpoints.Klient.Private.String(), CommandAttempts, CommandWaitTime)

	// After X times, if err != nil we failed to connect to klient.
	// Inform the user.
	if err != nil {
		fmt.Println(FailedInstallingKlient)
		return 1, fmt.Errorf("error verifying the installation of klient: %s", err)
	}

	// Best-effort attempts at fixing permissions and ownership, ignore any errors.
	_ = uploader.FixPerms()

	// track metrics
	metrics.TrackInstall(config.VersionNum())

	fmt.Printf("\n\nSuccessfully installed and started the %s!\n", config.KlientName)

	return 0, nil
}