// Join copies data between local and remote connections.
// It reads from one connection and writes to the other.
// It's a building block for ProxyFunc implementations.
func Join(local, remote net.Conn, log logging.Logger) {
	var wg sync.WaitGroup
	wg.Add(2)

	transfer := func(side string, dst, src net.Conn) {
		log.Debug("proxying %s -> %s", src.RemoteAddr(), dst.RemoteAddr())

		n, err := io.Copy(dst, src)
		if err != nil {
			log.Error("%s: copy error: %s", side, err)
		}

		if err := src.Close(); err != nil {
			log.Debug("%s: close error: %s", side, err)
		}

		// not for yamux streams, but for client to local server connections
		if d, ok := dst.(*net.TCPConn); ok {
			if err := d.CloseWrite(); err != nil {
				log.Debug("%s: closeWrite error: %s", side, err)
			}
		}

		wg.Done()
		log.Debug("done proxying %s -> %s: %d bytes", src.RemoteAddr(), dst.RemoteAddr(), n)
	}

	go transfer("remote to local", local, remote)
	go transfer("local to remote", remote, local)

	wg.Wait()
}
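// A minimal sketch of how Join can serve as a building block for a ProxyFunc:
// for each accepted remote connection, dial the local server and let Join pump
// bytes in both directions until either side closes. The handler name and the
// local address here are hypothetical, not taken from the source.
func proxyConn(remote net.Conn, log logging.Logger) {
	local, err := net.Dial("tcp", "127.0.0.1:8080")
	if err != nil {
		remote.Close()
		return
	}

	Join(local, remote, log) // blocks until both copy directions finish
}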
// UninstallCommand configures the Uninstall struct and calls it based on the
// given codegangsta/cli context.
//
// TODO: remove all artifacts, ie bolt db, ssh keys, kd etc.
func UninstallCommand(c *cli.Context, log logging.Logger, _ string) (string, int) {
	warnings := []string{}

	// Ensure /etc/kite/kite.key is migrated to konfig.bolt before
	// old klient gets uninstalled. The endpoint/config package
	// performs lazy migrations, so it's enough to call any of
	// its methods and disregard the result.
	_ = configcli.List()

	s, err := newService(nil)
	if err != nil {
		log.Warning("Failed creating Service for uninstall. err:%s", err)
		warnings = append(warnings, FailedUninstallingKlientWarn)
	}

	uninstaller := &Uninstall{
		ServiceUninstaller: s,
		KlientName:         config.KlientName,
		KlientctlName:      config.Name,
		KlientctlPath:      filepath.Join(KlientctlDirectory, KlientctlBinName),
		// TODO: Store the klient directory structure(s) somewhere
		KlientParentDirectory: "/opt",
		KlientDirectory:       "kite/klient",
		KlientFilename:        "klient",
		KlientshFilename:      "klient.sh",
		remover:               os.Remove,
		warnings:              warnings,
		log:                   log,
	}

	return uninstaller.Uninstall()
}
// NewMachine initializes a new Machine struct with any internal vars created.
func NewMachine(meta MachineMeta, log logging.Logger, t Transport) (*Machine, error) {
	log = MachineLogger(meta, log)

	// Create our Pingers, to be used in the PingTrackers
	kitePinger := kitepinger.NewKitePinger(t)
	httpPinger, err := kitepinger.NewKiteHTTPPinger(meta.URL)
	if err != nil {
		log.Error(
			"Unable to create HTTPPinger from meta.URL. url:%s, err:%s",
			meta.URL, err,
		)
		return nil, err
	}

	m := &Machine{
		MachineMeta: meta,
		Log:         log,
		KiteTracker: kitepinger.NewPingTracker(kitePinger),
		HTTPTracker: kitepinger.NewPingTracker(httpPinger),
		Transport:   t,
		discover:    discover.NewClient(),
		mountLocker: util.NewMutexWithState(),
	}

	m.discover.Log = m.Log.New("discover")

	// Start our http pinger, to give online/offline statuses for all machines.
	m.HTTPTracker.Start()

	return m, nil
}
func NewStatus(log logging.Logger, mg MachineGetter) *Status {
	return &Status{
		Log:           log.New("status"),
		MachineGetter: mg,
		HTTPClient:    defaultHTTPClient,
	}
}
// NewAddresses fetches EC2 IP address list from each region.
//
// If log is nil, defaultLogger is used instead.
func NewAddresses(clients *amazon.Clients, log logging.Logger) *Addresses {
	if log == nil {
		log = defaultLogger
	}

	a := newAddresses()

	var wg sync.WaitGroup
	var mu sync.Mutex // protects a.m

	for region, client := range clients.Regions() {
		wg.Add(1)

		go func(region string, client *amazon.Client) {
			defer wg.Done()

			addresses, err := client.Addresses()
			if err != nil {
				log.Error("[%s] fetching IP addresses error: %s", region, err)
				return
			}

			log.Info("[%s] fetched %d addresses", region, len(addresses))

			var ok bool

			mu.Lock()
			if _, ok = a.m[client]; !ok {
				a.m[client] = addresses
			}
			mu.Unlock()

			if ok {
				panic(fmt.Errorf("[%s] duplicated client=%p: %+v", region, client, addresses))
			}
		}(region, client)
	}

	wg.Wait()

	return a
}
// NewMultiInstances fetches EC2 instance list from each region.
func NewMultiInstances(clients *amazon.Clients, log logging.Logger) *MultiInstances {
	if log == nil {
		log = defaultLogger
	}

	var m = newMultiInstances()

	var wg sync.WaitGroup
	var mu sync.Mutex // protects m.m

	for region, client := range clients.Regions() {
		wg.Add(1)

		go func(region string, client *amazon.Client) {
			defer wg.Done()

			instances, err := client.Instances()
			if err != nil {
				log.Error("[%s] fetching instances error: %s", region, err)
				return
			}

			log.Info("[%s] fetched %d instances", region, len(instances))

			i := make(Instances, len(instances))

			for _, instance := range instances {
				i[aws.StringValue(instance.InstanceId)] = instance
			}

			var ok bool

			mu.Lock()
			if _, ok = m.m[client]; !ok {
				m.m[client] = i
			}
			mu.Unlock()

			if ok {
				panic(fmt.Errorf("[%s] duplicated client=%p: %+v", region, client, i))
			}
		}(region, client)
	}

	wg.Wait()

	return m
}
func CreateMetrics(appName string, log logging.Logger, outputMetrics bool) (*kodingmetrics.Metrics, *kodingmetrics.DogStatsD) {
	metric := kodingmetrics.New(appName)

	// if outputMetrics is set, print metrics to the console
	if outputMetrics {
		// change those loggers
		// https://github.com/rcrowley/go-metrics/blob/37df06ff62a7d8b4473b48d355008c838da87561/log.go
		// get those numbers from config
		// output metrics every 1 minute (6e10 ns)
		go metrics.Log(metric.Registry, 6e10, slog.New(os.Stderr, "metrics: ", slog.Lmicroseconds))
	}

	// Left here for future reference

	// for Mac
	syslogPath := "/var/run/syslog"
	if runtime.GOOS != "darwin" {
		// for linux
		syslogPath = "/dev/log"
	}

	w, err := syslog.Dial("unixgram", syslogPath, syslog.LOG_INFO, "socialapi-metrics")
	if err != nil {
		log.Error("Err while initializing syslog for metrics, metrics won't be in the syslog: %s", err.Error())
	} else {
		go metrics.Syslog(metric.Registry, 30e10, w)
	}

	statsd, err := kodingmetrics.NewDogStatsD(appName)
	if err == nil {
		go kodingmetrics.Collect(metric.Registry, statsd, 24e10)
	}

	return metric, statsd
}
// MachineListCommand returns list of remote machines belonging to the user or
// that can be accessed by her.
func MachineListCommand(c *cli.Context, log logging.Logger, _ string) (int, error) {
	// List command doesn't support identifiers.
	idents, err := getIdentifiers(c)
	if err != nil {
		return 1, err
	}
	if err := identifiersLimit(idents, 0, 0); err != nil {
		return 1, err
	}

	opts := &machine.ListOptions{
		Log: log.New("machine:list"),
	}

	infos, err := machine.List(opts)
	if err != nil {
		return 1, err
	}

	if c.Bool("json") {
		enc := json.NewEncoder(os.Stdout)
		enc.SetIndent("", "\t")
		enc.Encode(infos)
		return 0, nil
	}

	tabFormatter(os.Stdout, infos)
	return 0, nil
}
func NewErrorCommand(stdout io.Writer, log logging.Logger, err error, msg string) *ErrorCommand {
	return &ErrorCommand{
		Stdout:  util.NewFprint(stdout),
		Log:     log.New("errorCommand"),
		Message: msg,
		Error:   err,
	}
}
// NewRecordManager creates a RecordManager
func NewRecordManager(session *session.Session, log logging.Logger, region string, hostedZoneConf HostedZone) *RecordManager {
	return &RecordManager{
		route53:        route53.New(session),
		log:            log.New("recordmanager"),
		region:         region,
		hostedZoneConf: hostedZoneConf,
	}
}
func NewPagerduty(pc *PagerdutyConfig, log logging.Logger) (*Pagerduty, error) {
	return &Pagerduty{
		publicURL:      pc.PublicURL,
		integrationURL: pc.IntegrationURL,
		log:            log.New(PAGERDUTY),
	}, nil
}
func NewDefaultHealthChecker(l kodinglogging.Logger) *HealthChecker {
	return &HealthChecker{
		Log:                  l.New("HealthChecker"),
		HTTPClient:           defaultClient,
		LocalKlientAddress:   config.Konfig.Endpoints.Klient.Private.String(),
		KontrolAddress:       config.Konfig.Endpoints.Kontrol().Public.String(),
		InternetCheckAddress: config.Konfig.Endpoints.KlientLatest.Public.String(),
		TunnelKiteAddress:    config.Konfig.Endpoints.Tunnel.Public.String(),
	}
}
// RestartCommand stops and starts klient. If Klient is not running to begin
// with, it *just* starts klient.
func RestartCommand(c *cli.Context, log logging.Logger, _ string) int {
	if len(c.Args()) != 0 {
		cli.ShowCommandHelp(c, "restart")
		return 1
	}

	log = log.New("cmd:restart")

	s, err := newService(nil)
	if err != nil {
		log.Error("Error creating Service. err:%s", err)
		fmt.Println(GenericInternalNewCodeError)
		return 1
	}

	fmt.Printf("Restarting the %s, this may take a moment...\n", config.KlientName)

	klientWasRunning := IsKlientRunning(config.Konfig.Endpoints.Klient.Private.String())

	if klientWasRunning {
		// If klient is running, stop it, and tell the user if we fail
		if err := s.Stop(); err != nil {
			log.Error("Error stopping Service. err:%s", err)
			fmt.Println(FailedStopKlient)
			return 1
		}
	} else {
		// If klient appears to not be running, try to stop it anyway. However,
		// because it may not actually be running, don't inform the user if we
		// fail here.
		s.Stop()
	}

	err = WaitUntilStopped(config.Konfig.Endpoints.Klient.Private.String(), CommandAttempts, CommandWaitTime)
	if err != nil {
		log.Error(
			"Timed out while waiting for Klient to stop. attempts:%d, err:%s",
			CommandAttempts, err,
		)
		fmt.Println(FailedStopKlient)
		return 1
	}

	if klientWasRunning {
		fmt.Println("Stopped successfully.")
	}

	// No UX message needed, startKlient will do that itself.
	if err := startKlient(log, s); err != nil {
		log.Error("failed to start klient: %s", err)
		return 1
	}

	fmt.Printf("Successfully restarted %s\n", config.KlientName)

	return 0
}
func newCommand(cwd string, log logging.Logger) *command {
	cmd := &command{
		cwd: cwd,
	}

	if log != nil {
		cmd.log = log.New(cwd)
	}

	return cmd
}
func NewPivotal(pc *PivotalConfig, log logging.Logger) (*Pivotal, error) {
	if pc.ServerURL == "" {
		pc.ServerURL = PivotalServerURL
	}

	return &Pivotal{
		serverURL:      pc.ServerURL,
		publicURL:      pc.PublicURL,
		integrationURL: pc.IntegrationURL,
		log:            log.New(PIVOTAL),
	}, nil
}
// NewLifeCycle creates a new lifecycle management system. Everything begins
// with an autoscaling resource: we listen for any change on that resource. To
// be able to listen, we attach a notification configuration to the given
// autoscaling resource. The notification configuration works with a TopicARN,
// which is basically an SNS topic. To be able to listen to a topic ARN we need
// an SQS queue; the queue is attached to the notification topic and configured
// to pass events as soon as they occur, and it also has a retry mechanism.
// Any single event is handled by only one manager, so there won't be any race
// condition on processing a particular message. The manager is idempotent: if
// any given resource doesn't exist in the given AWS system, it will create it
// or re-use the previous one.
func NewLifeCycle(session *session.Session, log logging.Logger, asgName string) *LifeCycle {
	return &LifeCycle{
		closed:      false,
		closeChan:   make(chan chan struct{}),
		ec2:         ec2.New(session),
		sqs:         sqs.New(session),
		sns:         sns.New(session),
		autoscaling: autoscaling.New(session),
		asgName:     &asgName,
		log:         log.New("lifecycle"),
	}
}
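// A minimal wiring sketch for the lifecycle manager (the function name is
// hypothetical). It assumes an aws-sdk-go session and the Close method seen in
// registerSignalHandler below; how event listening is started is not shown in
// these snippets, so only construction and shutdown are illustrated.
func runLifeCycle(log logging.Logger) error {
	sess, err := session.NewSession()
	if err != nil {
		return err
	}

	lc := NewLifeCycle(sess, log, "my-autoscaling-group")
	defer lc.Close()

	// ... attach the notification configuration and process events here.
	return nil
}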
// fetchHostedZone fetches all hosted zones from the account and iterates over
// them until it finds the respective one
func (r *RecordManager) fetchHostedZone(hostedZoneLogger logging.Logger) error {
	const maxIterationCount = 100
	iteration := 0

	// for pagination
	var nextMarker *string

	// try to get our hosted zone
	for {
		// just be paranoid about remote api calls, don't harden too much
		if iteration == maxIterationCount {
			return errors.New("iteration terminated")
		}

		log := hostedZoneLogger.New("iteration", iteration)

		iteration++

		log.Debug("Fetching hosted zone")

		listHostedZonesResp, err := r.route53.ListHostedZones(
			&route53.ListHostedZonesInput{
				Marker: nextMarker,
			}, // we don't have anything to filter
		)
		if err != nil {
			return err
		}

		if listHostedZonesResp == nil || listHostedZonesResp.HostedZones == nil {
			return errors.New("malformed response - response or hosted zone is nil")
		}

		for _, hostedZone := range listHostedZonesResp.HostedZones {
			if hostedZone == nil || hostedZone.CallerReference == nil {
				continue
			}

			if *hostedZone.CallerReference == r.hostedZoneConf.CallerReference {
				r.hostedZone = hostedZone
				return nil
			}
		}

		// if our result set is truncated we can try to fetch again, but if we
		// have reached the end, there is nothing left to do
		if listHostedZonesResp.IsTruncated == nil || !*listHostedZonesResp.IsTruncated {
			return errHostedZoneNotFound
		}

		// assign next marker
		nextMarker = listHostedZonesResp.NextMarker
	}
}
func New(log logging.Logger, client algoliasearch.Client, indexSuffix string) *Controller {
	// TODO later on listen channel_participant_added event and remove this koding channel fetch
	c := models.NewChannel()
	q := request.NewQuery()
	q.GroupName = "koding"
	q.Name = "public"
	q.Type = models.Channel_TYPE_GROUP

	channel, err := c.ByName(q)
	if err != nil {
		log.Error("Could not fetch koding channel: %s", err)
	}

	var channelId string
	if channel.Id != 0 {
		channelId = strconv.FormatInt(channel.Id, 10)
	}

	controller := &Controller{
		log:    log,
		client: client,
		indexes: &IndexSet{
			IndexTopics: &IndexSetItem{
				Index: client.InitIndex(IndexTopics + indexSuffix),
				Settings: &Settings{
					// empty slice means all properties will be searchable
					AttributesToIndex: []string{},
				},
			},
			IndexAccounts: &IndexSetItem{
				Index: client.InitIndex(IndexAccounts + indexSuffix),
				Settings: &Settings{
					AttributesToIndex: []string{
						"nick",
						"email",
						"firstName",
						"lastName",
						"_tags",
					},
					UnretrievableAttributes: []string{"email"},
				},
			},
			IndexMessages: &IndexSetItem{
				Index: client.InitIndex(IndexMessages + indexSuffix),
				Settings: &Settings{
					AttributesToIndex: []string{},
				},
			},
		},
		kodingChannelId: channelId,
	}

	return controller
}
// New creates a new terraformer
func New(conf *Config, log logging.Logger) (*Terraformer, error) {
	ls, err := storage.NewFile(conf.LocalStorePath, log)
	if err != nil {
		return nil, fmt.Errorf("error while creating local store: %s", err)
	}

	var rs storage.Interface
	if conf.AWS.Key != "" && conf.AWS.Secret != "" && conf.AWS.Bucket != "" {
		s3, err := storage.NewS3(conf.AWS.Key, conf.AWS.Secret, conf.AWS.Bucket, log)
		if err != nil {
			return nil, fmt.Errorf("error while creating remote store: %s", err)
		}

		rs = s3
	} else {
		remotePath := filepath.Dir(conf.LocalStorePath)
		if conf.AWS.Bucket != "" {
			remotePath = filepath.Join(remotePath, conf.AWS.Bucket)
		} else {
			remotePath = filepath.Join(remotePath, filepath.Base(conf.LocalStorePath)+".remote")
		}

		local, err := storage.NewFile(remotePath, log)
		if err != nil {
			return nil, fmt.Errorf("error while creating remote store on local: %s", err)
		}

		log.Info("no S3 credentials, using local storage: %s", remotePath)

		rs = local
	}

	c, err := kodingcontext.New(ls, rs, log, conf.Debug)
	if err != nil {
		return nil, err
	}

	t := &Terraformer{
		Log:       log,
		Metrics:   common.MustInitMetrics(Name),
		Debug:     conf.Debug,
		Context:   c,
		Config:    conf,
		closeChan: make(chan struct{}),
	}

	t.handleSignals()

	return t, nil
}
// FactoryCompletion implements codegangsta's cli.Command's bash completion field
func FactoryCompletion(factory CommandFactory, log logging.Logger, cmdName string) cli.BashCompleteFunc {
	return func(c *cli.Context) {
		cmd := factory(c, log, cmdName)

		// If the command implements AutocompleteCommand, run the autocomplete.
		if aCmd, ok := cmd.(AutocompleteCommand); ok {
			if err := aCmd.Autocomplete(c.Args()...); err != nil {
				log.Error(
					"Autocompletion of a command encountered error. command:%s, err:%s",
					cmdName, err,
				)
			}
		}
	}
}
// ExitErrAction implements a cli.Command's Action field for an ExitingErrCommand
func ExitErrAction(f ExitingErrCommand, log logging.Logger, cmdName string) cli.ActionFunc {
	return func(c *cli.Context) error {
		exit, err := f(c, log, cmdName)
		if err != nil {
			log.Error("ExitErrAction encountered error. err:%s", err)

			// Print error message to the user.
			fmt.Fprintf(os.Stderr, "error executing %q command: %s\n", cmdName, err)
		}

		Close()
		ExitFunc(exit)

		return nil
	}
}
// provisionData creates the base64-json-encoded userdata.Value to be sent
// along with the create request.
func (cmd *VagrantCreate) provisionData(log logging.Logger) (string, error) {
	kiteID := uuid.NewV4().String()

	keycreator := &keycreator.Key{
		KontrolURL:        cmd.KontrolURL,
		KontrolPrivateKey: defaultPrivateKey,
		KontrolPublicKey:  defaultPublicKey,
	}

	kiteKey, err := keycreator.Create(cmd.Username, kiteID)
	if err != nil {
		return "", err
	}

	data := &puser.Value{
		Username:        cmd.Username,
		Groups:          []string{"sudo"},
		Hostname:        cmd.Username,
		KiteKey:         kiteKey,
		LatestKlientURL: cmd.KlientURL,
		RegisterURL:     cmd.RegisterURL,
		KontrolURL:      cmd.KontrolURL,
	}

	log.Debug("provision data: %+v", data)

	p, err := json.Marshal(data)
	if err != nil {
		return "", err
	}

	return base64.StdEncoding.EncodeToString(p), nil
}
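// For illustration, the consumer side of provisionData - a sketch of how the
// provisioner would reverse the encoding (this helper is hypothetical, not
// taken from the source): base64-decode the userdata value, then unmarshal
// the JSON back into a puser.Value.
func decodeProvisionData(encoded string) (*puser.Value, error) {
	p, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		return nil, err
	}

	var v puser.Value
	if err := json.Unmarshal(p, &v); err != nil {
		return nil, err
	}

	return &v, nil
}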
// NewBadRequestWithLogger is creating a new http response with predefined http
// response properties, it uses a special logger for outputting callstack
// properly
func NewBadRequestWithLogger(l logging.Logger, err error) (int, http.Header, interface{}, error) {
	if err == nil {
		err = errors.New("request is not valid")
	}

	// make sure errors are outputted
	l.Debug("Bad Request: %s", err)

	// do not expose errors to the client
	env := config.MustGet().Environment
	if env != "dev" && env != "test" && socialApiEnv != "wercker" {
		err = genericError
	}

	return http.StatusBadRequest, nil, nil, BadRequest{err}
}
func (r *Remote) createNewMachineFromKite(c *KodingClient, log logging.Logger) error {
	var host string
	host, err := r.hostFromClient(c.Client)
	if err != nil {
		log.Error("Unable to extract host from *kite.Client. err:%s", err)
		return err
	}

	// For backwards compatibility, check if the host already has a name
	// in the cache.
	//
	// If the name does not exist in the host the string will be empty, and
	// Machines.Add() will create a new unique name.
	//
	// If the string *does* exist then we use that, remove it from the map,
	// and save the map to avoid dealing with this next time.
	name, ok := r.machineNamesCache[host]
	if ok {
		log.Info(
			"Using legacy name, and removing it from database. name:%s, host:%s",
			name, host,
		)
		delete(r.machineNamesCache, host)

		// Shouldn't bother exiting here, not a terrible error.. but not good,
		// either. Log it for knowledge, and move on.
		if err := r.saveMachinesNames(); err != nil {
			log.Error("Failed to save machine names. err:%s", err)
		}
	}

	// Name can be empty here, since Machines.Add() will handle creation
	// of the name.
	machineMeta := machine.MachineMeta{
		URL:          c.URL,
		IP:           host,
		Hostname:     c.Hostname,
		Username:     c.Username,
		Name:         name,
		MachineLabel: c.MachineLabel,
		Teams:        c.Teams,
	}

	newMachine, err := machine.NewMachine(machineMeta, r.log, c.Client)
	if err != nil {
		return err
	}

	if err = r.machines.Add(newMachine); err != nil {
		log.Error("Unable to Add new machine to *machine.Machines. err:%s", err)
		return err
	}

	return nil
}
func registerSignalHandler(l *asgd.LifeCycle, log logging.Logger) chan struct{} {
	done := make(chan struct{}, 1)

	go func() {
		signals := make(chan os.Signal, 1)
		signal.Notify(signals)

		sig := <-signals
		switch sig {
		case syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGSTOP, syscall.SIGKILL:
			log.Info("received exit signal, closing...")
			err := l.Close()
			if err != nil {
				log.Critical(err.Error())
			}
			close(done)
		}
	}()

	return done
}
// StopCommand stops the local klient. Requires sudo.
func StopCommand(c *cli.Context, log logging.Logger, _ string) int {
	if len(c.Args()) != 0 {
		cli.ShowCommandHelp(c, "stop")
		return 1
	}

	s, err := newService(nil)
	if err != nil {
		log.Error("Error creating Service. err:%s", err)
		fmt.Println(GenericInternalError)
		return 1
	}

	if err := s.Stop(); err != nil {
		log.Error("Error stopping Service. err:%s", err)
		fmt.Println(FailedStopKlient)
		return 1
	}

	err = WaitUntilStopped(config.Konfig.Endpoints.Klient.Private.String(), CommandAttempts, CommandWaitTime)
	if err != nil {
		log.Error(
			"Timed out while waiting for Klient to stop. attempts:%d, err:%s",
			CommandAttempts, err,
		)
		fmt.Println(FailedStopKlient)
		return 1
	}

	fmt.Printf("Successfully stopped %s\n", config.KlientName)

	return 0
}
// NewSSHCommand is the required initializer for SSHCommand.
func NewSSHCommand(log logging.Logger, opts SSHCommandOpts) (*SSHCommand, error) {
	usr, err := user.Current()
	if err != nil {
		return nil, err
	}

	klientKite, err := klient.CreateKlientWithDefaultOpts()
	if err != nil {
		return nil, err
	}

	if err := klientKite.Dial(); err != nil {
		log.New("NewSSHCommand").Error("Dialing local klient failed. err:%s", err)
		return nil, ErrLocalDialingFailed
	}

	k := klient.NewKlient(klientKite)

	return &SSHCommand{
		Klient: k,
		Log:    log.New("SSHCommand"),
		Ask:    opts.Ask,
		Debug:  opts.Debug,
		SSHKey: &SSHKey{
			Log:            log.New("SSHKey"),
			Debug:          opts.Debug,
			RemoteUsername: opts.RemoteUsername,
			KeyPath:        path.Join(usr.HomeDir, config.SSHDefaultKeyDir),
			KeyName:        config.SSHDefaultKeyName,
			Klient:         k,
		},
	}, nil
}
func (l *LifeCycle) createQueue(sqsLogger logging.Logger, queueName string) error {
	if l.sqs == nil {
		return errSQSNotSet
	}

	// CreateQueue is idempotent: if the queue is already created, it returns
	// the existing one. All Attributes should be the same across consecutive
	// calls.
	createQueueResp, err := l.sqs.CreateQueue(
		&sqs.CreateQueueInput{
			QueueName:  aws.String(queueName), // Required
			Attributes: attributes,
		},
	)
	if err != nil {
		return err
	}

	l.queueURL = createQueueResp.QueueUrl // don't forget to assign queue url

	sqsLogger.Debug("SQS Queue is created")

	return nil
}
// FactoryAction implements a cli.Command's Action field.
func FactoryAction(factory CommandFactory, log logging.Logger, cmdName string) cli.ActionFunc {
	return func(c *cli.Context) error {
		cmd := factory(c, log, cmdName)

		exit, err := cmd.Run()

		// For API reasons, we may return an error but a zero exit code. So we
		// want to check and log both.
		if exit != 0 || err != nil {
			log.Error(
				"Command encountered error. command:%s, exit:%d, err:%s",
				cmdName, exit, err,
			)

			// Print error message to the user.
			fmt.Fprintf(os.Stderr, "error executing %q command: %s\n", cmdName, err)
		}

		Close()
		ExitFunc(exit)

		return nil
	}
}
// MachineSSHCommand allows SSHing into a remote machine.
func MachineSSHCommand(c *cli.Context, log logging.Logger, _ string) (int, error) {
	// SSH command must have exactly one identifier.
	idents, err := getIdentifiers(c)
	if err != nil {
		return 1, err
	}
	if err := identifiersLimit(idents, 1, 1); err != nil {
		return 1, err
	}

	opts := &machine.SSHOptions{
		Identifier: idents[0],
		Username:   c.String("username"),
		Log:        log.New("machine:ssh"),
	}

	if err := machine.SSH(opts); err != nil {
		return 1, err
	}

	return 0, nil
}