// portCheck verifies that the Transmission peer port is reachable. If not, it
// stops transmission, requests a fresh tunnel IP and forwarded port, rewrites
// the settings file, and restarts transmission.
func portCheck(t *process.Process, c *config.Config, ctx context.Context) error {
	if transmission.
		NewRawClient(c.Transmission.URL.String(), c.Transmission.User, c.Transmission.Pass).
		CheckPort() {
		return nil
	}
	logger.Infof("Transmission port not open, stopping transmission")
	t.Stop()

	ip, er := getIP(c.OpenVPN.Tun, c.Timeout.Duration, ctx)
	if er != nil {
		return er
	}
	logger.Infof("New bind ip: (%s) %s", c.OpenVPN.Tun, ip)

	port, er := getPort(ip, c.PIA.User, c.PIA.Pass, c.PIA.ClientID, c.Timeout.Duration, ctx)
	if er != nil {
		return er
	}
	logger.Infof("New peer port: %d", port)

	if er := transmission.UpdateSettings(c.Transmission.Config, ip, port); er != nil {
		return er
	}

	logger.Infof("Starting transmission")
	go t.ExecuteAndRestart(ctx)
	return nil
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	kingpin.Version(version)
	kingpin.MustParse(app.Parse(os.Args[1:]))
	logger.Configure(*level, "[transmon] ", os.Stdout)
	logger.Infof("Starting transmon version %v", version)

	c, stop := context.WithCancel(context.Background())
	conf, er := config.ReadAndWatch(*conf, c)
	if er != nil {
		logger.Fatalf("Failed to read config: %v", er)
	}

	go workers(conf, c, stop)
	if conf.Cleaner.Enabled {
		go cleaner(conf, c)
	}

	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGTERM, syscall.SIGINT)
	<-sig
	logger.Infof("Received interrupt, shutting down...")
	close(sig)
	stop()
	time.Sleep(3 * time.Second)
	os.Exit(0)
}
// portUpdate requests a fresh bind IP and forwarded port from PIA and pushes
// the new peer port to the running transmission instance, retrying the RPC
// with exponential backoff until the configured timeout elapses.
func portUpdate(c *config.Config, ctx context.Context) error {
	ip, er := getIP(c.OpenVPN.Tun, c.Timeout.Duration, ctx)
	if er != nil || ctx.Err() != nil {
		return er
	}
	logger.Infof("New bind ip: (%s) %s", c.OpenVPN.Tun, ip)

	port, er := getPort(ip, c.PIA.User, c.PIA.Pass, c.PIA.ClientID, c.Timeout.Duration, ctx)
	if er != nil || ctx.Err() != nil {
		return er
	}
	logger.Infof("New peer port: %d", port)

	notify := func(e error, w time.Duration) {
		logger.Debugf("Failed to update transmission port (retry in %v): %v", w, e)
	}
	operation := func() error {
		select {
		case <-ctx.Done():
			return nil
		default:
			return transmission.
				NewRawClient(c.Transmission.URL.String(), c.Transmission.User, c.Transmission.Pass).
				UpdatePort(port)
		}
	}

	b := backoff.NewExponentialBackOff()
	b.MaxElapsedTime = c.Timeout.Duration
	return backoff.RetryNotify(operation, b, notify)
}
// startProcesses launches openvpn, waits for the tunnel IP and forwarded
// port, writes them into the transmission settings file, and then starts
// transmission under the configured UID/GID.
func startProcesses(t, v *process.Process, c *config.Config, ctx context.Context) error {
	t.SetUser(uint32(c.Transmission.UID), uint32(c.Transmission.GID))

	logger.Infof("Starting openvpn")
	go v.ExecuteAndRestart(ctx)

	ip, er := getIP(c.OpenVPN.Tun, c.Timeout.Duration, ctx)
	if er != nil {
		return er
	}
	logger.Infof("New bind ip: (%s) %s", c.OpenVPN.Tun, ip)

	port, er := getPort(ip, c.PIA.User, c.PIA.Pass, c.PIA.ClientID, c.Timeout.Duration, ctx)
	if er != nil {
		return er
	}
	logger.Infof("New peer port: %d", port)

	if er := transmission.UpdateSettings(c.Transmission.Config, ip, port); er != nil {
		return er
	}

	logger.Infof("Starting transmission")
	go t.ExecuteAndRestart(ctx)
	return nil
}
// deleteResources removes the frontend and backend associated with each
// kubernetes resource, committing the deletions one resource at a time.
func deleteResources(e *Engine, resources kubernetes.ResourceList) error {
	for _, rsc := range resources {
		backend, er := e.NewBackend(rsc)
		if er != nil {
			return er
		}
		frontend, er := e.NewFrontend(rsc)
		if er != nil {
			return er
		}

		fn := func() error {
			logger.Infof("Removing %v", frontend)
			if er := e.DeleteFrontend(frontend); er != nil {
				return er
			}
			logger.Infof("Removing %v", backend)
			return e.DeleteBackend(backend)
		}
		if er := e.Commit(fn); er != nil {
			return er
		}
	}
	return nil
}
// UpsertBackend upserts the backend and its servers, then removes any
// servers that exist upstream but are no longer part of the backend.
func (v *vulcan) UpsertBackend(ba loadbalancer.Backend) error {
	b, ok := ba.(*backend)
	if !ok {
		return loadbalancer.ErrUnexpectedBackendType
	}
	if er := v.Client.UpsertBackend(b.Backend); er != nil {
		return er
	}

	// Track existing upstream servers so stale ones can be removed.
	extra := make(map[string]loadbalancer.Server)
	ss, _ := v.Client.GetServers(engine.BackendKey{Id: b.GetID()})
	for i := range ss {
		extra[ss[i].GetId()] = &server{ss[i]}
	}

	for _, srv := range b.servers {
		logger.Infof("Upserting %v", srv)
		if er := v.UpsertServer(b, srv); er != nil {
			return er
		}
		delete(extra, srv.GetID())
	}
	for _, srv := range extra {
		logger.Infof("Removing %v", srv)
		v.DeleteServer(b, srv)
	}
	return nil
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	kingpin.Version(getVersion())
	kingpin.MustParse(ro.Parse(os.Args[1:]))
	logger.Configure(*logLevel, "[romulusd] ", os.Stdout)
	logger.Infof("Starting up romulusd version=%s", getVersion())

	ctx, cancel := context.WithCancel(context.Background())
	lb, er := getLBProvider(*provider, ctx)
	if er != nil {
		logger.Fatalf(er.Error())
	}

	kubernetes.Keyspace = normalizeAnnotationsKey(*annoKey)
	ng, er := NewEngine((*kubeAPI).String(), *kubeUser, *kubePass, *kubeSec, lb, *timeout, ctx)
	if er != nil {
		logger.Fatalf(er.Error())
	}
	if er := ng.Start(*selector, *resync); er != nil {
		logger.Fatalf(er.Error())
	}

	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGTERM, syscall.SIGINT)
	<-sig
	logger.Infof("Shutting Down...")
	cancel()
	time.Sleep(100 * time.Millisecond)
	os.Exit(0)
}
// LogCallback logs the kubernetes object a watch callback was invoked with.
func LogCallback(callback string, obj interface{}) {
	format := "%s %s"
	switch t := obj.(type) {
	case *extensions.Ingress:
		logger.Infof(format, callback, Ingress(*t))
	case *api.Service:
		logger.Infof(format, callback, Service(*t))
	case *api.Endpoints:
		logger.Infof(format, callback, Endpoints(*t))
	default:
		logger.Warnf(format, callback, "<unknown>")
	}
}
// addResources builds backends (with their servers) and frontends (with
// their middlewares) for each kubernetes resource, then upserts them all in
// a single commit.
func addResources(e *Engine, resources kubernetes.ResourceList) error {
	backends := make([]loadbalancer.Backend, 0, len(resources))
	frontends := make([]loadbalancer.Frontend, 0, len(resources))

	for _, rsc := range resources {
		logger.Debugf("[%v] Build Frontends and Backends", rsc.ID())

		backend, er := e.NewBackend(rsc)
		if er != nil {
			return er
		}
		srvs, er := e.NewServers(rsc)
		if er != nil {
			return er
		}
		for i := range srvs {
			backend.AddServer(srvs[i])
		}
		logger.Debugf("[%v] Created new object: %v", rsc.ID(), backend)
		backends = append(backends, backend)

		frontend, er := e.NewFrontend(rsc)
		if er != nil {
			return er
		}
		mids, er := e.NewMiddlewares(rsc)
		if er != nil {
			return er
		}
		for i := range mids {
			frontend.AddMiddleware(mids[i])
		}
		frontends = append(frontends, frontend)
		logger.Debugf("[%v] Created new object: %v", rsc.ID(), frontend)
	}

	return e.Commit(func() error {
		for _, backend := range backends {
			logger.Infof("Upserting %v", backend)
			if er := e.UpsertBackend(backend); er != nil {
				return er
			}
		}
		for _, frontend := range frontends {
			logger.Infof("Upserting %v", frontend)
			if er := e.UpsertFrontend(frontend); er != nil {
				return er
			}
		}
		return nil
	})
}
// CleanTorrents inspects all torrents, accumulates failure counts across
// runs in the seen map, and removes torrents that have failed, retrying each
// removal with exponential backoff.
func (c *Client) CleanTorrents() error {
	logger.Infof("Running torrent cleaner")
	torrents, er := c.GetTorrents()
	if er != nil {
		return er
	}
	torrents.SortByID(false)
	logger.Infof("Found %d torrents to process", len(torrents))

	for _, t := range torrents {
		logger.Debugf("[Torrent %d: %q] Checking status", t.ID, t.Name)
		id := util.Hashf(md5.New(), t.ID, t.Name)
		status := &torrentStatus{Torrent: t, id: id, failures: 0}
		status.setFailures()
		if st, ok := seen[id]; ok {
			status.failures = status.failures + st.failures
			if !updated(st.Torrent, status.Torrent) {
				status.failures++
			}
		}
		seen[id] = status
		logger.Debugf("[Torrent %d: %q] Failures: %d", t.ID, t.Name, status.failures)
	}

	b := backoff.NewExponentialBackOff()
	b.MaxElapsedTime = 15 * time.Second
	remove := make([]*torrentStatus, 0, 1)
	for _, t := range seen {
		if t.failed() {
			b.Reset()
			logger.Infof("[Torrent %d: %q] Removing", t.ID, t.Name)
			er := backoff.RetryNotify(delTorrent(c, t.Torrent), b, func(e error, w time.Duration) {
				logger.Errorf("[Torrent %d: %q] Failed to remove (retry in %v): %v", t.ID, t.Name, w, e)
			})
			if er == nil {
				remove = append(remove, t)
			} else {
				logger.Errorf("[Torrent %d: %q] Failed to remove, will retry next cycle", t.ID, t.Name)
			}
		}
	}
	for i := range remove {
		delete(seen, remove[i].id)
	}
	return nil
}
// ReadAndWatch reads the config file and polls it every 5 seconds for
// changes until the context is cancelled.
func ReadAndWatch(file string, ctx context.Context) (*Config, error) {
	c, er := Read(file)
	if er != nil {
		return c, er
	}

	go func() {
		logger.Debugf("Watching for config changes: config=%q", file)
		t := time.NewTicker(5 * time.Second)
		defer t.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-t.C:
				if er, ok := c.update(); er == nil && ok {
					logger.Infof("Config updated")
				}
			}
		}
	}()
	return c, nil
}
// UpdatePort issues a session-set RPC to transmission to change the peer
// port, enable port forwarding, and disable random ports on start.
func (r *RawClient) UpdatePort(port int) error {
	req, tag := newRequest("session-set",
		"peer-port", port,
		"port-forwarding-enabled", true,
		"peer-port-random-on-start", false,
	)
	logger.Debugf("Encoding %v", req)
	body, er := json.Marshal(req)
	if er != nil {
		return er
	}

	logger.Debugf("Requesting transmission peer port update to %d", port)
	out, er := r.Post(string(body))
	if er != nil {
		return er
	}

	response := new(response)
	if er := json.Unmarshal(out, response); er != nil {
		return er
	}
	if response.Tag != tag {
		return errors.New("request and response tags do not match")
	}
	if response.Result != "success" {
		return errors.New(response.Result)
	}

	logger.Infof("Peer port updated to %d", port)
	return nil
}
// UpdateSettings rewrites the transmission settings file with the new bind
// IP and peer port, preserving the file's permissions.
func UpdateSettings(path, ip string, port int) error {
	logger.Infof("Updating transmission settings. bind-ip=%s port=%d", ip, port)
	data, er := ioutil.ReadFile(path)
	if er != nil {
		return er
	}
	s, er := simplejson.NewJson(data)
	if er != nil {
		return er
	}

	s.Set(bindKey, ip)
	s.Set(portKey, port)
	s.Set(forwardKey, true)
	s.Set(randomKey, false)

	data, er = s.Encode()
	if er != nil {
		return er
	}

	logger.Debugf("Writing updated transmission settings")
	info, er := os.Stat(path)
	if er != nil {
		return er
	}
	return ioutil.WriteFile(path, data, info.Mode().Perm())
}
// setFailures updates the failure count based on the torrent's error and
// finished state.
func (s *torrentStatus) setFailures() {
	switch {
	case s.Error != 0:
		logger.Warnf("[Torrent %d: %q] Error: %s", s.ID, s.Name, s.ErrorString)
		s.failures++
	case s.IsFinished:
		logger.Infof("[Torrent %d: %q] Finished", s.ID, s.Name)
		s.failures = 3
	}
}
// cleaner runs the torrent cleaner on the configured interval until the
// context is cancelled.
func cleaner(conf *config.Config, c context.Context) {
	var (
		d     = conf.Cleaner.Interval.Duration
		clean = time.NewTicker(d)
	)
	logger.Infof("Torrent cleaner will run once every %v", d)

	for {
		select {
		case <-c.Done():
			clean.Stop()
			return
		case t := <-clean.C:
			logger.Infof("Running scheduled torrent cleaning at %v", t)
			er := transmission.
				NewClient(conf.Transmission.URL.String(), conf.Transmission.User, conf.Transmission.Pass).
				CleanTorrents()
			if er != nil {
				logger.Errorf(er.Error())
			}
		}
	}
}
// createKubernetesCallbacks starts watch controllers for Endpoints, Services,
// and Ingresses that feed events back into the engine.
func createKubernetesCallbacks(e *Engine, selector kubernetes.Selector, resync time.Duration) error {
	var (
		uc = e.GetUnversionedClient()
		ec = e.GetExtensionsClient()
	)
	logger.Infof("Starting kubernetes watchers")

	_, endpoint := kubernetes.CreateFullController(kubernetes.EndpointsKind, e, uc, selector, resync)
	_, service := kubernetes.CreateFullController(kubernetes.ServicesKind, e, uc, selector, resync)
	_, ingress := kubernetes.CreateFullController(kubernetes.IngressesKind, e, ec, selector, resync)

	go endpoint.Run(e.Done())
	go service.Run(e.Done())
	go ingress.Run(e.Done())
	return nil
}
// createObjectCache builds kubernetes object caches for Services, Endpoints,
// and Ingresses and attaches them to the engine.
func createObjectCache(e *Engine, selector kubernetes.Selector, resync time.Duration) {
	var (
		uc = e.GetUnversionedClient()
		ec = e.GetExtensionsClient()
	)
	logger.Infof("Creating kubernetes object cache")

	service, er := kubernetes.CreateStore(kubernetes.ServicesKind, uc, selector, resync, e.Context)
	if er != nil {
		logger.Warnf("Failed to create Service cache")
	}
	endpoints, er := kubernetes.CreateStore(kubernetes.EndpointsKind, uc, selector, resync, e.Context)
	if er != nil {
		logger.Warnf("Failed to create Endpoints cache")
	}
	ingress, er := kubernetes.CreateStore(kubernetes.IngressesKind, ec, selector, resync, e.Context)
	if er != nil {
		logger.Warnf("Failed to create Ingress cache")
	}

	e.SetIngressStore(ingress)
	e.SetServiceStore(service)
	e.SetEndpointsStore(endpoints)
}
// workers starts openvpn and transmission, then runs periodic port checks,
// scheduled port updates, and daily restarts until the context is cancelled.
func workers(conf *config.Config, c context.Context, quit context.CancelFunc) {
	var (
		port    = time.NewTicker(portInterval)
		restart = time.NewTicker(restartInterval)
		check   = time.NewTicker(checkInterval)
	)
	logger.Infof("Port update will run once every hour")
	logger.Infof("VPN restart will run once every day")

	trans, er := process.New("transmission", conf.Transmission.Command, os.Stdout)
	if er != nil {
		quit()
		logger.Fatalf(er.Error())
	}
	vpn, er := process.New("openvpn", conf.OpenVPN.Command, os.Stdout)
	if er != nil {
		quit()
		logger.Fatalf(er.Error())
	}
	if er := startProcesses(trans, vpn, conf, c); er != nil {
		quit()
		logger.Fatalf(er.Error())
	}
	portUpdate(conf, c)

	// stopAll tears down the tickers and the managed processes.
	stopAll := func() {
		port.Stop()
		restart.Stop()
		check.Stop()
		trans.Stop()
		vpn.Stop()
	}

	for {
		select {
		case <-c.Done():
			stopAll()
			return
		case t := <-check.C:
			logger.Debugf("Checking transmission port at %v", t)
			if er := portCheck(trans, conf, c); er != nil {
				if er := restartProcesses(trans, vpn, conf, c); er != nil {
					stopAll()
					logger.Fatalf(er.Error())
				}
			}
		case t := <-port.C:
			logger.Infof("Update of Transmission port at %v", t)
			if er := portUpdate(conf, c); er != nil {
				if er := restartProcesses(trans, vpn, conf, c); er != nil {
					stopAll()
					logger.Fatalf(er.Error())
				}
			}
		case t := <-restart.C:
			logger.Infof("Restarting Transmission and OpenVPN at %v", t)
			if er := restartProcesses(trans, vpn, conf, c); er != nil {
				stopAll()
				logger.Fatalf(er.Error())
			}
		}
	}
}
func main() {
	app.Version(version)
	command := kingpin.MustParse(app.Parse(os.Args[1:]))
	logger.Configure(*logLevel, "[runchef] ", os.Stdout)

	switch command {
	case enable.FullCommand():
		cli, er := ezd.New(etcdEp, etcdTo)
		if er != nil {
			logger.Fatalf(er.Error())
		}
		if reason, er := cli.Get(disableKey); er == nil {
			if er := cli.Delete(disableKey); er != nil {
				logger.Fatalf(er.Error())
			}
			logger.Infof("Chef is now enabled! (Was disabled with reason: %s)", reason)
		} else {
			logger.Infof("Chef is already enabled.")
		}

	case disable.FullCommand():
		cli, er := ezd.New(etcdEp, etcdTo)
		if er != nil {
			logger.Fatalf(er.Error())
		}
		if reason, er := cli.Get(disableKey); er == nil {
			logger.Infof("Chef is already disabled with reason: %s", reason)
		} else {
			if er := cli.Set(disableKey, *disableReason); er != nil {
				logger.Fatalf(er.Error())
			}
			logger.Infof("Chef disabled with reason: %s", *disableReason)
		}

	case shell.FullCommand():
		c := *shellImage
		if len(*shellContainer) > 0 {
			c = *shellContainer
			*pullImage = false
		}
		if er := runShell(c, *shellCache, *pullImage); er != nil {
			logger.Fatalf(er.Error())
		}

	case client.FullCommand():
		c := *clientImage
		if len(*clientContainer) > 0 {
			c = *clientContainer
			*pullImage = false
		}
		cli, er := ezd.New(etcdEp, etcdTo)
		if er != nil {
			logger.Fatalf(er.Error())
		}
		if reason, er := cli.Get(disableKey); er == nil {
			logger.Infof("Chef is disabled: %v", reason)
			os.Exit(0)
		}
		defer cleanupChef()
		newClientRB(filepath.Join(*chefDir, "client.rb"), *clientName, *clientEnv, *sslVerify).write()
		if er := runChef(c, *clientCache, *clientEnv, *clientRunlist, *clientForceFmt, *clientLocal, *pullImage); er != nil {
			logger.Fatalf(er.Error())
		}
	}
}