func (g *Governator) startServer() error {
	q := newQuit()
	scheme, addr, err := parseServerAddr(g.ServerAddr)
	if err != nil {
		return err
	}
	if scheme == "unix" {
		// Remove any stale socket left over from a previous run
		os.Remove(addr)
	}
	server, err := net.Listen(scheme, addr)
	if err != nil {
		return err
	}
	if scheme == "unix" {
		if gid := getGroupId(AppName); gid >= 0 {
			os.Chown(addr, 0, gid)
			os.Chmod(addr, 0775)
		}
	}
	conns := make(chan net.Conn, 10)
	go func() {
		for {
			conn, err := server.Accept()
			if err != nil {
				log.Errorf("error accepting connection: %s", err)
				// conn is nil on error, don't use it
				continue
			}
			log.Debugf("new connection %s", conn.RemoteAddr())
			conns <- conn
		}
	}()
	go func() {
		for {
			select {
			case <-q.stop:
				if scheme == "unix" {
					os.Remove(addr)
				}
				q.sendStopped()
				return
			case conn := <-conns:
				go func() {
					if err := g.serveConn(conn); err != nil {
						log.Errorf("error serving connection: %s", err)
					}
				}()
			}
		}
	}()
	g.mu.Lock()
	defer g.mu.Unlock()
	g.quits = append(g.quits, q)
	return nil
}
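// The quit helper used above is defined elsewhere. The following is a
// minimal sketch of what it might look like, assuming the stop/stopped
// channel pattern implied by its call sites in startServer, Run and
// startWatching (the field and method names are inferred, not confirmed):
type quit struct {
	stop    chan struct{}
	stopped chan struct{}
}

func newQuit() *quit {
	return &quit{
		stop:    make(chan struct{}),
		stopped: make(chan struct{}),
	}
}

// sendStop asks the goroutine owning this quit to shut down.
func (q *quit) sendStop() { close(q.stop) }

// waitForStop blocks until sendStop is called.
func (q *quit) waitForStop() { <-q.stop }

// sendStopped signals that the owning goroutine has finished shutting down.
func (q *quit) sendStopped() { close(q.stopped) }

// waitForStopped blocks until sendStopped is called.
func (q *quit) waitForStopped() { <-q.stopped }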
// Build builds the project. If the project was already building, the build
// is restarted.
func (p *Project) Build() {
	builder := &Builder{
		Dir:     p.dir,
		GoFlags: p.goFlags,
		Tags:    p.tags,
	}
	var restarted bool
	p.Lock()
	if p.builder != nil {
		p.builder.Cancel()
		restarted = true
	}
	p.builder = builder
	p.StopMonitoring()
	p.Unlock()
	if err := p.Stop(); err != nil {
		log.Panic(err)
	}
	p.errors = nil
	if !restarted {
		log.Infof("Building %s (%s)", p.Name(), builder.BuildCommandString())
	}
	var err error
	p.errors, err = builder.Build()
	p.Lock()
	defer p.Unlock()
	if p.builder != builder {
		// Canceled by another build
		return
	}
	p.builder = nil
	p.built = time.Now().UTC()
	if err != nil {
		log.Errorf("%d errors building %s", len(p.errors), p.Name())
		p.reloadClients()
	} else {
		if err := p.startLocked(); err != nil {
			log.Panic(err)
		}
	}
	if err := p.StartMonitoring(); err != nil {
		log.Errorf("Error monitoring files for project %s: %s. Development server must be manually restarted.", p.Name(), err)
	}
	// Build dependencies, to speed up future builds
	go func() {
		builder.GoInstallDeps()
	}()
}
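// A hypothetical usage sketch: because Build cancels any build already in
// flight, concurrent triggers (for example several file-change events) can
// call it without extra coordination. triggerRebuilds and its paths
// parameter are illustrative, not part of this file.
func triggerRebuilds(p *Project, paths []string) {
	for range paths {
		// Each call either starts a build or restarts the one in
		// progress; only the most recent builder's results are kept.
		go p.Build()
	}
}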
func ListHandler(ctx *app.Context) {
	var groups []*packageGroup
	dctx := getEnvironment(ctx)
	for _, gr := range appDocGroups(ctx) {
		var groupPackages []*doc.Package
		for _, v := range gr.Packages {
			pkgs, err := dctx.ImportPackages(packageDir(dctx, v))
			if err != nil {
				log.Errorf("error importing %s: %s", v, err)
				continue
			}
			groupPackages = append(groupPackages, pkgs...)
		}
		if len(groupPackages) > 0 {
			groups = append(groups, &packageGroup{
				Title:    gr.Title,
				Packages: groupPackages,
			})
		}
	}
	if len(groups) == 0 {
		ctx.NotFound("no packages to list")
		return
	}
	title := "Package Index"
	data := map[string]interface{}{
		"Header": title,
		"Title":  title,
		"Groups": groups,
	}
	ctx.MustExecute(PackagesTemplateName, data)
}
func (b *BuildError) Code() template.HTML {
	if b.Filename == "" {
		return template.HTML("")
	}
	s, err := runtimeutil.FormatSourceHTML(b.Filename, b.Line, 5, true, true)
	if err != nil {
		log.Errorf("error formatting code from %s: %s", b.Filename, err)
	}
	return s
}
func makeAssets(ctx *app.Context) {
	a := ctx.App()
	if cfg := a.Config(); cfg != nil {
		cfg.TemplateDebug = false
	}
	err := vfs.Walk(a.TemplatesFS(), "/", func(fs vfs.VFS, p string, info os.FileInfo, err error) error {
		if err != nil || info.IsDir() || p == "" || p[0] == '.' {
			return err
		}
		if _, err := a.LoadTemplate(p); err != nil {
			log.Errorf("error loading template %s: %s", p, err)
		}
		return nil
	})
	if err != nil {
		log.Errorf("error listing templates: %s", err)
	}
}
func (g *Governator) Run() error {
	g.mu.Lock()
	if g.quit != nil {
		g.mu.Unlock()
		return errors.New("governator already running")
	}
	g.quit = newQuit()
	g.mu.Unlock()
	go g.monitor.Run()
	if g.configDir != "" {
		if err := g.startWatching(); err != nil {
			log.Errorf("error watching %s, configuration won't be automatically updated: %s", g.servicesDir(), err)
		}
	}
	if g.ServerAddr != "" {
		if err := g.startServer(); err != nil {
			log.Errorf("error starting server, can't receive remote commands: %s", err)
		}
	}
	g.startServices(nil)
	g.quit.waitForStop()
	g.mu.Lock()
	for _, q := range g.quits {
		q.sendStop()
	}
	// Wait for goroutines to exit cleanly
	for _, q := range g.quits {
		q.waitForStopped()
	}
	g.quits = nil
	g.mu.Unlock() // Release the lock for stopServices
	g.stopServices(nil)
	g.mu.Lock()
	defer g.mu.Unlock()
	g.monitor.quit.sendStop()
	g.monitor.quit.waitForStopped()
	log.Debugf("daemon exiting")
	g.quit.sendStopped()
	g.quit = nil
	return nil
}
func (s *Sharer) share(ctx *app.Context) {
	last, err := s.provider.LastShare(ctx, s.service)
	if err != nil {
		log.Errorf("error finding last share time on %s: %s", s.service, err)
		return
	}
	if last.Before(time.Now().Add(-s.interval)) {
		item, err := s.provider.Item(ctx, s.service)
		if err != nil {
			log.Errorf("error finding next item for sharing on %s: %s", s.service, err)
			return
		}
		if item != nil {
			result, err := Share(ctx, s.service, item, s.config)
			if err != nil {
				log.Errorf("error sharing on %s: %s", s.service, err)
			}
			s.provider.Shared(ctx, s.service, item, result, err)
		}
	}
}
func devCommand(args *command.Args, opts *devOptions) error {
	if !opts.Verbose {
		log.SetLevel(log.LInfo)
	}
	dir := opts.Dir
	if dir == "" {
		dir = "."
	}
	path, err := filepath.Abs(dir)
	if err != nil {
		return err
	}
	configPath := findConfig(dir, opts.Config)
	if configPath == "" {
		name := opts.Config
		if name == "" {
			name = fmt.Sprintf("(tried %s)", strings.Join(autoConfigNames(), ", "))
		}
		log.Panicf("can't find configuration file %s in %s", name, dir)
	}
	log.Infof("Using config file %s", configPath)
	p := NewProject(path, configPath)
	p.port = opts.Port
	p.tags = opts.Tags
	p.goFlags = opts.GoFlags
	p.noDebug = opts.NoDebug
	p.noCache = opts.NoCache
	p.profile = opts.Profile
	go p.Build()
	log.Infof("Starting Gondola development server on port %d (press Control+C to exit)", p.port)
	if !opts.NoBrowser {
		time.AfterFunc(time.Second, func() {
			host := "localhost"
			if sshConn := os.Getenv("SSH_CONNECTION"); sshConn != "" {
				parts := strings.Split(sshConn, " ")
				// e.g. SSH_CONNECTION="10.211.55.2 56989 10.211.55.8 22"
				if len(parts) == 4 {
					if net.ParseIP(parts[2]) != nil {
						host = parts[2]
					}
				}
			}
			url := fmt.Sprintf("http://%s:%d", host, p.App.Config().Port)
			if err := browser.Open(url); err != nil {
				log.Errorf("error opening browser: open %s manually (error was %s)", url, err)
			}
		})
	}
	p.Listen()
	return nil
}
func (f *fileWriter) compressFile(name string) {
	log.Debugf("will compress %s", name)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := compressFile(name); err != nil {
			log.Errorf("error compressing %s: %s", name, err)
		}
	}()
	if f.waitCompress {
		wg.Wait()
	}
}
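// The compressFile helper called above is defined elsewhere. Below is a
// minimal sketch of a compatible implementation, assuming it gzips the
// rotated log and removes the uncompressed original (the real behavior is
// not shown in this file; uses compress/gzip, io and os). The name
// compressFileSketch is hypothetical, chosen to avoid clashing with the
// real helper.
func compressFileSketch(name string) error {
	in, err := os.Open(name)
	if err != nil {
		return err
	}
	defer in.Close()
	out, err := os.Create(name + ".gz")
	if err != nil {
		return err
	}
	w := gzip.NewWriter(out)
	if _, err := io.Copy(w, in); err != nil {
		out.Close()
		return err
	}
	if err := w.Close(); err != nil {
		out.Close()
		return err
	}
	if err := out.Close(); err != nil {
		return err
	}
	// Only remove the uncompressed original once the .gz is complete.
	return os.Remove(name)
}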
// Wrap takes an app.Handler and returns a new app.Handler
// wrapped by the Layer. Responses will be cached according
// to what the Layer's Mediator indicates. Note that when
// the environment variable GONDOLA_NO_CACHE_LAYER is
// non-empty, Wrap returns the same app.Handler that was
// received (i.e. it does nothing). This is done in
// order to simplify profiling Gondola apps (gondola dev
// -profile sets this environment variable).
func (la *Layer) Wrap(handler app.Handler) app.Handler {
	if noCacheLayer {
		return handler
	}
	return func(ctx *app.Context) {
		if la.mediator.Skip(ctx) {
			handler(ctx)
			return
		}
		key := la.mediator.Key(ctx)
		data, _ := la.cache.GetBytes(key)
		if data != nil {
			// There's cached data for this key; try to decode and serve it
			var response *cachedResponse
			err := layerCodec.Decode(data, &response)
			if err == nil {
				ctx.Set(internal.LayerServedFromCacheKey, true)
				header := ctx.Header()
				for k, v := range response.Header {
					header[k] = v
				}
				header["X-Gondola-From-Layer"] = fromLayer
				ctx.WriteHeader(response.StatusCode)
				ctx.Write(response.Data)
				return
			}
		}
		// Record the response while serving it, so it can be cached
		rw := ctx.ResponseWriter
		w := newWriter(rw)
		ctx.ResponseWriter = w
		handler(ctx)
		ctx.ResponseWriter = rw
		if la.mediator.Cache(ctx, w.statusCode, w.header) {
			response := &cachedResponse{w.header, w.statusCode, w.buf.Bytes()}
			data, err := layerCodec.Encode(response)
			if err == nil {
				ctx.Set(internal.LayerCachedKey, true)
				expiration := la.mediator.Expires(ctx, w.statusCode, w.header)
				la.cache.SetBytes(key, data, expiration)
			} else {
				log.Errorf("Error encoding cached response: %v", err)
			}
		}
	}
}
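// A hypothetical usage sketch for Wrap. The exampleWrapUsage function and
// the hello handler below are illustrative assumptions; only Wrap itself is
// defined in this file.
func exampleWrapUsage(la *Layer) app.Handler {
	hello := func(ctx *app.Context) {
		ctx.Write([]byte("hello"))
	}
	// The wrapped handler serves from the cache when the mediator allows
	// it, and falls through to hello (caching the response) otherwise.
	return la.Wrap(hello)
}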
func updatePackages(e *doc.Environment) {
	app, _ := e.Get(envAppKey).(*App)
	if app == nil {
		return
	}
	data, _ := app.Data().(*appData)
	if data == nil {
		return
	}
	for _, gr := range data.Groups {
		for _, pkg := range gr.Packages {
			if err := updatePackage(e, pkg); err != nil {
				log.Errorf("error updating %s: %s", pkg, err)
			}
		}
	}
}
func (w *fsWatcher) watch() {
	// TODO: Add better support for added/removed files
	var t *time.Timer
	for {
		select {
		case ev, ok := <-w.watcher.Events:
			if !ok {
				// Channel was closed
				return
			}
			if ev.Op == fsnotify.Chmod {
				break
			}
			if ev.Op == fsnotify.Remove {
				// It seems the Watcher stops watching a file
				// if it receives a DELETE event for it. For some
				// reason, some editors generate a DELETE event
				// for a file when saving it, so we must watch the
				// file again. Since fsnotify is in exp/ and its
				// API might change, remove the watch first, just
				// in case.
				w.watcher.Remove(ev.Name)
				w.watcher.Add(ev.Name)
			}
			if w.isValidFile(ev.Name) {
				// Coalesce events arriving in a short window
				// into a single change notification.
				if t != nil {
					t.Stop()
				}
				name := ev.Name
				t = time.AfterFunc(50*time.Millisecond, func() {
					w.changed(name)
				})
			}
		case err := <-w.watcher.Errors:
			if err == nil {
				// Channel was closed
				return
			}
			log.Errorf("Error watching: %s", err)
		}
	}
}
func (f *fileWriter) Write(prefix string, b []byte) error {
	if f.f == nil {
		return errors.New("file not opened")
	}
	c1, err := fmt.Fprintf(f.f, "[%s] ", prefix)
	if err != nil {
		return err
	}
	c2, err := f.f.Write(b)
	if err != nil {
		return err
	}
	f.size += uint64(c1 + c2)
	if f.maxSize > 0 && f.count > 0 && f.size > f.maxSize {
		log.Debugf("rotating log file %s", f.name)
		if err := f.rotate(); err != nil {
			log.Errorf("error rotating file %s: %s", f.name, err)
			return err
		}
	}
	return nil
}
func (g *Governator) startWatching() error {
	q := newQuit()
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	go func() {
	End:
		for {
			select {
			case ev := <-watcher.Event:
				log.Debugf("file watcher event %s", ev)
				name := filepath.Base(ev.Name)
				if g.shouldIgnoreFile(name, ev.IsDelete() || ev.IsRename()) {
					break
				}
				g.mu.Lock()
				switch {
				case ev.IsCreate():
					cfg := g.parseConfig(name)
					// If a file is moved or copied over an already existing
					// service configuration, we only receive a CREATE. Check
					// if we already have a configuration with that name and, in
					// that case, stop it, update its config and restart.
					if _, s := g.serviceByFilenameLocked(name); s != nil {
						s.updateConfig(cfg)
						g.sortServices()
						s.Stop()
						s.Start()
					} else {
						name, err := g.addServiceLocked(cfg)
						if err != nil {
							log.Errorf("error adding service %s: %s", cfg.ServiceName(), err)
						} else if cfg.Start {
							log.Debugf("starting service %s", name)
							s, _ := g.serviceByNameLocked(name)
							s.Start()
						}
					}
				case ev.IsDelete() || ev.IsRename():
					if ii, s := g.serviceByFilenameLocked(name); s != nil {
						log.Debugf("removed service %s", s.Name())
						if s.State == StateStarted {
							s.Stop()
						}
						g.services = append(g.services[:ii], g.services[ii+1:]...)
					}
				case ev.IsModify():
					if _, s := g.serviceByFilenameLocked(name); s != nil {
						cfg := g.parseConfig(name)
						s.updateConfig(cfg)
						g.sortServices()
					}
				default:
					log.Errorf("unhandled event: %s", ev)
				}
				g.mu.Unlock()
			case err := <-watcher.Error:
				log.Errorf("error watching: %s", err)
			case <-q.stop:
				watcher.Close()
				q.sendStopped()
				break End
			}
		}
	}()
	if err := watcher.Watch(g.servicesDir()); err != nil {
		return err
	}
	g.mu.Lock()
	defer g.mu.Unlock()
	g.quits = append(g.quits, q)
	return nil
}
func (w *fsWatcher) doPolling() {
	// Copy the map, since we might add entries to
	// it while iterating
	watched := make(map[string]time.Time)
	w.mu.RLock()
	for k, v := range w.watched {
		watched[k] = v
	}
	w.mu.RUnlock()
	for k, v := range watched {
		st, err := os.Stat(k)
		if err != nil {
			if os.IsNotExist(err) {
				// Removed file or directory
				w.removed(k)
				continue
			}
			log.Errorf("error stat'ing %s: %v", k, err)
			continue
		}
		if st.IsDir() {
			// Update stored modTime
			w.mu.Lock()
			w.watched[k] = st.ModTime()
			w.mu.Unlock()
			if !v.IsZero() && st.ModTime().Equal(v) {
				// Nothing new in this dir
				continue
			}
			entries, err := ioutil.ReadDir(k)
			if err != nil {
				log.Errorf("error reading files in %s: %v", k, err)
				continue
			}
			if v.IsZero() {
				// 1st time we're polling this dir, add its files
				// without triggering the Added() handler.
				for _, e := range entries {
					p := filepath.Join(k, e.Name())
					if !w.isValidEntry(p, e) {
						continue
					}
					w.mu.Lock()
					w.watched[p] = e.ModTime()
					w.mu.Unlock()
				}
			} else {
				var added []os.FileInfo
				w.mu.RLock()
				for _, e := range entries {
					p := filepath.Join(k, e.Name())
					if _, found := w.watched[p]; !found && w.isValidEntry(p, e) {
						added = append(added, e)
					}
				}
				w.mu.RUnlock()
				for _, e := range added {
					w.added(filepath.Join(k, e.Name()), e.ModTime())
				}
			}
		} else if w.isValidEntry(k, st) {
			if mt := st.ModTime(); !mt.Equal(v) {
				// The map must be locked here too, like in the
				// directory branch above.
				w.mu.Lock()
				w.watched[k] = mt
				w.mu.Unlock()
				if !v.IsZero() {
					// File was changed
					w.changed(k)
				}
			}
		}
	}
}
func (p *Project) StartMonitoring() error {
	watcher, err := newFSWatcher()
	if err != nil {
		return err
	}
	var files []string
	pkgs, err := p.Packages()
	if err != nil && len(pkgs) == 0 {
		// Monitor just the files in the project directory
		infos, err2 := ioutil.ReadDir(p.dir)
		if err2 != nil {
			// Return the original error, since it will show
			// why the packages failed to import
			return err
		}
		for _, entry := range infos {
			if !entry.IsDir() {
				files = append(files, filepath.Join(p.dir, entry.Name()))
			}
		}
	}
	watcher.IsValidFile = func(path string) bool {
		return path == p.configPath || isSource(path)
	}
	var timer *time.Timer
	var mu sync.Mutex
	onChanged := func(path string) {
		if path == p.configPath {
			log.Infof("Config file %s changed, restarting...", p.configPath)
			if err := p.Stop(); err != nil {
				log.Errorf("Error stopping %s: %s", p.Name(), err)
			}
			if err := p.Start(); err != nil {
				log.Panicf("Error starting %s: %s", p.Name(), err)
			}
		} else {
			// Merge multiple events arriving in
			// a small time window
			mu.Lock()
			if timer == nil {
				timer = time.AfterFunc(10*time.Millisecond, func() {
					mu.Lock()
					timer = nil
					mu.Unlock()
					// Build outside the lock, so new events aren't
					// blocked while the build runs.
					p.Build()
				})
			}
			mu.Unlock()
		}
	}
	watcher.Added = onChanged
	watcher.Removed = onChanged
	watcher.Changed = onChanged
	if len(files) > 0 {
		// Packages could not be imported and we're
		// using files as a fallback.
		for _, f := range files {
			if err := watcher.Add(f); err != nil {
				return err
			}
		}
	} else {
		if err := watcher.AddPackages(pkgs); err != nil {
			return err
		}
	}
	if err := watcher.Add(p.configPath); err != nil {
		return err
	}
	p.watcher = watcher
	return nil
}
func (t *Template) prepareAssets() error {
	groups, err := t.preparedAssetsGroups(t.vars, t, nil)
	if err != nil {
		return err
	}
	if err := sortGroups(groups); err != nil {
		return err
	}
	var top bytes.Buffer
	var bottom bytes.Buffer
	for _, group := range groups {
		// Only bundle and use CDNs in non-debug mode
		if !t.Debug {
			if group[0].Options.Bundle() || group[0].Options.Bundable() {
				bundled, err := assets.Bundle(group, group[0].Options)
				if err == nil {
					group = []*assets.Group{
						{
							Manager: group[0].Manager,
							Assets:  []*assets.Asset{bundled},
							Options: group[0].Options,
						},
					}
				} else {
					var names []string
					for _, g := range group {
						for _, a := range g.Assets {
							names = append(names, a.Name)
						}
					}
					log.Errorf("error bundling assets %s: %s - using individual assets", names, err)
				}
			} else if group[0].Options.Cdn() {
				for _, g := range group {
					var groupAssets []*assets.Asset
					for _, a := range g.Assets {
						cdnAssets, err := assets.CdnAssets(g.Manager, a)
						if err != nil {
							if !g.Manager.Has(a.Name) {
								return fmt.Errorf("could not find CDN for asset %q: %s", a.Name, err)
							}
							log.Warningf("could not find CDN for asset %q: %s - using local copy", a.Name, err)
							groupAssets = append(groupAssets, a)
							continue
						}
						groupAssets = append(groupAssets, cdnAssets...)
					}
					g.Assets = groupAssets
				}
			}
		}
		for _, g := range group {
			for _, v := range g.Assets {
				switch v.Position {
				case assets.Top:
					if err := assets.RenderTo(&top, g.Manager, v); err != nil {
						return fmt.Errorf("error rendering asset %q: %s", v.Name, err)
					}
					top.WriteByte('\n')
				case assets.Bottom:
					if err := assets.RenderTo(&bottom, g.Manager, v); err != nil {
						return fmt.Errorf("error rendering asset %q: %s", v.Name, err)
					}
					bottom.WriteByte('\n')
				default:
					return fmt.Errorf("asset %q has invalid position %s", v.Name, v.Position)
				}
			}
		}
	}
	t.topAssets = top.Bytes()
	t.bottomAssets = bottom.Bytes()
	return nil
}