// A build stage creates a new Package and adds all the files coming through the channel to // the package and returns the result of build as a File on the output channel. func Build(config Config) gonzo.Stage { return func(ctx context.Context, files <-chan gonzo.File, out chan<- gonzo.File) error { ctx, cancel := context.WithCancel(ctx) res := resources.New() res.Config = resources.Config(config) var err error buff := &bytes.Buffer{} for { select { case file, ok := <-files: if !ok { goto BUILD } if file.FileInfo().IsDir() { continue } path, _ := filepath.Rel(file.FileInfo().Base(), file.FileInfo().Name()) res.Add(filepath.ToSlash(path), file) ctx.Infof("Adding %s", path) defer func(path string) { ctx.Debug("Closing %s", path) file.Close() //Close files AFTER we have build our package. }(path) case <-ctx.Done(): err = ctx.Err() goto BUILD } } BUILD: if err != nil { return err } ctx.Debug("Runnig build...") err = res.Build(buff) if err != nil { cancel() return err } path := fmt.Sprintf(FilenameFormat, strings.ToLower(config.Var)) sf := gonzo.NewFile(ioutil.NopCloser(buff), gonzo.NewFileInfo()) sf.FileInfo().SetName(path) sf.FileInfo().SetSize(int64(buff.Len())) out <- sf return nil } }
//Src returns a channel of gonzo.Files that match the provided patterns. //TODO: ADD support for prefix to avoid all the util.Trims func Src(ctx context.Context, globs ...string) gonzo.Pipe { ctx, cancel := context.WithCancel(ctx) files := make(chan gonzo.File) pipe := gonzo.NewPipe(ctx, files) //TODO: Parse globs here, check for invalid globs, split them into "filters". go func() { var err error defer close(files) fileslist, err := glob.Glob(globs...) if err != nil { ctx.Error(err) return } for mp := range fileslist { var ( file gonzo.File base = glob.Dir(mp.Glob) name = mp.Name ) file, err = Read(mp.Name) ctx = context.WithValue(ctx, "file", name) if err == ErrIsDir { ctx.Warn("fs.Src Ignored Directory.") continue } if err != nil { cancel() ctx.Error(err) return } file.FileInfo().SetBase(base) file.FileInfo().SetName(name) files <- file } }() return pipe }
func makestage(stage Stage, ctx context.Context, in <-chan File) Pipe { out := make(chan File) next, cancel := context.WithCancel(ctx) go func() { err := stage(ctx, in, out) close(out) if err != nil { cancel() ctx.Error(err) } }() return pipe{files: out, context: next} }
// Merges the output of multiple chan of gonzo.File into a pipe in a serial manner. // (i.e Reads first chan until the end and moves to the next until the last channel is finished. func Queue(pipe gonzo.Pipe, pipes ...gonzo.Pipe) gonzo.Pipe { if len(pipes) == 0 { return pipe } pipes = append([]gonzo.Pipe{pipe}, pipes...) ctx, cancel := context.WithCancel(pipe.Context()) for _, pipe := range pipes { go func(c context.Context) { <-c.Done() cancel() }(pipe.Context()) } out := make(chan gonzo.File) go func(out chan gonzo.File) { defer close(out) for _, p := range pipes { func(files <-chan gonzo.File) { for { select { case f, ok := <-files: if !ok { return } out <- f case <-ctx.Done(): return } } }(p.Files()) if ctx.Err() != nil { return } } }(out) return gonzo.NewPipe(ctx, out) }
// Merge concurrently Merges the output of multiple chan of gonzo.File into a pipe. func Merge(ctx context.Context, pipes ...gonzo.Pipe) gonzo.Pipe { ctx, cancel := context.WithCancel(ctx) for _, pipe := range pipes { go func(c context.Context) { <-c.Done() cancel() }(pipe.Context()) } out := make(chan gonzo.File) go func(out chan gonzo.File) { var wg sync.WaitGroup wg.Add(len(pipes)) defer close(out) for _, p := range pipes { go func(p gonzo.Pipe) { defer wg.Done() files := p.Files() ctx := p.Context() for { select { case f, ok := <-files: if !ok { return } out <- f case <-ctx.Done(): return } } }(p) } wg.Wait() }(out) return gonzo.NewPipe(ctx, out) }
// Gets the list of urls and passes the results to output channel. // It reports the progress to the Context using a ReadProgress proxy. func Get(ctx context.Context, urls ...string) gonzo.Pipe { ctx, cancel := context.WithCancel(ctx) out := make(chan gonzo.File) client := &http.Client{} go func() { defer close(out) for _, url := range urls { if url == "" { ctx.Error("Empty URL.") cancel() return } select { case <-ctx.Done(): ctx.Warn(context.Canceled) return default: ctx.Infof("Downloading %s", url) file, err := get(ctx, client, url) if err != nil { ctx.Error(err) cancel() break } //TODO: Add progress meter. //s, _ := file.Stat() //file.Reader = c.ReadProgress(file.Reader, "Downloading "+file.Path, s.Size()) out <- file } } }() return gonzo.NewPipe(ctx, out) }
// Run setups a build and runs the listed tasks and cancels the
// build on a SIGTERM or INTERRUPT.
// It also calls os.Exit with appropriate code.
func Run(setup func(b *kargar.Build) error) {
	//log.Flags = *level

	// Buffered so a signal delivered before the goroutine below is
	// receiving is not dropped.
	interrupts := make(chan os.Signal, 1)
	signal.Notify(interrupts, os.Interrupt, syscall.SIGTERM)

	ctx, cancel := context.WithCancel(context.Background())
	b := kargar.NewBuild(ctx)
	// Let the caller register tasks on the build; a setup failure is fatal.
	err := setup(b)
	if err != nil {
		ctx.Fatal(err)
	}

	// Signal handler: first signal cancels the build, second forces exit.
	go func() {
		sig := <-interrupts
		// stop watches and clean up.
		fmt.Println() //Next line
		ctx.Warnf("Captured %v, stopping build and exiting...", sig)
		ctx.Warn("Press ctrl+c again to force exit.")
		cancel()
		ret := 0
		select {
		case <-ctx.Done():
			// NOTE(review): cancel() above makes this case ready
			// immediately, so the force-exit branch below looks effectively
			// unreachable — confirm whether this select was meant to wait
			// on build completion instead of ctx.Done().
			err := ctx.Err()
			if err != nil {
				ctx.Error(err)
				ret = 1
			}
		case <-interrupts:
			cancel()
			fmt.Println() //Next line
			ctx.Warn("Force exit.")
			ret = 1
		}
		os.Exit(ret)
	}()

	var wg sync.WaitGroup
	// Task names come from the command line; fall back to "default".
	tasks := []string{"default"}
	if len(os.Args) > 1 {
		tasks = os.Args[1:]
	}

	ctx.Info(tasks)
	// ret is shared by all task goroutines; any failure flips it to 1.
	var ret uint32
	for _, t := range tasks {
		wg.Add(1)
		go func(t string) {
			defer wg.Done()
			err := b.Run(t)
			if err != nil {
				atomic.StoreUint32(&ret, 1)
				ctx.Error(err)
			}
		}(t)
	}
	wg.Wait()

	//XXX: atomic operation unnecessary?
	os.Exit(int(atomic.LoadUint32(&ret)))
}