Example #1
// BinaryLauncher returns a new Task generator that builds a binary runner from the given properties, relaunching the binary every time it receives a signal and sending out a signal once it has finished running.
func BinaryLauncher(bin string, args []string) flux.Reactor {
	var channel chan bool

	return flux.Reactive(flux.SimpleMuxer(func(root flux.Reactor, data interface{}) {
		if channel == nil {
			channel = RunBin(bin, args, func() {
				root.Reply(true)
			}, func() {
				go root.Close()
			})
		}

		select {
		case <-root.CloseNotify():
			close(channel)
			return
		case <-time.After(0):
			//check for a boolean value so we forward the correct signal
			if cmd, ok := data.(bool); ok {
				channel <- cmd
				return
			}

			//TODO: should we fallback to sending true if we receive a signal normally? or remove this
			// channel <- true
		}

	}))
}
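A minimal usage sketch (not from the source): it assumes only the Send and Close calls already demonstrated in these examples, and the binary path and arguments are placeholders.

func exampleBinaryLauncher() {
	// Build a relauncher for a hypothetical local binary.
	launcher := BinaryLauncher("./bin/app", []string{"-port", "3000"})

	// Each boolean sent is forwarded to the runner's control channel,
	// per the data.(bool) branch in the muxer above.
	launcher.Send(true)

	// Closing the reactor closes the underlying channel and stops the runner.
	launcher.Close()
}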
Example #2
// ChunkFileScanAdaptor provides a Stacks for parser.Parser: it receives a file path, opens the file, and scans its contents for query chunks, replying with each query found.
func ChunkFileScanAdaptor() flux.Reactor {
	return flux.Reactive(func(v flux.Reactor, err error, d interface{}) {
		if err != nil {
			v.ReplyError(err)
			return
		}
		var data string
		var ok bool

		if data, ok = d.(string); !ok {
			v.ReplyError(ErrInputTytpe)
			return
		}

		var fs *os.File

		if fs, err = os.Open(data); err != nil {
			v.ReplyError(err)
			return
		}

		if err = parser.ScanChunks(parser.NewScanner(fs), func(query string) {
			v.Reply(query)
		}); err != nil {
			v.ReplyError(err)
		}
	})
}
Example #3
// ParseAdaptor provides a Stacks for parser.Parser that parses string queries rather than files: it takes a single full query as a string and parses it.
func ParseAdaptor(inspect *parser.InspectionFactory) *Parser {
	ps := parser.NewParser(inspect)

	ad := flux.Reactive(func(v flux.Reactor, err error, d interface{}) {
		if err != nil {
			v.ReplyError(err)
			return
		}

		var data string
		var ok bool

		if data, ok = d.(string); !ok {
			v.ReplyError(ErrInputTytpe)
			return
		}

		var gs ds.Graphs

		if gs, err = ps.Scan(bytes.NewBufferString(data)); err != nil {
			v.ReplyError(err)
			return
		}

		v.Reply(gs)
	})

	return &Parser{
		Reactor: ad,
		parser:  ps,
	}
}
Example #4
// FileAppender takes incoming data of type *FileWrite and appends its contents to an endpoint path derived from the file's path by the provided mux function.
func FileAppender(fx func(string) string) flux.Reactor {
	if fx == nil {
		fx = defaultMux
	}
	return flux.Reactive(flux.SimpleMuxer(func(root flux.Reactor, data interface{}) {
		if file, ok := data.(*FileWrite); ok {
			// endpoint := filepath.Join(toPath, file.Path)

			endpoint := fx(file.Path)
			endpointDir := filepath.Dir(endpoint)

			//make the directory tree in case it does not exist
			if err := os.MkdirAll(endpointDir, 0700); err != nil {
				root.ReplyError(err)
				return
			}

			//open the endpoint for appending, creating it if it does not exist
			osfile, err := os.OpenFile(endpoint, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)

			if err != nil {
				root.ReplyError(err)
				return
			}

			defer osfile.Close()

			if _, err := osfile.Write(file.Data); err != nil {
				root.ReplyError(err)
				return
			}
			root.Reply(&FileWrite{Path: endpoint})
		}
	}))
}
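A hedged usage sketch: the mux function and the build directory below are illustrative assumptions, while the FileWrite Path/Data fields come from the code above.

func exampleFileAppender() {
	// Route every appended file under a hypothetical ./build directory.
	appender := FileAppender(func(path string) string {
		return filepath.Join("build", path)
	})

	// Sending a *FileWrite appends its Data to build/index.html and replies
	// with a *FileWrite pointing at the new endpoint.
	appender.Send(&FileWrite{Path: "index.html", Data: []byte("<h1>hello</h1>")})
}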
Example #5
// GoRunner calls `go run` with the command it receives from its data pipes
func GoRunner() flux.Reactor {
	return flux.Reactive(flux.SimpleMuxer(func(root flux.Reactor, data interface{}) {
		if cmd, ok := data.(string); ok {
			root.Reply(GoRun(cmd))
		}
	}))
}
Example #6
// ModFileWrite provides a task for building a FileWrite modifier, letting you modify the values of a particular FileWrite struct before it is passed on.
func ModFileWrite(fx func(*FileWrite)) flux.Reactor {
	return flux.Reactive(flux.SimpleMuxer(func(root flux.Reactor, data interface{}) {
		if fw, ok := data.(*FileWrite); ok {
			fx(fw)
			root.Reply(fw)
		}
	}))
}
Example #7
// GoInstallerWith calls `go install` on the provided path every time a signal is received.
func GoInstallerWith(path string) flux.Reactor {
	return flux.Reactive(flux.SimpleMuxer(func(root flux.Reactor, _ interface{}) {
		if err := GoDeps(path); err != nil {
			root.ReplyError(err)
			return
		}
		root.Reply(true)
	}))
}
Example #8
// GoBuilder calls `go build` with the BuildConfig it receives from its data pipes, using the Gobuild function.
func GoBuilder() flux.Reactor {
	return flux.Reactive(flux.SimpleMuxer(func(root flux.Reactor, data interface{}) {
		if cmd, ok := data.(BuildConfig); ok {
			if err := Gobuild(cmd.Path, cmd.Name, cmd.Args); err != nil {
				root.ReplyError(err)
			}
		}
	}))
}
Example #9
// GoArgsBuilderWith calls `go build` with the provided arguments every time a signal is received, using the GobuildArgs function.
func GoArgsBuilderWith(cmd []string) flux.Reactor {
	return flux.Reactive(flux.SimpleMuxer(func(root flux.Reactor, _ interface{}) {
		if err := GobuildArgs(cmd); err != nil {
			root.ReplyError(err)
			return
		}
		root.Reply(true)
	}))
}
Example #10
// GoBuilderWith calls `go build` on the provided BuildConfig every time a signal is received, using the Gobuild function.
func GoBuilderWith(cmd BuildConfig) flux.Reactor {
	validateBuildConfig(cmd)
	return flux.Reactive(flux.SimpleMuxer(func(root flux.Reactor, _ interface{}) {
		if err := Gobuild(cmd.Path, cmd.Name, cmd.Args); err != nil {
			root.ReplyError(err)
			return
		}
		root.Reply(true)
	}))
}
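A brief usage sketch, assuming BuildConfig exposes the Path, Name and Args fields used above; the concrete values are placeholders.

func exampleGoBuilderWith() {
	builder := GoBuilderWith(BuildConfig{
		Path: "./cmd/app", // hypothetical package directory
		Name: "app",       // hypothetical binary name
	})

	// Any signal triggers a rebuild; a true reply signals success.
	builder.Send(true)
}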
Example #11
// ByteRenderer provides a baseline worker for building rendering tasks, e.g. markdown. It expects to receive a *RenderFile and replies with another *RenderFile containing the rendered data and the path from the previous RenderFile, which allows chaining with other ByteRenderers.
func ByteRenderer(fx RenderMux) flux.Reactor {
	if fx == nil {
		panic("RenderMux cant be nil for ByteRender")
	}
	return flux.Reactive(flux.SimpleMuxer(func(root flux.Reactor, data interface{}) {
		if databytes, ok := data.(*RenderFile); ok {
			root.Reply(&RenderFile{Path: databytes.Path, Data: fx(databytes.Data)})
		}
	}))
}
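A short sketch, assuming RenderMux is a func([]byte) []byte as the fx(databytes.Data) call above suggests; the upper-casing transform is purely illustrative.

func exampleByteRenderer() flux.Reactor {
	// Illustrative renderer that upper-cases the bytes it receives.
	return ByteRenderer(func(b []byte) []byte {
		return bytes.ToUpper(b)
	})
}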
Example #12
// GoArgsBuilder calls `go build` with the argument list it receives from its data pipes, using the GobuildArgs function.
func GoArgsBuilder() flux.Reactor {
	return flux.Reactive(flux.SimpleMuxer(func(root flux.Reactor, data interface{}) {
		if cmd, ok := data.([]string); ok {
			if err := GobuildArgs(cmd); err != nil {
				root.ReplyError(err)
				return
			}
			root.Reply(true)
		}
	}))
}
Example #13
// GoInstaller calls `go install` on the path it receives from its data pipes.
func GoInstaller() flux.Reactor {
	return flux.Reactive(flux.SimpleMuxer(func(root flux.Reactor, data interface{}) {
		if path, ok := data.(string); ok {
			if err := GoDeps(path); err != nil {
				root.ReplyError(err)
				return
			}
			root.Reply(true)
		}
	}))
}
Example #14
// FileAllRemover takes a *RemoveFile as its data and removes the path using os.RemoveAll.
func FileAllRemover() flux.Reactor {
	return flux.Reactive(flux.SimpleMuxer(func(root flux.Reactor, data interface{}) {
		if file, ok := data.(*RemoveFile); ok {
			err := os.RemoveAll(file.Path)

			if err != nil {
				root.ReplyError(err)
				return
			}
		}
	}))
}
Example #15
// BundleAssets creates an assets.BindFS which, whenever it receives a signal, regenerates the bundled file described by its config.
func BundleAssets(config *assets.BindFSConfig) (flux.Reactor, error) {
	bindfs, err := assets.NewBindFS(config)

	if err != nil {
		return nil, err
	}

	return flux.Reactive(func(root flux.Reactor, err error, data interface{}) {
		// bindfs.Record()
		if err := bindfs.Record(); err != nil {
			root.ReplyError(err)
			return
		}
		root.Reply(true)
	}), nil
}
Example #16
// QueryAdaptor provides a simple adaptor that passes incoming ds.Graphs queries to the supplied QueryHandler.
func QueryAdaptor(gx QueryHandler) flux.Reactor {
	return flux.Reactive(func(v flux.Reactor, err error, d interface{}) {
		if err != nil {
			v.ReplyError(err)
			return
		}

		da, ok := d.(ds.Graphs)

		if !ok {
			v.ReplyError(ErrGraphType)
			return
		}

		gx(v, da)
	})
}
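A hedged sketch, assuming QueryHandler is a func(flux.Reactor, ds.Graphs) as the gx(v, da) call above suggests; the handler body is a placeholder.

func exampleQueryAdaptor() flux.Reactor {
	return QueryAdaptor(func(v flux.Reactor, g ds.Graphs) {
		// Placeholder behaviour: pass the parsed graph straight through.
		v.Reply(g)
	})
}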
Example #17
// DbExecutor returns a reactor that uses a *sql.DB to execute the queries of the Statements it receives.
func DbExecutor(db *sql.DB) flux.Reactor {
	return flux.Reactive(func(r flux.Reactor, err error, d interface{}) {
		if err != nil {
			r.ReplyError(err)
			return
		}

		var stl *Statement
		var ok bool

		if stl, ok = d.(*Statement); !ok {
			r.ReplyError(ErrInvalidStatementType)
			return
		}

		rows, err := db.Query(stl.Query)

		if err != nil {
			r.ReplyError(err)
			return
		}

		var datarows [][]interface{}

		defer rows.Close()

		for rows.Next() {
			bu := adaptors.BuildInterfacePoints(stl.Columns)

			err := rows.Scan(bu...)

			if err != nil {
				r.ReplyError(err)
				return
			}

			datarows = append(datarows, bu)
		}

		stl.Data = adaptors.UnbuildInterfaceList(datarows)
		r.Reply(stl)
	})
}
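A rough usage sketch, assuming a *Statement produced by TableParser (Example #24) and an already opened *sql.DB supplied by the caller.

func exampleDbExecutor(db *sql.DB, stmt *Statement) {
	executor := DbExecutor(db)

	// Each *Statement sent in has its Query executed; the same Statement
	// comes back via Reply with its Data field populated.
	executor.Send(stmt)
}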
Example #18
// CommandLauncher returns a new Task generator that builds a command executor which runs a series of commands every time it receives a signal, sending out a signal once it has finished running all commands.
func CommandLauncher(cmd []string) flux.Reactor {
	var channel chan bool
	return flux.Reactive(flux.SimpleMuxer(func(root flux.Reactor, _ interface{}) {
		if channel == nil {
			channel = RunCMD(cmd, func() {
				root.Reply(true)
			})
		}

		select {
		case <-root.CloseNotify():
			close(channel)
			return
		case <-time.After(0):
			channel <- true
		}

	}))
}
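A minimal usage sketch; the command itself is a placeholder, and only the Send call already shown in these examples is assumed.

func exampleCommandLauncher() {
	// Hypothetical command; each signal runs it again, and a true reply is
	// sent once the run completes.
	runner := CommandLauncher([]string{"make", "generate"})
	runner.Send(true)
}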
Example #19
// FileOpCopy listens for either a *FileRead or a *FileWrite and sends it off to the given set of reactors. To reduce memory footprint the FileRead/FileWrite pointer is sent as is, so if you want a fresh copy, dereference it to obtain a unique copy.
func FileOpCopy(to ...flux.Reactor) flux.Reactor {
	return flux.Reactive((func(root flux.Reactor, err error, data interface{}) {
		if err != nil {
			for _, fx := range to {
				fx.SendError(err)
			}
			return
		}

		if file, ok := data.(*FileWrite); ok {
			for _, fx := range to {
				fx.Send(file)
			}
		}

		if file, ok := data.(*FileRead); ok {
			for _, fx := range to {
				fx.Send(file)
			}
		}
	}))
}
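A hedged sketch of fanning a file write out to two downstream reactors built from other examples in this set; the path and data are illustrative.

func exampleFileOpCopy() {
	appender := FileAppender(nil) // nil falls back to defaultMux, per Example #4
	modder := ModFileWrite(func(fw *FileWrite) {
		fw.Path = filepath.Base(fw.Path) // hypothetical mod: strip directories
	})

	copier := FileOpCopy(appender, modder)

	// The same *FileWrite pointer is forwarded to both reactors.
	copier.Send(&FileWrite{Path: "site/index.html", Data: []byte("...")})
}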
Example #20
// GoFileLauncher returns a new Task generator that builds a Go file runner from the given properties, relaunching the file every time it receives a signal and sending out a signal once it has finished running.
func GoFileLauncher(goFile string, args []string) flux.Reactor {
	var channel chan bool

	return flux.Reactive(flux.SimpleMuxer(func(root flux.Reactor, data interface{}) {
		if channel == nil {
			channel = RunGo(goFile, args, func() {
				root.Reply(true)
			}, func() {
				go root.Close()
			})
		}

		select {
		case <-root.CloseNotify():
			close(channel)
			return
		case <-time.After(0):
			channel <- true
		}

	}))
}
Example #21
// JSBuildLauncher returns a Task generator that builds a new jsbuild task from the given configuration; on every received signal it rebuilds and sends off a FileWrite for each output file, i.e. the .js and .js.map files.
func JSBuildLauncher(config JSBuildConfig) flux.Reactor {
	if config.Package == "" {
		panic("JSBuildConfig.Package can not be empty")
	}

	if config.FileName == "" {
		config.FileName = "jsapp.build"
	}

	// var session *JSSession
	return flux.Reactive(flux.SimpleMuxer(func(root flux.Reactor, data interface{}) {
		// if session == nil {
		session := NewJSSession(config.Tags, config.Verbose, false)
		// }

		// session.Session.
		//if an optional PackageDir is provided use session.BuildDir, otherwise session.BuildPkg
		var js, jsmap *bytes.Buffer
		var err error

		if config.PackageDir != "" {
			js, jsmap, err = session.BuildDir(config.PackageDir, config.Package, config.FileName)
		} else {
			js, jsmap, err = session.BuildPkg(config.Package, config.FileName)
		}

		if err != nil {
			root.ReplyError(err)
			return
		}

		jsfile := fmt.Sprintf("%s.js", config.FileName)
		jsmapfile := fmt.Sprintf("%s.js.map", config.FileName)

		root.Reply(&fs.FileWrite{Data: js.Bytes(), Path: filepath.Join(config.Folder, jsfile)})
		root.Reply(&fs.FileWrite{Data: jsmap.Bytes(), Path: filepath.Join(config.Folder, jsmapfile)})
	}))
}
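A usage sketch with placeholder values; the JSBuildConfig fields are those used above, but the package path, file name and folder are assumptions.

func exampleJSBuildLauncher() {
	jsbuild := JSBuildLauncher(JSBuildConfig{
		Package:  "github.com/example/app/js", // hypothetical package path
		FileName: "app.build",
		Folder:   "static/js",
	})

	// Any signal triggers a rebuild and two FileWrite replies:
	// static/js/app.build.js and static/js/app.build.js.map.
	jsbuild.Send(true)
}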
Example #22
// FileReader returns a new flux.Reactor that takes a file path, reads the file, and replies with a *FileRead containing the data and the path.
func FileReader() flux.Reactor {
	return flux.Reactive(flux.SimpleMuxer(func(root flux.Reactor, data interface{}) {
		if pr, ok := data.(*FileRead); ok {
			root.Reply(pr)
			return
		}

		if path, ok := data.(string); ok {
			if _, err := os.Stat(path); err == nil {
				file, err := os.Open(path)

				if err != nil {
					root.ReplyError(err)
					return
				}

				defer file.Close()

				var buf bytes.Buffer

				//copy over data
				_, err = io.Copy(&buf, file)

				//if we have an error and it's not EOF then reply with the error
				if err != nil && err != io.EOF {
					root.ReplyError(err)
					return
				}

				root.Reply(&FileRead{Data: buf.Bytes(), Path: path})
			} else {
				root.ReplyError(err)
			}
		}
	}))
}
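A small usage sketch; the path is illustrative, and consuming the *FileRead reply is left to whatever downstream reactors the pipeline wires up.

func exampleFileReader() {
	reader := FileReader()

	// A string path makes the reactor read the file and reply with a
	// *FileRead carrying the data and the path; a *FileRead passes through.
	reader.Send("config/app.yml") // hypothetical file path
}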
Example #23
// ChunkScanAdaptor provides a Stacks for parser.Parser and scans string inputs for query chunks.
func ChunkScanAdaptor() flux.Reactor {
	return flux.Reactive(func(v flux.Reactor, err error, d interface{}) {
		if err != nil {
			v.ReplyError(err)
			return
		}

		var data string
		var ok bool

		if data, ok = d.(string); !ok {
			v.ReplyError(ErrInputTytpe)
			return
		}

		scan := parser.NewScanner(bytes.NewBufferString(data))

		if err = parser.ScanChunks(scan, func(query string) {
			v.Reply(query)
		}); err != nil {
			v.ReplyError(err)
		}
	})
}
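A quick sketch; the multi-statement input is illustrative, and each scanned chunk is replied individually per the ScanChunks callback above.

func exampleChunkScanAdaptor() {
	scanner := ChunkScanAdaptor()
	scanner.Send("select * from users; select * from orders;") // hypothetical input
}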
Example #24
// TableParser is a reactor that takes a Tables value (an array of *Table) and generates the corresponding SQL statement.
func TableParser() flux.Reactor {
	return flux.Reactive(func(r flux.Reactor, err error, data interface{}) {
		if err != nil {
			r.ReplyError(err)
			return
		}

		var tables Tables
		var ok bool

		tables, ok = data.(Tables)

		if !ok {
			r.ReplyError(ErrInvalidTableData)
			return
		}

		var tableNames []string
		var tableColumns []string
		var tableWheres []string
		var tableMeta = make(TableMeta)
		var lastColumSize = 0
		var graph ds.Graphs

		for _, table := range tables {

			if graph == nil {
				graph = table.Graph
			}

			//add the table names into the array, using the alias format "TABLENAME alias"
			tableNames = append(tableNames, fmt.Sprintf("%s %s", strings.ToUpper(table.Name), table.Key))

			//loop through each column name and prefix the table alias, collecting the qualified column names for the select clause
			for _, coname := range table.Columns {
				tableColumns = append(tableColumns, fmt.Sprintf("%s.%s", table.Key, coname))
			}

			//collect table info for particular table
			tableMeta[table.Name] = &TableInfo{
				Alias:       table.Key,
				ParentAlias: table.PKey,
				Name:        table.Name,
				Parent:      table.Parent,
				Columns:     table.Columns,
				Begin:       lastColumSize,
				End:         (lastColumSize + (len(table.Columns) - 1)),
				Node:        table.Node,
				Graph:       table.Graph,
			}

			lastColumSize = len(tableColumns)

			// for _,clo := range table.Conditions {
			// }
			//join the conditions of this table with an AND
			clos := strings.Join(table.Conditions, "\nAND ")

			//replace both {{table}} and {{parentTable}} with the appropriate names/tags
			// log.Printf("setting alias:", table.Key, table.PKey, table.Name)
			clos = strings.Replace(clos, "{{table}}", table.Key, -1)
			clos = strings.Replace(clos, "{{parentTable}}", table.PKey, -1)

			//add this condition to the global where list
			tableWheres = append(tableWheres, clos)
		}

		var sqlst = SQLSimpleSelect

		sqlst = strings.Replace(sqlst, "{{columns}}", strings.Join(tableColumns, ", "), -1)
		sqlst = strings.Replace(sqlst, "{{tables}}", strings.Join(tableNames, ", "), -1)

		//clean the where clauses of empty or whitespace-only strings
		tableWheres = adaptors.CleanHouse(tableWheres)

		if len(tableWheres) < 2 {
			sqlst = strings.Replace(sqlst, "{{clauses}}", strings.Join(tableWheres, " "), -1)
		} else {
			sqlst = strings.Replace(sqlst, "{{clauses}}", strings.Join(tableWheres, "\nAND "), -1)
		}

		// log.Printf("SQL: %s", sqlst)
		r.Reply(&Statement{
			Query:   sqlst,
			Tables:  tableMeta,
			Columns: len(tableColumns),
			Graph:   graph,
		})
	})
}
Example #25
// GoRunnerWith calls `go run` on the provided command every time a signal is received.
func GoRunnerWith(cmd string) flux.Reactor {
	return flux.Reactive(flux.SimpleMuxer(func(root flux.Reactor, _ interface{}) {
		root.Reply(GoRun(cmd))
	}))
}
Example #26
// Watch returns a task handler that watches a path for changes and passes down the file which changed
func Watch(m WatchConfig) flux.Reactor {
	var running bool
	mo := flux.Reactive(func(root flux.Reactor, err error, _ interface{}) {
		if err != nil {
			root.ReplyError(err)
			return
		}

		if running {
			return
		}

		stat, err := os.Stat(m.Path)

		if err != nil {
			root.ReplyError(err)
			go root.Close()
			return
		}

		running = true

		if !stat.IsDir() {
			flux.GoDefer("Watch", func() {
				defer root.Close()

				for {

					wo, err := fsnotify.NewWatcher()

					if err != nil {
						root.ReplyError(err)
						break
					}

					if err := wo.Add(m.Path); err != nil {
						wo.Close()
						break
					}

					select {
					case ev, ok := <-wo.Events:
						if ok {
							root.Reply(ev)
						}
					case erx, ok := <-wo.Errors:
						if ok {
							root.ReplyError(erx)
						}
					case <-root.CloseNotify():
						wo.Close()
						return
					}

					wo.Close()
				}
			})

			return
		}

		dir, err := assets.DirListings(m.Path, m.Validator, m.Mux)

		if err != nil {
			root.ReplyError(err)
			go root.Close()
			return
		}

		flux.GoDefer("Watch", func() {
			defer root.Close()

			for {

				wo, err := fsnotify.NewWatcher()

				if err != nil {
					root.ReplyError(err)
					break
				}

				dir.Listings.Wo.RLock()
				for _, files := range dir.Listings.Tree {
					wo.Add(files.AbsDir)
					files.Tree.Each(func(mod, real string) {
						rel, _ := filepath.Abs(real)
						wo.Add(rel)
						// wo.Add(filepath.Join(files.AbsDir, real))
					})
				}
				dir.Listings.Wo.RUnlock()

				select {
				case <-root.CloseNotify():
					wo.Close()
					return
				case ev, ok := <-wo.Events:
					if ok {
						file := filepath.Clean(ev.Name)
						// stat, _ := os.Stat(file)
						if (&m).Validator != nil {
							if (&m).Validator(file, nil) {
								root.Reply(ev)
							}
						} else {
							root.Reply(ev)
						}
					}
				case erx, ok := <-wo.Errors:
					if ok {
						root.ReplyError(erx)
					}
				}

				wo.Close()

				if err = dir.Reload(); err != nil {
					root.ReplyError(err)
				}

			}
		})

	})

	mo.Send(true)
	return mo
}
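A hedged sketch of starting a watcher; the directory is a placeholder, and only the WatchConfig.Path field used above is assumed.

func exampleWatch() flux.Reactor {
	// Watch sends itself an initial signal, so the fsnotify loop starts
	// immediately; change events are passed downstream via Reply, and calling
	// Close on the returned reactor stops the loop.
	return Watch(WatchConfig{Path: "./assets"}) // hypothetical directory
}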
Example #27
// JSONBuilder produces a flux.Reactor that turns a list of SQL row data and the corresponding TableInfos into a JSON-style structure.
func JSONBuilder() flux.Reactor {
	return flux.Reactive(func(r flux.Reactor, err error, d interface{}) {
		if err != nil {
			r.ReplyError(err)
			return
		}

		var stl *Statement
		var ok bool

		if stl, ok = d.(*Statement); !ok {
			r.ReplyError(ErrInvalidStatementType)
			return
		}

		for _, blck := range stl.Data {
			func(block []interface{}) {
				// orecord := make(RecordBlock)
				// records = append(records, orecord)
				for _, ifo := range stl.Tables {
					func(info *TableInfo) {
						section := make(TableSection)
						max := info.End - info.Begin

						for j := 0; j <= max; j++ {
							func(ind int) {
								col := info.Columns[ind]
								section[col] = block[info.Begin+ind]
							}(j)
						}

						// info.Node.Result = section
						info.Node.Result = append(info.Node.Result, section)
						// orecord[info.Alias] = section
					}(ifo)
				}

			}(blck)
		}

		mo, err := adaptors.BFGraph(stl.Graph)

		if err != nil {
			r.ReplyError(err)
			return
		}

		var roots = make(map[string]*parser.ParseNode)
		var tree = make(map[string]interface{})
		var root *parser.ParseNode

		for mo.Next() == nil {
			no := mo.Node().(*parser.ParseNode)

			if root == nil {
				root = no
			}

			if _, ok := roots[no.Key]; !ok {
				roots[no.Key] = no
			}

			rod, ok := roots[no.PKey]

			if !ok {
				continue
			}

			for n, rorec := range rod.Result {
				rorec[no.Name()] = no.Result[n]
			}
		}

		// res := root.Result
		tree[root.Name()] = root.Result
		stl = nil
		roots = nil
		root = nil

		r.Reply(tree)
	})
}
Example #28
// WatchSet, unlike Watch, is not limited to a single directory; by providing a WatchSetConfig you can supply multiple directories and files, which will be sorted and watched. If all paths turn out to be invalid the watcher is closed and so is the task, and an invalid-file error is forwarded down the reactor chain.
func WatchSet(m WatchSetConfig) flux.Reactor {
	var running bool
	mo := flux.Reactive(func(root flux.Reactor, err error, _ interface{}) {
		if err != nil {
			root.ReplyError(err)
			return
		}

		if running {
			return
		}

		running = true

		var dirlistings []*assets.DirListing
		var files []string
		var dirsAdded = make(map[string]bool)

		for _, path := range m.Path {
			if dirsAdded[path] {
				continue
			}

			stat, err := os.Stat(path)
			if err != nil {
				// log.Printf("stat error: %s", err)
				root.ReplyError(err)
				continue
			}

			if stat.IsDir() {
				if dir, err := assets.DirListings(path, m.Validator, m.Mux); err == nil {
					dirsAdded[path] = true
					dirlistings = append(dirlistings, dir)
				} else {
					root.ReplyError(err)
				}
			} else {
				if !dirsAdded[filepath.Dir(path)] {
					files = append(files, path)
				}
			}
		}

		if len(dirlistings) <= 0 && len(files) <= 0 {
			log.Printf("no dirlistings, will close")
			go root.Close()
			log.Printf("no dirlistings, will close")
			return
		}

		flux.GoDefer("Watch", func() {
			defer root.Close()

			for {

				wo, err := fsnotify.NewWatcher()

				if err != nil {
					root.ReplyError(err)
					break
				}

				// var watched = make(map[string]bool)
				//reload all concerned directories into watcher
				for _, dir := range dirlistings {
					dir.Listings.Wo.RLock()
					for _, files := range dir.Listings.Tree {
						// log.Printf("Checking folder: %s", files.Dir)
						// if !watched[files.AbsDir] {
						// watched[files.AbsDir] = true
						wo.Add(files.AbsDir)
						// }

						files.Tree.Each(func(mod, real string) {
							// if watched[real] {
							// log.Printf("duplicate found %s -> %s -> %s", mod, real, files.AbsDir)
							// return
							// }

							// watched[real] = true
							rel, _ := filepath.Abs(real)
							wo.Add(rel)
							// if err != nil {
							// 	rel = real
							// }
							// wo.Add(filepath.Join(files.Dir, real))
							// wo.Add(filepath.Join(files.AbsDir, real))
						})
					}
					dir.Listings.Wo.RUnlock()
				}

				//reload all concerned files found in the path
				for _, file := range files {
					wo.Add(file)
				}

				select {
				case <-root.CloseNotify():
					wo.Close()
					return
				case ev, ok := <-wo.Events:
					if ok {
						if (&m).Validator != nil {
							file := filepath.Clean(ev.Name)
							// log.Printf("checking file: %s", file)
							if (&m).Validator(file, nil) {
								// log.Printf("passed file: %s", file)
								root.Reply(ev)
							}
						} else {
							// log.Printf("backdrop file: %s", ev)
							root.Reply(ev)
						}
					}
				case erx, ok := <-wo.Errors:
					if ok {
						root.ReplyError(erx)
					}
				}

				wo.Close()

				//reload all concerned directories
				for _, dir := range dirlistings {
					dir.Reload()
				}
			}
		})

	})

	mo.Send(true)
	return mo
}
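A closing sketch for WatchSet; the paths are placeholders, and only the WatchSetConfig.Path slice used above is assumed.

func exampleWatchSet() flux.Reactor {
	// Hypothetical set of paths; per the stat loop above, invalid ones are
	// reported via ReplyError and skipped rather than aborting the watcher.
	return WatchSet(WatchSetConfig{
		Path: []string{"./templates", "./static/css", "main.go"},
	})
}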