func (s *scrape) MapRegex(node xml.Node) (map[string]string, error) {
	if !node.IsValid() {
		return nil, errors.New("Invalid node")
	}
	m := make(map[string]string, 1)
	inner := node.String()
	for k, v := range ScrapeRegex {
		// remove new line chars
		reg, _ := regexp.CompilePOSIX("\r\n|\r|\n")
		inner = reg.ReplaceAllString(inner, "")
		// get the real data
		reg, _ = regexp.CompilePOSIX(v[0])
		scraped := reg.FindString(inner)
		scraped = reg.ReplaceAllString(scraped, "$1")
		if scraped != "" {
			m[k] = scraped
		}
	}
	// Skip empty and unwanted
	if len(m) > 0 {
		if m[ScrapeMeta[IGNOREEMPTY]] != "" {
			return m, nil
		}
		return nil, nil
	}
	return nil, nil
}
func validateHTTPIngressRuleValue(httpIngressRuleValue *extensions.HTTPIngressRuleValue, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if len(httpIngressRuleValue.Paths) == 0 {
		allErrs = append(allErrs, field.Required(fldPath.Child("paths"), ""))
	}
	for i, rule := range httpIngressRuleValue.Paths {
		if len(rule.Path) > 0 {
			if !strings.HasPrefix(rule.Path, "/") {
				allErrs = append(allErrs, field.Invalid(fldPath.Child("paths").Index(i).Child("path"), rule.Path, "must be an absolute path"))
			}
			// TODO: More draconian path regex validation.
			// Path must be a valid regex. This is the basic requirement.
			// In addition, any character not allowed in a path per
			// RFC 3986 section 3.3 cannot appear as a literal in the regex.
			// Consider the example http://host/valid?#bar: everything after
			// the last '/' is a valid regex that matches "valid#bar", which
			// isn't a valid path, because the path terminates at the first '?'
			// or '#'. A more sophisticated form of validation would detect that
			// the user is confusing URL regexes with path regexes.
			_, err := regexp.CompilePOSIX(rule.Path)
			if err != nil {
				allErrs = append(allErrs, field.Invalid(fldPath.Child("paths").Index(i).Child("path"), rule.Path, "must be a valid regex"))
			}
		}
		allErrs = append(allErrs, validateIngressBackend(&rule.Backend, fldPath.Child("backend"))...)
	}
	return allErrs
}
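// Illustration only (not part of the original source): the comment above notes that a
// string such as "/valid?#bar" compiles as a regex even though '?' and '#' cannot appear
// literally in a URL path, so CompilePOSIX alone cannot catch that confusion. A minimal
// sketch assuming only the standard library:
func exampleIngressPathRegex() {
	if _, err := regexp.CompilePOSIX("/valid?#bar"); err == nil {
		fmt.Println("compiles as a regex, yet '?' and '#' would terminate a real URL path")
	}
}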
func filterSingleDir(path string, data []os.FileInfo) ([]os.FileInfo, error) {
	reg, err := regexp.CompilePOSIX(Config.fileregex)
	if err != nil {
		return nil, err
	}
	// Find the generated file.
	genfile := GetGenerateFileName(path)
	// Filter the files that need to be generated.
	newdata := []os.FileInfo{}
	var genfiledata os.FileInfo
	for _, singleFileInfo := range data {
		if singleFileInfo.Name() == genfile {
			genfiledata = singleFileInfo
			continue
		}
		if !reg.MatchString(path + "/" + singleFileInfo.Name()) {
			continue
		}
		newdata = append(newdata, singleFileInfo)
	}
	// Decide whether the file needs to be regenerated.
	for _, singleFileInfo := range newdata {
		if genfiledata == nil || singleFileInfo.ModTime().After(genfiledata.ModTime()) {
			return newdata, nil
		}
	}
	// FIXME: no incremental update strategy for now.
	return newdata, nil
}
// Extract_tld extracts the top-level-domain provider from rawurl:
// tldTop is used for the query, topDomain is used to find the registrar
// server, and domainName is the domain name being queried.
func Extract_tld(rawurl string) (tldTop, topDomain, domainName string) {
	filename := "./res/data.txt"
	data := openfile(filename)
	host := extract_host(rawurl)
	host_slice := strings.Split(host, ".")
	length := len(host_slice)
	tldTop = host_slice[length-1]
	// fmt.Println(tldTop)
	// Determine the top-level domain and the host name.
	for i := 0; i < length; i++ {
		expr := ""
		for j := i; j < length; j++ {
			expr = expr + "." + host_slice[j]
		}
		reg, _ := regexp.CompilePOSIX("^" + expr[1:])
		// reg, _ := regexp.CompilePOSIX("^" + expr[1:] + "$")
		if reg.MatchString(data) {
			if i != 0 {
				domainName = host_slice[i-1] + expr
			} else {
				domainName = expr
			}
			topDomain = expr[1:]
			return
		}
	}
	return
}
func NewTerm(kind TKind_t, lfactor *Factor, list *List, rfactor *Factor, expr *Expr) (*Term, error) {
	t := new(Term)
	t.Kind = kind
	switch kind {
	case IN, NI:
		t.Left = lfactor
		t.Right = list
	case GT, LT, EQ, NE, GE, LE:
		t.Left = lfactor
		t.Right = rfactor
	case MA, NM:
		t.Left = lfactor
		if v, err := cast2string(rfactor.Value); err != nil {
			return t, err
		} else {
			if t.Right, err = regexp.CompilePOSIX(v); err != nil {
				return t, err
			}
		}
	case EXPR:
		t.Left = nil
		t.Right = expr
	}
	return t, nil
}
func (s *StreamServer) GetRegexp() (map[string][]*regexp.Regexp, error) {
	consulSetting := make(map[string][]*regexp.Regexp)
	kv := s.client.KV()
	pairs, _, err := kv.List(s.ConsulKey, nil)
	if err != nil {
		return consulSetting, err
	}
	size := len(s.ConsulKey) + 1
	for _, value := range pairs {
		if len(value.Key) > size {
			var regs []string
			if err := json.Unmarshal(value.Value, &regs); err == nil {
				var rs []*regexp.Regexp
				for _, v := range regs {
					x, e := regexp.CompilePOSIX(v)
					if e != nil {
						log.Println("get regexp", e)
						continue
					}
					rs = append(rs, x)
				}
				consulSetting[value.Key[size:]] = rs
			}
		}
	}
	return consulSetting, err
}
func main() {
	var re *regexp.Regexp
	var err error
	visited := newVisitTracker()
	if *filter != "" {
		re, err = regexp.CompilePOSIX(*filter)
		if err != nil {
			log.Fatal("Failed to compile regex", err)
		}
	}
	url, err := url.ParseRequestURI(*webRoot)
	if err != nil {
		log.Fatal("Invalid root URL:", err)
	}
	host = url.Scheme + "://" + url.Host
	// Probe the root page.
	rootPage, err := getPage(*webRoot)
	if err != nil {
		log.Fatal("Failed to get root page", err)
	}
	urls := extractURLs(rootPage)
	walkUrls(visited, urls, *webRoot, re, *ft)
}
// NewPOSIX compiles the provided POSIX regex.
func NewPOSIX(regex string) (*Regexp, error) {
	re, err := regexp.CompilePOSIX(regex)
	if err != nil {
		return nil, err
	}
	return &Regexp{RE: re}, nil
}
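// Usage sketch (illustrative, not part of the original source); it assumes the NewPOSIX
// wrapper above is in scope and shows how a compile error surfaces to the caller.
func exampleNewPOSIX() {
	wrapped, err := NewPOSIX("^[a-z]+$")
	if err != nil {
		log.Fatal(err) // an invalid pattern such as "[" would land here
	}
	fmt.Println(wrapped.RE.MatchString("hello")) // true
}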
// ieManage handles the ie and ru top-level domains: it extracts the registrant
// name; these registries expose no phone number or e-mail address.
func ieManage(details string) (regName, regPhone, regEmail string) {
	re, _ := regexp.CompilePOSIX("person:.*|registrar:.*|Registrar Name:.*")
	regName = re.FindString(details)
	if len(regName) != 0 {
		regName = strings.TrimSpace(strings.Split(regName, ":")[1])
	}
	return
}
func (e *Excludes) Set(value string) error {
	r, err := regexp.CompilePOSIX(value)
	if err == nil {
		*e = append(*e, *r)
	} else {
		log.Error("Error:", err.Error())
	}
	return nil
}
func (c *Condition) AddRegexp(r map[string]string) error {
	for field, value := range r {
		reg, err := regexp.CompilePOSIX(value)
		if err != nil {
			return err
		}
		c.Regexp[field] = reg
	}
	return nil
}
// TODO: Move this into a Windows-specific build function and
// make the unix version a no-op.
func LinuxPath(path string) string {
	// Strip the drive prefix, e.g. "c:/".
	// TODO: Need to find a way to deal with paths properly (i.e. what if there are multiple drives?)
	r, _ := regexp.CompilePOSIX("([a-zA-Z]:)(\\.*)")
	if r.MatchString(path) {
		path = r.ReplaceAllString(path, "$2")
	}
	path = strings.Replace(path, "\\", "/", -1)
	path = strings.Replace(path, "//", "/", -1)
	return path
}
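// Usage sketch (illustrative, not part of the original source); it assumes the LinuxPath
// helper above and shows the drive-prefix stripping and separator rewriting.
func exampleLinuxPath() {
	fmt.Println(LinuxPath(`c:\Users\foo\project`)) // "/Users/foo/project"
	fmt.Println(LinuxPath("/already/unix"))        // unchanged: "/already/unix"
}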
func compile(pattern string, options GrepOptions) (*regexp.Regexp, error) {
	if options.IsPerl {
		if options.IsIgnoreCase && !strings.HasPrefix(pattern, "(?") {
			pattern = "(?i)" + pattern
		}
		return regexp.Compile(pattern)
	}
	if options.IsIgnoreCase {
		pattern = strings.ToLower(pattern)
	}
	return regexp.CompilePOSIX(pattern)
}
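// Usage sketch (illustrative, not part of the original source); it assumes the compile
// helper and GrepOptions type above and shows the two modes: Perl syntax gets a
// case-insensitivity flag prepended, while POSIX mode lower-cases the pattern instead.
func exampleCompileModes() {
	perlRe, _ := compile("Error.*", GrepOptions{IsPerl: true, IsIgnoreCase: true}) // compiles "(?i)Error.*"
	posixRe, _ := compile("Error.*", GrepOptions{IsIgnoreCase: true})              // compiles "error.*" via CompilePOSIX
	fmt.Println(perlRe.MatchString("ERROR: boom"), posixRe.MatchString("error: boom")) // true true
}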
func main() {
	flag.Parse()

	if *showVersionShort || *showVersionLong {
		fmt.Println("0.2")
		return
	}

	conf, err := sup.NewSupfile(*supfile)
	if err != nil {
		log.Fatal(err)
	}

	// Parse network and commands to be run from args.
	network, commands, err := parseArgs(conf)
	if err != nil {
		log.Fatal(err)
	}

	// --only option to filter hosts
	if *onlyHosts != "" {
		expr, err := regexp.CompilePOSIX(*onlyHosts)
		if err != nil {
			log.Fatal(err)
		}

		var hosts []string
		for _, host := range network.Hosts {
			if expr.MatchString(host) {
				hosts = append(hosts, host)
			}
		}
		if len(hosts) == 0 {
			log.Fatal(fmt.Errorf("no hosts match '%v' regexp", *onlyHosts))
		}
		network.Hosts = hosts
	}

	// Create new Stackup app.
	app, err := sup.New(conf)
	if err != nil {
		log.Fatal(err)
	}

	// Run all the commands in the given network.
	err = app.Run(network, commands...)
	if err != nil {
		log.Fatal(err)
	}
}
// InitRegexps initializes a list of compiled regular expressions.
func InitRegexps(exprs []string) ([]*regexp.Regexp, error) {
	result := []*regexp.Regexp{}
	for _, exp := range exprs {
		rexp, err := regexp.CompilePOSIX(exp)
		if err != nil {
			logp.Err("Fail to compile the regexp %s: %s", exp, err)
			return nil, err
		}
		result = append(result, rexp)
	}
	return result, nil
}
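// Usage sketch (illustrative, not part of the original source); it assumes the
// InitRegexps function above and shows that one bad pattern fails the whole batch.
func exampleInitRegexps() {
	patterns := []string{"^ERR", "timeout$"}
	regexps, err := InitRegexps(patterns)
	if err != nil {
		log.Fatal(err) // e.g. an unbalanced "(" in any entry would abort the list
	}
	fmt.Println(len(regexps)) // 2
}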
func genPatternMatcher(
	pattern string,
	sel func(last, current []byte) []byte,
) (matcher, error) {
	reg, err := regexp.CompilePOSIX(pattern)
	if err != nil {
		return nil, err
	}
	matcher := func(last, current []byte) bool {
		line := sel(last, current)
		return reg.Match(line)
	}
	return matcher, nil
}
func (tj *toJSON) Convert(w io.Writer) error {
	var doc, err = xliffFromFile(tj.inFile)
	if err != nil {
		return err
	}

	var keyTrans = func(in string) string { return in }
	if tj.keyMatch != "" {
		rx, err := regexp.CompilePOSIX(tj.keyMatch)
		if err != nil {
			return err
		}
		keyTrans = func(in string) string {
			return rx.ReplaceAllString(in, tj.keyTo)
		}
	}

	var mappings = map[string]string{}
	for _, file := range doc.File {
		// Note: no support for "groups" yet.
		for _, unit := range file.Body.TransUnit {
			unitID := keyTrans(unit.ID)
			if _, exist := mappings[unitID]; exist {
				log.Printf("warning: double entry for key %q", unitID)
			}
			mappings[unitID] = unit.Target.Inner
		}
	}

	var out []byte
	if tj.pretty {
		out, err = json.MarshalIndent(&mappings, "", "\t")
	} else {
		out, err = json.Marshal(&mappings)
	}
	if err == nil {
		w.Write(out)
	}
	return err
}
func (ss *SocketServer) AttachMachine(msg AttachControlMessage) error {
	fmt.Printf("[AttachToMachine] %s\n", msg.Key)
	if msg.Token == "" {
		return fmt.Errorf("Cannot attach with an empty msg.Token string!")
	}
	machine, err := ss.getMachine(msg.Key)
	if err != nil {
		return err
	}
	defer machine.release()
	fmt.Printf("[AttachToMachine] Msg.Token %s --> Machine %s\n", msg.Token, msg.Key)
	if machine.State != MACHINE_STATE_FREE {
		if machine.State == MACHINE_STATE_ATTACHED && machine.Token == msg.Token {
			fmt.Printf("[AttachToMachine] Duplicate attach detected, will ignore: %+v\n", machine)
			return nil
		}
		return fmt.Errorf("machine %+v is not free!", machine)
	}
	if exp, err := regexp.CompilePOSIX(msg.OriginPattern); err != nil {
		return err
	} else {
		machine.OriginRegexp = exp
	}
	// TODO: these snapshot names should be application names;
	// the running and drone bits can be inferred.
	if err := machine.startRPC(msg); err != nil {
		return fmt.Errorf("Could not start VM for machine %s:\n%s\n", machine.Key, err.Error())
	}
	machine.State = MACHINE_STATE_ATTACHED
	machine.Token = msg.Token
	machine.screenState.Width = 0
	machine.screenState.Height = 0
	return nil
}
func MatchRegexp(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: global object
	// stack -2: regexp string
	// stack -3: string or array of strings
	reref := m.Get(-2)
	objref := m.Get(-3)
	if reref.Type() != TypeString {
		m.Push(NewNull())
		return 1, nil
	}
	re, err := regexp.CompilePOSIX(reref.ToString())
	if err != nil {
		return 0, NewMachineException("matchRegex failed, " + err.Error())
	}
	switch objref.Type() {
	case TypeString:
		m.Push(NewBoolean(re.MatchString(objref.ToString())))
	case TypeArray:
		var i uint32
		array := objref.(*Array)
		retval := false
		// Every string element of the array must match for the result to be true.
		for i = 0; i < array.length; i++ {
			val, err := array.GetUInt32Property(i)
			if err == nil && val.Type() == TypeString {
				retval = re.MatchString(val.ToString())
				if !retval {
					break
				}
			}
		}
		m.Push(NewBoolean(retval))
	default:
		return 0, NewMachineException("matchRegex expects a string or an array as parameters")
	}
	return 1, nil
}
func newSubProp_pattern(schema map[string]interface{}, m *schemaProperty) (schemaPropertySub, error) {
	prop_raw, exist := schema["pattern"]
	if !exist {
		return nil, nil
	}

	s := new(schemaPropertySub_pattern)

	prop_s, ok := prop_raw.(string)
	if !ok {
		return nil, ErrInvalidSchemaFormat
	}

	// FIXME: RE2 is not compatible with ECMA-262.
	exp, err := regexp.CompilePOSIX(prop_s)
	if err != nil {
		return nil, ErrInvalidSchemaFormat
	}
	s.value = exp

	return s, nil
}
// extract_tld extracts the top-level-domain provider from rawurl.
func extract_tld(rawurl string) (tld string) {
	filename := "effective_tld_names.dat.txt"
	data := openfile(filename)
	host := extract_host(rawurl)
	host_slice := strings.Split(host, ".")
	length := len(host_slice)
	for i := 0; i < length; i++ {
		expr := ""
		for j := i; j < length; j++ {
			expr = expr + "." + host_slice[j]
		}
		reg, _ := regexp.CompilePOSIX("^" + expr[1:] + "$")
		fmt.Println(reg.MatchString(data))
		if reg.MatchString(data) {
			tld = expr[1:]
			return
		}
	}
	return
}
// generateContainerVersion creates a unique hash for a given Dockerfile.
// It uses the contents of the Dockerfile and any package lock file (package.json, Gemfile etc.).
// Replaces this shell: `echo $(md5Files $(find -L $1 -maxdepth 1 | egrep "(Gemfile.lock|package\.json|Dockerfile)"))`
func (c *DockerCompose) generateContainerVersion(dirName string, dockerfile string) string {
	log.Debug("Looking for %s and related package files in: %s", dockerfile, dirName)

	dir, _ := os.Open(dirName)
	files, _ := dir.Readdir(-1)

	var data []byte
	regex, _ := regexp.CompilePOSIX(fmt.Sprintf("(Gemfile.lock|package\\.json|^%s$)", dockerfile))
	for _, f := range files {
		if regex.MatchString(f.Name()) {
			log.Debug("Found file: %s", f.Name())
			if d, err := ioutil.ReadFile(filepath.Join(dirName, f.Name())); err == nil {
				data = append(data, d...)
			}
		}
	}
	if len(data) == 0 {
		return ""
	}
	return fmt.Sprintf("%x", md5.Sum(data))
}
// filterString matches an input string against a filter that's an array of strings in the form
// ['|', 'grep', 'something', '|', 'grep', '-v', 'notsomething'].
func filterString(input string, filter []string) (output string, err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("filterString() -> %v", e)
		}
	}()
	const (
		modeNull = 1 << iota
		modePipe
		modeGrep
		modeInverseGrep
		modeConsumed
	)
	mode := modeNull
	for _, comp := range filter {
		switch comp {
		case "|":
			if mode != modeNull {
				panic("Invalid pipe placement")
			}
			mode = modePipe
			continue
		case "grep":
			if mode != modePipe {
				panic("grep must be preceded by a pipe")
			}
			mode = modeGrep
		case "-v":
			if mode != modeGrep {
				panic("-v is an option of grep, but grep is missing")
			}
			mode = modeInverseGrep
		default:
			if mode == modeNull {
				panic("unknown filter mode")
			} else if (mode == modeGrep) || (mode == modeInverseGrep) {
				re, err := regexp.CompilePOSIX(comp)
				if err != nil {
					panic(err)
				}
				if re.MatchString(input) {
					// The string matches, but we want inverse grep.
					if mode == modeInverseGrep {
						return "", err
					}
				} else {
					// The string doesn't match, and we want grep.
					if mode == modeGrep {
						return "", err
					}
				}
			} else {
				panic("unrecognized filter syntax")
			}
			// Reset the mode.
			mode = modeNull
		}
	}
	output = input
	return
}
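// Usage sketch (illustrative, not part of the original source); it assumes the filterString
// function above and mirrors the filter format described in its comment.
func exampleFilterString() {
	out, err := filterString("connection timeout on host-3",
		[]string{"|", "grep", "timeout", "|", "grep", "-v", "refused"})
	fmt.Println(out, err) // keeps the line: it matches "timeout" and does not match "refused"
}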
func (p *Mirror) Sync() error {
	log.Stage("Synchronising source/dest folders")

	pkiMgr, err := pki.New()
	pkiMgr.Config.Insecure = true
	if err != nil {
		p.pluginConfig.Ui.Error(fmt.Sprintf("Unable to setup public key infrastructure: %s", err.Error()))
	}

	Config, err := pkiMgr.GetClientTLSConfig()
	if err != nil {
		p.pluginConfig.Ui.Error(fmt.Sprintf("%v", err))
	}

	// Removing shared folders
	if utils.CheckSharedFolders() {
		utils.UnmountSharedFolders()
	}

	// Read volumes for share/watching
	var volumes []string

	// Exclude non-local volumes (e.g. might want to mount a dir on the VM guest)
	for _, v := range utils.ReadComposeVolumes() {
		if _, err := os.Stat(v); err == nil {
			volumes = append(volumes, v)
		}
	}
	// Add PWD if nothing in compose
	dir, _ := os.Getwd()
	if len(volumes) == 0 {
		volumes = append(volumes, mutils.LinuxPath(dir))
	}

	pki.MirrorConfig.ClientTlsConfig = Config

	excludes := make([]regexp.Regexp, len(p.Exclude))
	for i, v := range p.Exclude {
		r, err := regexp.CompilePOSIX(v)
		if err == nil {
			excludes[i] = *r
		} else {
			log.Error("Error parsing Regex:", err.Error())
		}
	}

	options := &sync.Options{Exclude: excludes, Verbose: p.Verbose}

	// Sync and watch all volumes
	for _, v := range volumes {
		log.Step("Syncing contents of '%s' -> '%s'", v, fmt.Sprintf("mirror://%s%s", utils.MirrorHost(), v))
		err = sync.Sync(v, fmt.Sprintf("mirror://%s%s", utils.MirrorHost(), v), options)
		if err != nil {
			log.Error("Error during initial file sync: %v", err)
		}

		log.Step("Monitoring '%s' for changes", v)
		go sync.Watch(v, fmt.Sprintf("mirror://%s%s", utils.MirrorHost(), v), options)
	}

	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, os.Interrupt, os.Kill)
	<-sigChan
	log.Debug("Interrupt received, shutting down")

	return nil
}
func test0(t *testing.T, root string, filter func(pth string) bool, opts *Options, xerrors bool) {
	const (
		cc    = "testdata/ok/cc.y"
		mysql = "testdata/ok/mysql.y"
	)

	var re *regexp.Regexp
	if s := *oRE; s != "" {
		var err error
		re, err = regexp.CompilePOSIX(s)
		if err != nil {
			t.Fatal(err)
		}
	}

	if err := filepath.Walk(root, func(pth string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if info.IsDir() {
			return nil
		}

		ok, err := filepath.Match("*.y", filepath.Base(pth))
		if err != nil {
			t.Fatal(err)
		}

		if !ok || filter != nil && !filter(pth) {
			return nil
		}

		if re != nil && !re.MatchString(pth) {
			return nil
		}

		t0 := time.Now()
		p, err := ProcessFile(token.NewFileSet(), pth, opts)
		t.Log(pth, time.Since(t0))
		if (pth == cc || pth == mysql) && err == nil {
			t.Errorf("%s: should have produced error", cc)
		}
		if err != nil {
			switch x := err.(type) {
			case scanner.ErrorList:
				if pth == cc && len(x) == 1 && strings.Contains(x[0].Error(), " 3 shift/reduce") {
					t.Log(err)
					break
				}

				if pth == mysql {
					a := []string{}
					for _, v := range x {
						a = append(a, v.Error())
					}
					s := strings.Join(a, "\n")
					if len(x) == 2 &&
						strings.Contains(s, "conflicts: 145 shift/reduce") &&
						strings.Contains(s, "conflicts: 2 reduce/reduce") {
						t.Log(err)
						break
					}
				}

				for _, v := range x {
					switch *oNoErr {
					case true:
						t.Log(v)
					default:
						t.Error(v)
					}
				}
			default:
				switch *oNoErr {
				case true:
					t.Logf("%q\n%v", pth, err)
				default:
					t.Errorf("%q\n%v", pth, err)
				}
			}
		}

		if p == nil {
			return nil
		}

		if xerrors {
			var buf bytes.Buffer
			if err := p.SkeletonXErrors(&buf); err != nil {
				t.Error(err)
			} else {
				t.Logf("\n%s", buf.Bytes())
			}
		}

		y := p.y
		if err == nil {
			for si, state := range y.States {
				syms, la := state.Syms0()
				if la != nil {
					syms = append(syms, la)
				}
				stop, err := y.Parser.parse(si, func() *Symbol {
					if len(syms) == 0 {
						return nil
					}
					r := syms[0]
					syms = syms[1:]
					return r
				})
				if stop == si {
					continue
				}

				if err != nil {
					t.Error(err)
				}

				if g, e := stop, si; g != e {
					t.Errorf("state %d not reached (final state %d)", si, stop)
				}
			}
		}

		t.Logf("\tstates %d, parse table entries %d", len(y.States), y.entries)

		if _, err = newBison(pth+".bison", y); err != nil {
			if !os.IsNotExist(err) {
				switch x := err.(type) {
				case scanner.ErrorList:
					for _, v := range x {
						t.Error(v)
					}
				default:
					t.Error(err)
				}
			}
		}

		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
func CompilePOSIX(expr string) (*RegexpTools, error) {
	res, err := regexp.CompilePOSIX(expr)
	if err != nil {
		// Guard against dereferencing a nil *Regexp when the compile fails.
		return nil, err
	}
	return &RegexpTools{*res}, nil
}
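// Usage sketch (illustrative, not part of the original source); it assumes the CompilePOSIX
// wrapper above and exercises the error path that the nil guard protects.
func exampleCompilePOSIXWrapper() {
	if _, err := CompilePOSIX("("); err != nil {
		fmt.Println("invalid pattern:", err) // unbalanced parenthesis is rejected, no panic
	}
}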
func Fuzz(data []byte) int {
	str := data[:len(data)/2]
	sstr := string(str)
	//restrb := data[len(data)/2:]
	restr := string(data[len(data)/2:])

	quoted := regexp.QuoteMeta(sstr)
	req, err := regexp.Compile(quoted)
	if err == nil {
		if !req.MatchString(sstr) {
			panic("quoted is not matched")
		}
	}

	/*
		if isAscii(restrb) && isAscii(str) {
			re2ok, re2matched, re2err := RE2Match(restrb, str)
			re, err := regexp.Compile(restr)
			if (err == nil) != re2ok {
				if !(re2ok && (strings.HasPrefix(err.Error(), "error parsing regexp: invalid UTF-8") ||
					strings.HasPrefix(err.Error(), "error parsing regexp: invalid repeat count") ||
					strings.HasPrefix(err.Error(), "error parsing regexp: invalid escape sequence: `\\C`"))) {
					fmt.Printf("re=%q regexp=%v re2=%v(%v)\n", restr, err, re2ok, re2err)
					panic("regexp and re2 disagree on regexp validity")
				}
			}
			if err == nil {
				matched := re.Match(str)
				if re2matched != matched {
					fmt.Printf("re=%q str=%q regexp=%v re2=%v\n", restr, str, matched, re2matched)
					panic("regexp and re2 disagree on regexp match")
				}
			}
		}
	*/

	score := 0
	for _, ctor := range []func(string) (*regexp.Regexp, error){
		regexp.Compile,
		regexp.CompilePOSIX,
		func(str string) (*regexp.Regexp, error) {
			re, err := regexp.Compile(str)
			if err != nil {
				return re, err
			}
			re.Longest()
			return re, nil
		},
		func(str string) (*regexp.Regexp, error) {
			re, err := regexp.CompilePOSIX(str)
			if err != nil {
				return re, err
			}
			re.Longest()
			return re, nil
		},
	} {
		re, err := ctor(restr)
		if err != nil {
			continue
		}
		score = 1

		prefix, complete := re.LiteralPrefix()
		if complete {
			// https://github.com/golang/go/issues/11175
			if false && !re.MatchString(prefix) {
				panic("complete prefix is not matched")
			}
		} else {
			// https://github.com/golang/go/issues/11172
			if false && re.MatchString(prefix) {
				panic("partial prefix is matched")
			}
		}

		re.SubexpNames()
		re.NumSubexp()
		re.Split(sstr, 1)
		re.Split(sstr, -1)
		re.FindAll(str, 1)
		re.FindAll(str, -1)
		re.FindAllSubmatch(str, 1)
		re.FindAllSubmatch(str, -1)

		str1 := str[:len(str)/2]
		str2 := str[len(str)/2:]
		match := re.FindSubmatchIndex(str1)
		re.Expand(nil, str2, str1, match)
		re.ReplaceAll(str1, str2)
		re.ReplaceAllLiteral(str1, str2)
	}
	return score
}
func main() {
	kilog.Info("DANASI 0.1 starting...")

	var _dnsThis string
	var _dnsNext string
	flag.StringVar(&_dnsThis, "dnsThis", "127.0.0.1:53535", "address on which to listen for DNS")
	flag.StringVar(&_dnsNext, "dnsNext", "8.8.8.8:53", "address to which unrelated requests are forwarded")
	var _transThis string
	var _socksNext string
	flag.StringVar(&_transThis, "transThis", "127.0.0.1:12377", "address to which TCP connections in the IP range should be forwarded")
	flag.StringVar(&_socksNext, "socksNext", "127.0.0.1:2377", "SOCKS5 proxy with which matching names are dialed upon")
	var _ipRange string
	flag.StringVar(&_ipRange, "ipRange", "100.64.0.0/10", "IP range from which addresses are allocated")
	var _namePattern string
	flag.StringVar(&_namePattern, "namePattern", ".*", "regexp (POSIX) matching DNS names to be intercepted")
	flag.Parse()

	kilog.Debug("flags read:")
	kilog.Debug(" dnsThis = %v", _dnsThis)
	kilog.Debug(" dnsNext = %v", _dnsNext)
	kilog.Debug(" transThis = %v", _transThis)
	kilog.Debug(" socksNext = %v", _socksNext)
	kilog.Debug(" ipRange = %v", _ipRange)
	kilog.Debug(" namePattern = %v", _namePattern)

	var err error

	// Validate the command-line arguments.
	dnsThis, err = net.ResolveUDPAddr("udp", _dnsThis)
	if err != nil {
		kilog.Critical("*** malformed dnsThis: %v ***", err.Error())
		os.Exit(-1)
	}
	dnsNext, err = net.ResolveUDPAddr("udp", _dnsNext)
	if err != nil {
		kilog.Critical("*** malformed dnsNext: %v ***", err.Error())
		os.Exit(-1)
	}
	transThis, err = net.ResolveTCPAddr("tcp", _transThis)
	if err != nil {
		kilog.Critical("*** malformed transThis: %v ***", err.Error())
		os.Exit(-1)
	}
	socksNext, err = net.ResolveTCPAddr("tcp", _socksNext)
	if err != nil {
		kilog.Critical("*** malformed socksNext: %v ***", err.Error())
		os.Exit(-1)
	}
	_, ipRange, err = net.ParseCIDR(_ipRange)
	if err != nil {
		kilog.Critical("*** malformed ipRange: %v ***", err.Error())
		os.Exit(-1)
	}
	namePattern, err = regexp.CompilePOSIX(_namePattern)
	if err != nil {
		kilog.Critical("*** malformed namePattern: %v ***", err.Error())
		os.Exit(-1)
	}

	go dnsDispatch()
	tcpLoop()
}
func main() {
	flag.Parse()

	if showHelp {
		fmt.Fprintln(os.Stderr, ErrUsage, "\n\nOptions:")
		flag.PrintDefaults()
		return
	}

	if showVersion {
		fmt.Fprintln(os.Stderr, sup.VERSION)
		return
	}

	conf, err := sup.NewSupfile(supfile)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Parse network and commands to be run from args.
	network, commands, err := parseArgs(conf)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// --only option to filter hosts
	if onlyHosts != "" {
		expr, err := regexp.CompilePOSIX(onlyHosts)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}

		var hosts []string
		for _, host := range network.Hosts {
			if expr.MatchString(host) {
				hosts = append(hosts, host)
			}
		}
		if len(hosts) == 0 {
			fmt.Fprintln(os.Stderr, fmt.Errorf("no hosts match --only '%v' regexp", onlyHosts))
			os.Exit(1)
		}
		network.Hosts = hosts
	}

	// --except option to filter out hosts
	if exceptHosts != "" {
		expr, err := regexp.CompilePOSIX(exceptHosts)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}

		var hosts []string
		for _, host := range network.Hosts {
			if !expr.MatchString(host) {
				hosts = append(hosts, host)
			}
		}
		if len(hosts) == 0 {
			fmt.Fprintln(os.Stderr, fmt.Errorf("no hosts left after --except '%v' regexp", exceptHosts))
			os.Exit(1)
		}
		network.Hosts = hosts
	}

	// Create new Stackup app.
	app, err := sup.New(conf)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Run all the commands in the given network.
	err = app.Run(network, commands...)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
func init() {
	var err error
	sessionRegex, err = regexp.CompilePOSIX(SESSION_REGEX)
	errhandler.Handle("Error compiling regex: ", err)
}