func main() { iniflags.Parse() runtime.GOMAXPROCS(*goMaxProcs) cache = createCache() defer cache.Close() upstreamClient = http.Client{ Transport: &http.Transport{ MaxIdleConnsPerHost: *maxIdleUpstreamConns, }, } var addr string for _, addr = range strings.Split(*httpsListenAddrs, ",") { go serveHttps(addr) } for _, addr = range strings.Split(*listenAddrs, ",") { go serveHttp(addr) } waitForeverCh := make(chan int) <-waitForeverCh }
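Every example in this section follows the same basic shape: flags are declared with the standard flag package (usually as package-level pointers), iniflags.Parse() is called where flag.Parse() would normally go, and the same settings can then come either from the command line or from an ini file named via -config. A minimal, self-contained sketch of that pattern, assuming hypothetical package-level declarations for the listenAddrs and goMaxProcs flags referenced above (the names mirror the proxy example, but the declarations and defaults are reconstructed, not taken from its source):

package main

import (
	"flag"
	"log"
	"net/http"
	"runtime"
	"strings"

	"github.com/vharitonsky/iniflags"
)

// Package-level flag pointers, as the examples in this section assume.
// Names and defaults are illustrative only.
var (
	listenAddrs = flag.String("listenAddrs", ":8080", "comma-separated list of TCP addresses to listen on")
	goMaxProcs  = flag.Int("goMaxProcs", runtime.NumCPU(), "number of OS threads to use")
)

func main() {
	// iniflags.Parse() behaves like flag.Parse(), but also reads values from the
	// ini file passed via -config=/path/to/app.ini (ini keys match flag names);
	// flags given explicitly on the command line override the ini values.
	iniflags.Parse()

	runtime.GOMAXPROCS(*goMaxProcs)

	for _, addr := range strings.Split(*listenAddrs, ",") {
		go func(a string) {
			log.Fatal(http.ListenAndServe(a, nil))
		}(addr)
	}
	select {} // block forever, as the examples above do with a channel receive
}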
func main() { flag.IntVar(&port, "port", 8080, "the port to listen on") flag.StringVar(&serverName, "server_name", "", "the server name") flag.Var(&hosts, "server", "the server to connect to") flag.StringVar(&docroot, "docroot", ".", "the local http www root") iniflags.Parse() initRouters() if len(hosts) == 0 { log.Fatal("you must specify a server") } http2.VerboseLogs = false log.Printf("Listening on :%d", port) p := &proxy{} p.handler = &http2.Transport{ DialTLS: p.dialTLS, AllowHTTP: true, } hdr := &myhandler{proxy: p} err := http.ListenAndServe(fmt.Sprintf(":%d", port), handlers.CombinedLoggingHandler(os.Stdout, hdr)) if err != nil { log.Fatal(err) } }
func LoadConfig() Config { var ( mongo_user = flag.String("mongo_user", "", "MongoDB User") mongo_pass = flag.String("mongo_pass", "", "MongoDB Password") statsd_host = flag.String("statsd_host", "localhost", "StatsD Host") statsd_port = flag.Int("statsd_port", 8125, "StatsD Port") statsd_env = flag.String("statsd_env", "dev", "StatsD metric environment prefix") statsd_cluster = flag.String("statsd_cluster", "0", "StatsD metric cluster prefix") interval = flag.Duration("interval", 5*time.Second, "Polling interval") ) flag.Var(&mongo_addresses, "mongo_address", "List of mongo addresses in host:port format") iniflags.Parse() if len(mongo_addresses) == 0 { mongo_addresses = append(mongo_addresses, "localhost:27017") } cfg := Config{ Interval: *interval, Mongo: Mongo{ Addresses: mongo_addresses, User: *mongo_user, Pass: *mongo_pass, }, Statsd: Statsd{ Host: *statsd_host, Port: *statsd_port, Env: *statsd_env, Cluster: *statsd_cluster, }, } return cfg }
func parse_flags() { iniflags.Parse() var err error for _, s := range srv { sv, err := parse_server(s) if err != nil { log.Print(err) } else { Servers = append(Servers, sv) } } for _, dsvr := range default_server { proto, addr, err := parse_addr(dsvr) if err != nil { log.Fatal(err) } var c *dns.Client if proto == "udp" { c = client_udp } else { c = client_tcp } upsrv := &UpstreamServer{ Addr: addr, Proto: proto, client: c, } DefaultServer = append(DefaultServer, upsrv) } if len(DefaultServer) == 0 { log.Fatal("please specify an -upstream") } a, err := load_domain(blacklist_file) if err != nil { log.Println(err) } else { Blacklist_ips = a } if hostfile == "" { hostfile = GetHost() } if hostfile != "" { record_hosts, err = ReadHosts(hostfile) if err != nil { log.Fatal(err) } } if region_file != "" { ip_region = parse_net(region_file) } }
func main() { iniflags.Parse() backend1 := logging.NewLogBackend(os.Stdout, "", 0) backend1Formatter := logging.NewBackendFormatter(backend1, format) logging.SetBackend(backend1Formatter) //Expand path path := *p if strings.HasPrefix(*p, "~") { usr, _ := user.Current() dir := usr.HomeDir path = strings.Replace(*p, "~", dir, 1) } //Run each scraper _log.Infof("basepath: [%s], scrapers: [%s]", path, *scrapers) for _, s := range strings.Split(*scrapers, ",") { switch s { case "bitstamp": handler_wrapper(path, bitstamp.Scrape, "bitstamp") case "bitfinex": handler_wrapper(path, bitfinex.Scrape, "bitfinex") } } }
func init() { flags() iniflags.Parse() session, err := mgo.Dial(mongoCS) if err != nil { panic(err) } session.SetMode(mgo.Monotonic, true) MongoDB = session.DB(mongoName) }
func main() { // set up recovery defer recoverPanic() // initialize flags iniflags.Parse() // start web server server() }
func main() { iniflags.Parse() fmt.Printf("Config:\n") flag.VisitAll(func(f *flag.Flag) { fmt.Printf("%s=%v\n", f.Name, f.Value) }) fmt.Printf("\n") rand.Seed(time.Now().UnixNano()) runtime.GOMAXPROCS(*goMaxProcs) serverAddrs_ := strings.Split(*serverAddrs, ",") fmt.Printf("Preparing...") key = getRandomKey(*keySize) value = getRandomValue(*valueSize) stats := make([]Stats, *workersCount) for i := 0; i < *workersCount; i++ { stats[i].responseTimeHistogram = make([]uint32, *responseTimeHistogramSize) stats[i].minResponseTime = time.Hour * 24 * 365 } var startTime time.Time defer printStats(stats, &startTime) var worker func(wg *sync.WaitGroup, ch chan int, stats *Stats) switch *clientType { case "original": worker = getWorkerOrg(serverAddrs_) case "new": worker = getWorkerNew(serverAddrs_) default: log.Fatalf("Unknown clientType=[%s]. Expected 'new' or 'original'", *clientType) } fmt.Printf("done\n") fmt.Printf("starting...") startTime = time.Now() ch := make(chan int, 1000000) var wg sync.WaitGroup defer wg.Wait() for i := 0; i < *workersCount; i++ { wg.Add(1) go worker(&wg, ch, &stats[i]) } for i := 0; i < *requestsCount; i++ { ch <- i } close(ch) }
func main() { iniflags.Parse() api := slack.New(*token) //var channel_id string channel_id := getChannelId(*channel, api) var include, exclude *regexp.Regexp var err error if *includes != "" { include, err = regexp.Compile(*includes) if err != nil { fmt.Println("ERROR: Failed to compile `line_includes` regex.") fmt.Println(err) api.PostMessage(channel_id, "==> slackd failed to compile `line_includes` regex.", slack.NewPostMessageParameters()) api.PostMessage(channel_id, err.Error(), slack.NewPostMessageParameters()) os.Exit(2) } } if *excludes != "" { exclude, err = regexp.Compile(*excludes) if err != nil { fmt.Println("ERROR: Failed to compile `line_excludes` regex.") fmt.Println(err) api.PostMessage(channel_id, "==> slackd failed to compile `line_excludes` regex.", slack.NewPostMessageParameters()) api.PostMessage(channel_id, err.Error(), slack.NewPostMessageParameters()) os.Exit(2) } } log, err := tail.TailFile(*file, tail.Config{Follow: true, ReOpen: *reopen, Poll: true}) if err != nil { fmt.Println("ERROR: Could not tail the specified log.") fmt.Println(err) api.PostMessage(channel_id, "==> slackd could not tail the specified log.", slack.NewPostMessageParameters()) api.PostMessage(channel_id, err.Error(), slack.NewPostMessageParameters()) os.Exit(2) } for line := range log.Lines { if (include != nil && include.MatchString(line.Text)) || (exclude != nil && !exclude.MatchString(line.Text)) { api.PostMessage( channel_id, fmt.Sprintf("```%s```", line.Text), slack.NewPostMessageParameters()) } } }
func main() { iniflags.Parse() if *showVersion { fmt.Println(Commit) return } DB, err = sql.Open( "mysql", fmt.Sprintf("%s:%s@tcp(%s:%s)/mysql", *sqlUser, *sqlPass, *sqlHost, *sqlPort), ) if err != nil { log.Fatal(err) } http.HandleFunc("/", statusHandler) log.Fatal(http.ListenAndServe(*httpHost+":"+*httpPort, nil)) }
func main() { iniflags.Parse() unixTime = uint64(time.Now().Unix()) initPow() go regenPowChallenges() go updateTime() loadHtmlFile() var addr string for _, addr = range strings.Split(*listenAddrs, ",") { go serveHttp(addr) } waitForeverCh := make(chan int) <-waitForeverCh }
func main() { flag.Set("allowUnknownFlags", "true") iniflags.Parse() if certPath == nil || len(*certPath) == 0 { fmt.Fprintln(os.Stderr, "You must specify a Certificate Path") os.Exit(1) return } certFolderDB, err := utils.NewFolderDatabase(*certPath, 0444, *certsPerFolder) if err != nil { fmt.Fprintln(os.Stderr, fmt.Sprintf("unable to open Certificate Path: %s: %s", *certPath, err)) os.Exit(1) return } if flag.NArg() < 1 { fmt.Fprintln(os.Stderr, "Must specify the certificate ID to retrieve") os.Exit(1) return } id, err := strconv.ParseUint(flag.Arg(0), 10, 64) if err != nil { fmt.Fprintln(os.Stderr, fmt.Sprintf("unable to parse as integer: %s", err)) os.Exit(1) return } data, err := certFolderDB.Get(id) if err != nil { fmt.Fprintln(os.Stderr, fmt.Sprintf("unable to find CertID: %s", err)) os.Exit(1) return } _, err = os.Stdout.Write(data) if err != nil { fmt.Fprintln(os.Stderr, fmt.Sprintf("unable to write out CertID: %s", err)) os.Exit(1) return } }
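The flag.Set("allowUnknownFlags", "true") call above flips one of the flags that iniflags registers for itself: with it enabled, Parse() does not abort when the -config ini file contains keys this particular binary never defined, which is convenient when several tools share one config file. A minimal sketch of the same idea in isolation (the certPath flag here is only illustrative):

package main

import (
	"flag"
	"fmt"

	"github.com/vharitonsky/iniflags"
)

// Illustrative flag; a shared ini file may also carry keys meant for other tools.
var certPath = flag.String("certPath", "", "path under which certificates are stored")

func main() {
	// Tolerate unknown keys in the shared ini instead of exiting with an error.
	flag.Set("allowUnknownFlags", "true")

	// Reads -config=<file> if given, then applies command-line overrides.
	iniflags.Parse()

	fmt.Println("certPath:", *certPath)
}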
func main() { // Parse any set command line flags iniflags.Parse() config := &Config{iface: *iface, pcapOut: *pcapOut, enableAF: *enableAF, isRunning: true} // On ^C or SIGTERM, gracefully stop anything running sigc := make(chan os.Signal, 1) signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM) go func() { <-sigc log.Print("Received sigterm/sigint, stopping") config.isRunning = false }() // Open pcap log output if err := openPcap(config); err != nil { log.Fatal("Error opening pcap file: ", err) } defer config.pcapFile.Close() // Listen on the interface var sniffer Sniffer if config.enableAF { sniffer = &AfpacketSniffer{} log.Print("Using afpacket to sniff packets") } else { sniffer = &PcapSniffer{} log.Print("Using libpcap to sniff packets") } if err := sniffer.Open(config); err != nil { log.Fatal("Failed to open the sniffer: ", err) } config.sniffer = sniffer defer config.sniffer.Close() log.Printf("Listening on %s\n", config.iface) if err := Listen(config); err != nil { log.Fatal("Listening stopped with an error: ", err) } log.Print("Successful exit") }
func main() { iniflags.Parse() flag.VisitAll(func(f *flag.Flag) { fmt.Printf("%s=%v\n", f.Name, f.Value) }) runtime.GOMAXPROCS(*goMaxProcs) testUri, err := url.Parse(*testUrl) if err != nil { log.Fatalf("Error=[%s] when parsing testUrl=[%s]\n", err, *testUrl) } ch := make(chan int, 100000) bytesRead := make([]int64, *workersCount) wg := &sync.WaitGroup{} for i := 0; i < *workersCount; i++ { wg.Add(1) go worker(ch, wg, testUri, &bytesRead[i]) } log.Printf("Test started\n") startTime := time.Now() for i := 0; i < *requestsCount; i++ { ch <- 1 } close(ch) wg.Wait() duration := time.Since(startTime) seconds := float64(duration) / float64(time.Second) var totalBytesRead int64 for i := 0; i < *workersCount; i++ { totalBytesRead += bytesRead[i] } kbytesRead := float64(totalBytesRead) / float64(1000) qps := float64(*requestsCount) / seconds kbps := kbytesRead / seconds log.Printf("Done\n") log.Printf("%d requests from %d workers in %s\n", *requestsCount, *workersCount, duration) log.Printf("%.0f Kbytes read, %.0f qps, %.0f Kbps\n", kbytesRead, qps, kbps) }
func main() { iniflags.Parse() // Set up logging if lw, err := NewLogWatch(); err != nil { log.Printf("[flag -logfile] '%s' will carry on without the logfile\n", err) } else { http.Handle("/tail1000", lw) } if config.TrimQueries { log.Printf("Testing stemming: '%s'\n", StemQuery("I like to stemming my queries")) // This is a function rather than an init, as it relies on the results of the flags LoadImportance() log.Println(TrimQuery("This is a stemming queries that needs a bit of a trim dream babies")) } // Create a liveQA lqa := NewLiveQA() // Add answer producers count := 0 for _, name := range config.Producers { if f, ok := factory[name]; ok { ap, err := f(name + ".json") if err != nil { log.Printf("[Error initialising %s] %s\n", name, err) } else { lqa.AddProducer(ap) log.Printf("Initialised '%s' answer producer\n", name) count++ } } else { log.Printf("[Error initialising %s] Unrecognised answer producer '%s'\n", name, name) } } log.Printf("Initialised %d of %d total answer producers\n", count, len(config.Producers)) http.Handle("/", lqa) log.Fatal(http.ListenAndServe(":"+strconv.Itoa(config.Port), nil)) }
func NewCTConfig() *CTConfig { ret := &CTConfig{ LogUrl: flag.String("log", "", "URL of the CT Log"), CensysPath: flag.String("censysJson", "", "Path to a Censys.io certificate json dump"), CensysUrl: flag.String("censysUrl", "", "URL to a Censys.io certificate json dump"), CensysStdin: flag.Bool("censysStdin", false, "Read a Censys.io json dump from stdin"), DbConnect: flag.String("dbConnect", "", "DB Connection String"), Verbose: flag.Bool("v", false, "verbose output"), CertPath: flag.String("certPath", "", "Path under which to store full DER-encoded certificates"), CertsPerFolder: flag.Uint64("certsPerFolder", 16384, "Certificates per folder, when stored"), Offset: flag.Uint64("offset", 0, "offset from the beginning"), OffsetByte: flag.Uint64("offsetByte", 0, "byte offset from the beginning, only for censysJson and not compatible with offset"), Limit: flag.Uint64("limit", 0, "limit processing to this many entries"), GeoipDbPath: flag.String("geoipDbPath", "", "Path to GeoIP2-City.mmdb"), NumThreads: flag.Int("numThreads", 1, "Use this many threads per CPU"), HistoricalDays: flag.Int("histDays", 90, "Update this many days of historical data"), } iniflags.Parse() return ret }
func main() { var ( httpPort string globalMux = http.NewServeMux() ) flag.StringVar(&httpPort, "http.port", "9090", "http listening port") //load application properties from *.ini file //Init data access iniflags.Parse() globalMux.Handle("/v1/person/", api.BuildAuthRouter()) Logger.Info("Authentication Server listening on port - ", httpPort) //to handle CORS support //Angular JS may use OPTION for PUT request , it is handled by CORS c := cors.New(cors.Options{ AllowedOrigins: []string{"*"}, AllowedMethods: []string{"GET", "POST", "DELETE", "PUT"}, AllowCredentials: true, AllowedHeaders: []string{"*"}, }) corsHandler := c.Handler(globalMux) responseHandler := api.PreJSONProcessor(corsHandler) //start server s := &http.Server{ Addr: ":" + httpPort, Handler: context.ClearHandler(responseHandler), ReadTimeout: 30 * time.Second, WriteTimeout: 30 * time.Second, MaxHeaderBytes: 1 << 20, } Logger.Fatal(s.ListenAndServe()) }
func init() { iniflags.Parse() if len(*from) == 0 { log.Fatal("No From-Email address supplied") } if len(*to) == 0 { log.Fatal("No To-Email address supplied") } if len(*pw) == 0 { log.Println("Warning: no password supplied") } if len(*user) == 0 { log.Printf("No username supplied, will use %s instead", *from) *user = *from } if len(*mail_srv) == 0 { log.Fatal("No mail server address supplied") } if strings.Compare(*mail_auth, "CRAMMD5") != 0 && strings.Compare(*mail_auth, "PLAIN") != 0 { log.Fatal("No valid authentication method for SMTP supplied") } }
func main() { iniflags.Parse() upstreamHostBytes = []byte(*upstreamHost) cache = createCache() defer cache.Close() upstreamClient = &fasthttp.HostClient{ Addr: *upstreamHost, MaxConns: *maxIdleUpstreamConns, } var addr string for _, addr = range strings.Split(*httpsListenAddrs, ",") { go serveHttps(addr) } for _, addr = range strings.Split(*listenAddrs, ",") { go serveHttp(addr) } waitForeverCh := make(chan int) <-waitForeverCh }
// init the server with specific name. func Init(name string) error { if serverInstance == nil { // read config iniflags.Parse() // read network info readNetInterfaces() // log err := initLog(name, *confLogLevel) if err != nil { return err } // server instance serverInstance = &Server{ name: name, } // init service manager serverInstance.svrmgr, err = NewServerManager(name, *confEtcd) if err != nil { return err } // create RPC client serverInstance.rpccli, err = NewRPCClient() if err != nil { return err } Log.Infof("server %s init success.", name) } return nil }
func startDaemon() { // setupSysTray() go func() { // autoextract self src, _ := osext.Executable() dest := filepath.Dir(src) // save the config.ini (if it exists) if _, err := os.Stat(dest + "/" + *configIni); os.IsNotExist(err) { fmt.Println("First run, unzipping self") err := Unzip(src, dest) fmt.Println("Self extraction, err:", err) } if _, err := os.Stat(dest + "/" + *configIni); os.IsNotExist(err) { flag.Parse() fmt.Println("No config.ini at", *configIni) } else { flag.Set("config", dest+"/"+*configIni) iniflags.Parse() } // setup logging log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile) // see if we are supposed to wait 5 seconds if *isLaunchSelf { launchSelfLater() } var updater = &Updater{ CurrentVersion: version, ApiURL: *updateUrl, BinURL: *updateUrl, DiffURL: "", Dir: "update/", CmdName: *appName, } if updater != nil { go updater.BackgroundRun() } // data, err := Asset("arduino.zip") // if err != nil { // log.Println("arduino tools not found") // } createGlobalConfigMap(&globalConfigMap) //getList() f := flag.Lookup("addr") log.Println("Version:" + version) // hostname hn, _ := os.Hostname() if *hostname == "unknown-hostname" { *hostname = hn } log.Println("Hostname:", *hostname) // turn off garbage collection // this is dangerous, as u could overflow memory //if *isGC { if *gcType == "std" { log.Println("Garbage collection is on using Standard mode, meaning we just let Golang determine when to garbage collect.") } else if *gcType == "max" { log.Println("Garbage collection is on for MAXIMUM real-time collecting on each send/recv from serial port. Higher CPU, but less stopping of the world to garbage collect since it is being done on a constant basis.") } else { log.Println("Garbage collection is off. Memory use will grow unbounded. You WILL RUN OUT OF RAM unless you send in the gc command to manually force garbage collection. Lower CPU, but progressive memory footprint.") debug.SetGCPercent(-1) } ip := "0.0.0.0" log.Print("Starting server and websocket on " + ip + "" + f.Value.String()) //homeTempl = template.Must(template.ParseFiles(filepath.Join(*assets, "home.html"))) log.Println("The Serial Port JSON Server is now running.") log.Println("If you are using ChiliPeppr, you may go back to it and connect to this server.") // see if they provided a regex filter if len(*regExpFilter) > 0 { log.Printf("You specified a serial port regular expression filter: %v\n", *regExpFilter) } // list serial ports portList, _ := GetList(false) /*if errSys != nil { log.Printf("Got system error trying to retrieve serial port list. Err:%v\n", errSys) log.Fatal("Exiting") }*/ log.Println("Your serial ports:") if len(portList) == 0 { log.Println("\tThere are no serial ports to list.") } for _, element := range portList { log.Printf("\t%v\n", element) } if !*verbose { log.Println("You can enter verbose mode to see all logging by starting with the -v command line switch.") log.SetOutput(new(NullWriter)) //route all logging to nullwriter } // launch the hub routine which is the singleton for the websocket server go h.run() // launch our serial port routine go sh.run() // launch our dummy data routine //go d.run() go discoverLoop() r := gin.New() socketHandler := wsHandler().ServeHTTP r.Use(cors.Middleware(cors.Config{ Origins: "https://create.arduino.cc, http://create.arduino.cc, https://create-dev.arduino.cc, http://create-dev.arduino.cc, http://webide.arduino.cc:8080", Methods: "GET, PUT, POST, DELETE", RequestHeaders: "Origin, Authorization, Content-Type", ExposedHeaders: "", MaxAge: 50 * time.Second, Credentials: true, ValidateHeaders: false, })) r.GET("/", homeHandler) r.POST("/upload", uploadHandler) r.GET("/socket.io/", socketHandler) r.POST("/socket.io/", socketHandler) r.Handle("WS", "/socket.io/", socketHandler) r.Handle("WSS", "/socket.io/", socketHandler) go func() { if err := r.RunTLS(*addrSSL, filepath.Join(dest, "cert.pem"), filepath.Join(dest, "key.pem")); err != nil { fmt.Printf("Error trying to bind to port: %v, so exiting...", err) log.Fatal("Error ListenAndServe:", err) } }() if err := r.Run(*addr); err != nil { fmt.Printf("Error trying to bind to port: %v, so exiting...", err) log.Fatal("Error ListenAndServe:", err) } }() }
func main() { iniflags.Parse() runtime.GOMAXPROCS(*goMaxProcs) syncInterval_ := *syncInterval if syncInterval_ <= 0 { syncInterval_ = ybc.ConfigDisableSync } config := ybc.Config{ MaxItemsCount: ybc.SizeT(*maxItemsCount), DataFileSize: ybc.SizeT(*cacheSize) * ybc.SizeT(1024*1024), HotItemsCount: ybc.SizeT(*hotItemsCount), HotDataSize: ybc.SizeT(*hotDataSize), DeHashtableSize: *deHashtableSize, SyncInterval: syncInterval_, } var cache ybc.Cacher var err error cacheFilesPath_ := strings.Split(*cacheFilesPath, ",") cacheFilesCount := len(cacheFilesPath_) log.Printf("Opening data files. This can take a while for the first time if files are big\n") if cacheFilesCount < 2 { if cacheFilesPath_[0] != "" { config.DataFile = cacheFilesPath_[0] + ".go-memcached.data" config.IndexFile = cacheFilesPath_[0] + ".go-memcached.index" } cache, err = config.OpenCache(true) if err != nil { log.Fatalf("Cannot open cache: [%s]", err) } } else if cacheFilesCount > 1 { config.MaxItemsCount /= ybc.SizeT(cacheFilesCount) config.DataFileSize /= ybc.SizeT(cacheFilesCount) var configs ybc.ClusterConfig configs = make([]*ybc.Config, cacheFilesCount) for i := 0; i < cacheFilesCount; i++ { cfg := config cfg.DataFile = cacheFilesPath_[i] + ".go-memcached.data" cfg.IndexFile = cacheFilesPath_[i] + ".go-memcached.index" configs[i] = &cfg } cache, err = configs.OpenCluster(true) if err != nil { log.Fatalf("Cannot open cache cluster: [%s]", err) } } defer cache.Close() log.Printf("Data files have been opened\n") s := memcache.Server{ Cache: cache, ListenAddr: *listenAddr, ReadBufferSize: *readBufferSize, WriteBufferSize: *writeBufferSize, OSReadBufferSize: *osReadBufferSize, OSWriteBufferSize: *osWriteBufferSize, } log.Printf("Starting the server") if err := s.Serve(); err != nil { log.Fatalf("Cannot serve traffic: [%s]", err) } }
func main() { iniflags.Parse() go ConsoleInput() Connect() channels = make(map[string]int) gLevelID = make(map[int]int) gUserName = make(map[int]string) gLevel = make(map[int]string) splitchannel := strings.Split(channellist, ",") for i := range splitchannel { channels[splitchannel[i]] = i + 1 } fmt.Fprintf(conn, "USER %s 8 * :%s\r\n", nick, nick) fmt.Fprintf(conn, "PASS %s\r\n", oauth) fmt.Fprintf(conn, "NICK %s\r\n", nick) fmt.Fprintf(conn, "CAP REQ :twitch.tv/membership twitch.tv/tags twitch.tv/commands\r\n") fmt.Printf("Channels: ") //Looping through all the channels for k, i := range channels { fmt.Fprintf(conn, "JOIN %s\r\n", k) fmt.Printf("#%d: %s, ", i, blue(k)) } fmt.Printf("\nInserted information to server...\n") //Initialize DB = Create tables & add streamers InitDB() defer conn.Close() reader2 := bufio.NewReader(conn) tp := textproto.NewReader(reader2) go ConsoleInput() for { line, err := tp.ReadLine() if err != nil { break // break loop on errors } var ( username string channel string irc map[string]string tags map[string]string isTags bool ) irc = parseIRC(line) if irc["tags"] != "" { tags = parseTags(irc["tags"]) isTags = true } go logIRC(irc) switch irc["command"] { case "PING": fmt.Fprintf(conn, "PONG %s\r\n", irc["trailing"]) if debug { fmt.Printf(info("PONG\n")) } case "PRIVMSG": if isTags { username = tags["display-name"] } if username == "" { split := strings.Split(irc["prefix"], "!") username = strings.Replace(split[0], ":", "", 1) } msg := strings.Replace(irc["trailing"], ":", "", 1) fmt.Printf("[%s] %s <%s> %s\n", time.Now().Format("15:04"), blue(irc["params"]), fmtName(tags["color"], username, tags["subscriber"], tags["turbo"], tags["user-type"]), white(msg)) go CmdInterpreter(irc["params"], username, msg) //fmt.Printf("%q\n", irc) case "USERNOTICE": if tags["display-name"] == "" { username = tags["login"] } else { username = tags["display-name"] } switch tags["room-id"] { case "22121645": channel = "#retku" case "35032693": channel = "#herramustikka" default: channel = "#derp" } months := tags["msg-param-months"] writeSubs(channel, username, months) default: if debug { fmt.Printf("%q\n", irc) } } } }
func Setup_config() { // Load config, this should all be refactored because it's awful debug := flag.Bool("debug", false, "enables debug mode") logfile := flag.String("log", "", "file for the log, if empty will log only to stdout") log_syslog := flag.Bool("use_syslog", false, "log only to syslog") bind_address := flag.String("bind_address", "", "IP to listen on") bind_port := flag.String("bind_port", "5358", "port to listen on") all_tcp := flag.Bool("all_tcp", true, "sends all queries over tcp") master := flag.String("master", "", "master to zone transfer from") query_dest := flag.String("queries", "", "nameserver to query to grok zone state") zone_file_path_raw := flag.String("zone_path", "", "path to write zone files") query_timeout_raw := flag.Int("query_timeout", 10, "seconds before output dns queries timeout from slappy") transfer_source_raw := flag.String("transfer_source", "", "source IP for zone transfers") transfer_source = nil allow_notify_raw := flag.String("allow_notify", "", "comma-separated list of IPs allowed to query slappy") allow_notify := []string{} limit_rndc := flag.Bool("limit_rndc", false, "enables limiting concurrent rndc calls with rndc_timeout, rndc_limit") rndc_timeout_raw := flag.Int("rndc_timeout", 25, "seconds before waiting rndc call will abort") rndc_limit := flag.Int("rndc_limit", 50, "number of concurrent rndc calls allowed if limit_rndc=true") status_file := flag.String("status_file", "", "path to write a status file, empty means no status file") status_interval_raw := flag.Int("status_interval", 60, "seconds to wait between status file writes") stats_uri := flag.String("stats_uri", "/stats.", "hostname to dig for to get stats, should be an invalid dns name!") flag.Usage = func() { flag.PrintDefaults() } // You can specify an .ini file with the -config iniflags.Parse() // Parse the transfer_source IP into the proper type if *transfer_source_raw != "" { transfer_source = &net.TCPAddr{IP: net.ParseIP(*transfer_source_raw)} } if *allow_notify_raw != "" { for _, ip := range strings.Split((*allow_notify_raw), ",") { allow_notify = append(allow_notify, strings.TrimSpace(ip)) } } query_timeout := time.Duration(*query_timeout_raw) * time.Second rndc_timeout := time.Duration(*rndc_timeout_raw) * time.Second status_interval := time.Duration(*status_interval_raw) * time.Second zone_file_path := *zone_file_path_raw if !strings.HasSuffix(zone_file_path, "/") { zone_file_path = zone_file_path + "/" } // Set up rndc rate limiter if *limit_rndc == true { rndc_counter = make(chan string, *rndc_limit) } conf = Config{ Debug: *debug, Logfile: *logfile, Log_syslog: *log_syslog, Bind_address: *bind_address, Bind_port: *bind_port, All_tcp: *all_tcp, Master: *master, Query_dest: *query_dest, Zone_file_path: zone_file_path, Query_timeout: query_timeout, Transfer_source: transfer_source, Allow_notify: allow_notify, Limit_rndc: *limit_rndc, Rndc_timeout: rndc_timeout, Rndc_limit: *rndc_limit, Rndc_counter: rndc_counter, Status_file: *status_file, Status_interval: status_interval, Stats_uri: *stats_uri, } }
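Setup_config reads timeouts as plain integers and multiplies them by time.Second; the LoadConfig example near the top of this section shows the alternative, flag.Duration, which parses values such as "10s" or "1m" directly, both on the command line and in an ini file handled by iniflags. A minimal hedged sketch of that alternative (the flag name and default are illustrative):

package main

import (
	"flag"
	"fmt"
	"time"

	"github.com/vharitonsky/iniflags"
)

// Accepts -query_timeout=15s on the command line or "query_timeout = 15s" in the ini file.
var queryTimeout = flag.Duration("query_timeout", 10*time.Second, "timeout for outgoing DNS queries")

func main() {
	iniflags.Parse()
	fmt.Println("query timeout:", *queryTimeout)
}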
func main() { iniflags.Parse() fmt.Println(`Backup Eonza Files v`+VERSION, ` beta, (c) Novostrim OOO, 2015`) fmt.Println(`Help: `, HOMEPAGE) if flags.help { switch runtime.GOOS { case "linux": exec.Command("xdg-open", HOMEPAGE).Start() case "windows": exec.Command(`rundll32.exe`, `url.dll,FileProtocolHandler`, HOMEPAGE).Start() case "darwin": exec.Command("open", HOMEPAGE).Start() } } dirs = make(map[string]bool) if !IsEmpty(flags.log) { flags.log, _ = filepath.Abs(flags.log) f, err := os.OpenFile(flags.log, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) if err != nil { log.Fatalf("Error opening log file: %v", err) } defer f.Close() fmt.Println(`Log-file: `, flags.log) log.SetOutput(f) } log.Println("Start backupenz") for IsEmpty(flags.url) { fmt.Print("Eonza URL: ") fmt.Scanln(&flags.url) } var err error enzurl, err = url.Parse(flags.url) if err != nil { log.Fatalln("ERROR! Eonza URL is not valid") } if !strings.HasSuffix(enzurl.Path, `/`) { enzurl.Path += `/` } cookieJar, _ := cookiejar.New(nil) client = &http.Client{ Jar: cookieJar, } for { for IsEmpty(flags.login) { fmt.Print("Login: "******"Password: "******"Storage path: ", storage) if flags.mirror { getfulllist() } loadfiles(getmaxid()) if downloaded == 0 { log.Println(`There are no files to download`) } if flags.mirror { todel := 0 for key := range fullList { if base := filepath.Base(key); strings.HasPrefix(base, `_`) { continue } todel++ } isdel := `Y` if IsEmpty(flags.log) && todel > 0 { for { fmt.Print(todel, ` files have not been found on the server. Would you like to delete them locally? Yes[Y]/No[N]: `) fmt.Scanln(&isdel) isdel = strings.ToUpper(isdel) if isdel == `Y` || isdel == `N` { break } } } if isdel == `Y` { for key := range fullList { if os.Remove(key) == nil { log.Println(`Delete:`, key) } } } } if flags.db { createbackup() } log.Println("backupenz has been successfully finished") }
func main() { iniflags.Parse() app := goshin.NewGoshin() app.Address = fmt.Sprintf("%s:%d", *hostPtr, *portPtr) app.EventHost = *eventHostPtr app.Interval = *intervalPtr if len(*tagPtr) != 0 { app.Tag = strings.Split(*tagPtr, ",") } app.Ttl = float32(*ttlPtr) // iface ifaces := make(map[string]bool) if len(*ifacesPtr) != 0 { for _, iface := range strings.Split(*ifacesPtr, ",") { ifaces[iface] = true } } app.Ifaces = ifaces ignoreIfaces := make(map[string]bool) if len(*ignoreIfacesPtr) != 0 { for _, ignoreIface := range strings.Split(*ignoreIfacesPtr, ",") { ignoreIfaces[ignoreIface] = true } } app.IgnoreIfaces = ignoreIfaces // devices devices := make(map[string]bool) if len(*devicesPtr) != 0 { for _, device := range strings.Split(*devicesPtr, ",") { devices[device] = true } } app.Devices = devices ignoreDevices := make(map[string]bool) if len(*ignoreDevicesPtr) != 0 { for _, ignoreDevice := range strings.Split(*ignoreDevicesPtr, ",") { ignoreDevices[ignoreDevice] = true } } app.IgnoreDevices = ignoreDevices // threshold cpuThreshold := goshin.NewThreshold() cpuThreshold.Critical = *cpuCriticalPtr cpuThreshold.Warning = *cpuWarningPtr app.Thresholds["cpu"] = cpuThreshold loadThreshold := goshin.NewThreshold() loadThreshold.Critical = *loadCriticalPtr loadThreshold.Warning = *loadWarningPtr app.Thresholds["load"] = loadThreshold memoryThreshold := goshin.NewThreshold() memoryThreshold.Critical = *memoryCriticalPtr memoryThreshold.Warning = *memoryWarningPtr app.Thresholds["memory"] = memoryThreshold diskThreshold := goshin.NewThreshold() diskThreshold.Critical = *diskCriticalPtr diskThreshold.Warning = *diskWarningPtr app.Thresholds["disk"] = diskThreshold checks := make(map[string]bool) if len(*checksPtr) != 0 { for _, check := range strings.Split(*checksPtr, ",") { checks[check] = true } } app.Checks = checks app.Start() }
func main() { flag.StringVar(&Config.bind, "bind", "127.0.0.1:8080", "host to bind to (default: 127.0.0.1:8080)") flag.StringVar(&Config.filesDir, "filespath", "files/", "path to files directory") flag.StringVar(&Config.metaDir, "metapath", "meta/", "path to metadata directory") flag.BoolVar(&Config.noLogs, "nologs", false, "remove stdout output for each request") flag.BoolVar(&Config.allowHotlink, "allowhotlink", false, "Allow hotlinking of files") flag.StringVar(&Config.siteName, "sitename", "linx", "name of the site") flag.StringVar(&Config.siteURL, "siteurl", "http://"+Config.bind+"/", "site base url (including trailing slash)") flag.Int64Var(&Config.maxSize, "maxsize", 4*1024*1024*1024, "maximum upload file size in bytes (default 4GB)") flag.StringVar(&Config.certFile, "certfile", "", "path to ssl certificate (for https)") flag.StringVar(&Config.keyFile, "keyfile", "", "path to ssl key (for https)") flag.BoolVar(&Config.realIp, "realip", false, "use X-Real-IP/X-Forwarded-For headers as original host") flag.BoolVar(&Config.fastcgi, "fastcgi", false, "serve through fastcgi") flag.BoolVar(&Config.remoteUploads, "remoteuploads", false, "enable remote uploads") flag.StringVar(&Config.authFile, "authfile", "", "path to a file containing newline-separated scrypted auth keys") flag.StringVar(&Config.remoteAuthFile, "remoteauthfile", "", "path to a file containing newline-separated scrypted auth keys for remote uploads") flag.StringVar(&Config.contentSecurityPolicy, "contentsecuritypolicy", "default-src 'self'; img-src 'self' data:; style-src 'self' 'unsafe-inline'; referrer origin;", "value of default Content-Security-Policy header") flag.StringVar(&Config.fileContentSecurityPolicy, "filecontentsecuritypolicy", "default-src 'none'; img-src 'self'; object-src 'self'; media-src 'self'; style-src 'self' 'unsafe-inline'; referrer origin;", "value of Content-Security-Policy header for file access") flag.StringVar(&Config.xFrameOptions, "xframeoptions", "SAMEORIGIN", "value of X-Frame-Options header") iniflags.Parse() mux := setup() if Config.fastcgi { listener, err := net.Listen("tcp", Config.bind) if err != nil { log.Fatal("Could not bind: ", err) } log.Printf("Serving over fastcgi, bound on %s, using siteurl %s", Config.bind, Config.siteURL) fcgi.Serve(listener, mux) } else if Config.certFile != "" { log.Printf("Serving over https, bound on %s, using siteurl %s", Config.bind, Config.siteURL) err := graceful.ListenAndServeTLS(Config.bind, Config.certFile, Config.keyFile, mux) if err != nil { log.Fatal(err) } } else { log.Printf("Serving over http, bound on %s, using siteurl %s", Config.bind, Config.siteURL) err := graceful.ListenAndServe(Config.bind, mux) if err != nil { log.Fatal(err) } } }
func main() { flag.Parse() if *genCert == true { generateCertificates() os.Exit(0) } if *hibernate == false { go func() { // autoextract self src, _ := osext.Executable() dest := filepath.Dir(src) os.Mkdir(tempToolsPath, 0777) hideFile(tempToolsPath) if embedded_autoextract { // save the config.ini (if it exists) if _, err := os.Stat(dest + "/" + *configIni); os.IsNotExist(err) { log.Println("First run, unzipping self") err := Unzip(src, dest) log.Println("Self extraction, err:", err) } if _, err := os.Stat(dest + "/" + *configIni); os.IsNotExist(err) { flag.Parse() log.Println("No config.ini at", *configIni) } else { flag.Parse() flag.Set("config", dest+"/"+*configIni) iniflags.Parse() } } else { flag.Set("config", dest+"/"+*configIni) iniflags.Parse() } // move CORS to config file compatibility, Vagrant version if *origins == "" { log.Println("Patching config.ini for compatibility") f, err := os.OpenFile(dest+"/"+*configIni, os.O_APPEND|os.O_WRONLY, 0666) if err != nil { panic(err) } _, err = f.WriteString("\norigins = http://webide.arduino.cc:8080\n") if err != nil { panic(err) } f.Close() restart("") } //log.SetFormatter(&log.JSONFormatter{}) log.SetLevel(log.InfoLevel) log.SetOutput(os.Stderr) // see if we are supposed to wait 5 seconds if *isLaunchSelf { launchSelfLater() } if embedded_autoupdate { var updater = &Updater{ CurrentVersion: version, ApiURL: *updateUrl, BinURL: *updateUrl, DiffURL: "", Dir: "update/", CmdName: *appName, } if updater != nil { updater_job := func() { go updater.BackgroundRun() } scheduler.Every(5).Minutes().Run(updater_job) } } log.Println("Version:" + version) // hostname hn, _ := os.Hostname() if *hostname == "unknown-hostname" { *hostname = hn } log.Println("Hostname:", *hostname) // turn off garbage collection // this is dangerous, as u could overflow memory //if *isGC { if *gcType == "std" { log.Println("Garbage collection is on using Standard mode, meaning we just let Golang determine when to garbage collect.") } else if *gcType == "max" { log.Println("Garbage collection is on for MAXIMUM real-time collecting on each send/recv from serial port. Higher CPU, but less stopping of the world to garbage collect since it is being done on a constant basis.") } else { log.Println("Garbage collection is off. Memory use will grow unbounded. You WILL RUN OUT OF RAM unless you send in the gc command to manually force garbage collection. Lower CPU, but progressive memory footprint.") debug.SetGCPercent(-1) } // see if they provided a regex filter if len(*regExpFilter) > 0 { log.Printf("You specified a serial port regular expression filter: %v\n", *regExpFilter) } // list serial ports portList, _ := GetList(false) log.Println("Your serial ports:") if len(portList) == 0 { log.Println("\tThere are no serial ports to list.") } for _, element := range portList { log.Printf("\t%v\n", element) } if !*verbose { log.Println("You can enter verbose mode to see all logging by starting with the -v command line switch.") log.SetOutput(new(NullWriter)) //route all logging to nullwriter } // launch the hub routine which is the singleton for the websocket server go h.run() // launch our serial port routine go sh.run() // launch our dummy data routine //go d.run() go discoverLoop() r := gin.New() socketHandler := wsHandler().ServeHTTP extraOriginStr := "https://create.arduino.cc, http://create.arduino.cc, https://create-dev.arduino.cc, http://create-dev.arduino.cc, http://create-staging.arduino.cc, https://create-staging.arduino.cc" for i := 8990; i < 9001; i++ { extraOriginStr = extraOriginStr + ", http://localhost:" + strconv.Itoa(i) + ", https://localhost:" + strconv.Itoa(i) } r.Use(cors.Middleware(cors.Config{ Origins: *origins + ", " + extraOriginStr, Methods: "GET, PUT, POST, DELETE", RequestHeaders: "Origin, Authorization, Content-Type", ExposedHeaders: "", MaxAge: 50 * time.Second, Credentials: true, ValidateHeaders: false, })) r.LoadHTMLFiles("templates/nofirefox.html") r.GET("/", homeHandler) r.GET("/certificate.crt", certHandler) r.DELETE("/certificate.crt", deleteCertHandler) r.POST("/upload", uploadHandler) r.GET("/socket.io/", socketHandler) r.POST("/socket.io/", socketHandler) r.Handle("WS", "/socket.io/", socketHandler) r.Handle("WSS", "/socket.io/", socketHandler) r.GET("/info", infoHandler) r.POST("/killbrowser", killBrowserHandler) r.POST("/pause", pauseHandler) go func() { // check if certificates exist; if not, use plain http if _, err := os.Stat(filepath.Join(dest, "cert.pem")); os.IsNotExist(err) { return } start := 8990 end := 9000 i := start for i < end { i = i + 1 portSSL = ":" + strconv.Itoa(i) if err := r.RunTLS(portSSL, filepath.Join(dest, "cert.pem"), filepath.Join(dest, "key.pem")); err != nil { log.Printf("Error trying to bind to port: %v, so exiting...", err) continue } else { ip := "0.0.0.0" log.Print("Starting server and websocket (SSL) on " + ip + "" + port) break } } }() go func() { start := 8990 end := 9000 i := start for i < end { i = i + 1 port = ":" + strconv.Itoa(i) if err := r.Run(port); err != nil { log.Printf("Error trying to bind to port: %v, so exiting...", err) continue } else { ip := "0.0.0.0" log.Print("Starting server and websocket on " + ip + "" + port) break } } }() }() } setupSysTray() }
func main() { //flag.Parse() iniflags.Parse() svcConfig := &service.Config{ Name: "proxy3", DisplayName: "Sliq Proxy3 Service", Description: "Proxy cache server for Live HLS streams", Arguments: []string{}, } for _, arg := range os.Args[1:] { if !strings.HasPrefix(arg, "-service=") { svcConfig.Arguments = append(svcConfig.Arguments, arg) } } /*fmt.Println(svcConfig.Arguments) return*/ // initialize the generic log file if Settings.LogSetting.Filename != "" { log.SetOutput(&lumberjack.Logger{ Filename: Settings.LogSetting.Filename, MaxSize: Settings.LogSetting.MaxSize, MaxBackups: Settings.LogSetting.MaxBackups, MaxAge: Settings.LogSetting.MaxAge, LocalTime: Settings.LogSetting.LocalTime, }) } // initialize the access log file if true { accessLog = NewAccessLog(Settings.AccessLogSetting) } initUpstreamClients() for _, client := range upstreamClients { logMessage("upstreamClient \"%s\": \"%s\"", client.name, client.upstreamHost) } runtime.GOMAXPROCS(Settings.GoMaxProcs) //c := make(chan os.Signal, 1) //signal.Notify(c, os.Interrupt) //<-c //logMessage("ctrl-c is captured") prg := &program{} s, err := service.New(prg, svcConfig) if err != nil { log.Fatal(err) } errs := make(chan error, 5) logger, err = s.Logger(errs) if err != nil { log.Fatal(err) } if len(*svcFlag) != 0 { err := service.Control(s, *svcFlag) if err != nil { log.Printf("Valid actions: %q\n", service.ControlAction) log.Fatal(err) } return } go func() { for { err := <-errs if err != nil { log.Print(err) } } }() if len(*svcFlag) != 0 { err := service.Control(s, *svcFlag) if err != nil { log.Printf("Valid actions: %q\n", service.ControlAction) log.Fatal(err) } return } err = s.Run() if err != nil { logger.Error(err) } //Start(nil) }
func main() { iniflags.Parse() go ConsoleInput() Connect() channels = make(map[string]int) g_levelId = make(map[int]int) g_userName = make(map[int]string) g_level = make(map[int]string) splitchannel := strings.Split(channellist, ",") for i := range splitchannel { b := i b++ channels[splitchannel[i]] = b } fmt.Fprintf(conn, "USER %s 8 * :%s\r\n", nick, nick) fmt.Fprintf(conn, "PASS %s\r\n", oauth) fmt.Fprintf(conn, "NICK %s\r\n", nick) fmt.Fprintf(conn, "CAP REQ :twitch.tv/membership twitch.tv/tags twitch.tv/commands\r\n") fmt.Printf("Channels: ") //Looping through all the channels for k, i := range channels { fmt.Fprintf(conn, "JOIN %s\r\n", k) fmt.Printf("#%d: %s, ", i, blue(k)) } fmt.Printf("\nInserted information to server...\n") defer conn.Close() reader2 := bufio.NewReader(conn) tp := textproto.NewReader(reader2) go ConsoleInput() for { line, err := tp.ReadLine() if err != nil { break // break loop on errors } var ( username string irc map[string]string tags map[string]string isTags bool ) irc = parseIRC(line) if irc["tags"] != "" { tags = parseTags(irc["tags"]) isTags = true } go logIRC(irc) switch irc["command"] { case "PING": fmt.Fprintf(conn, "PONG %s\r\n", irc["trailing"]) if debug { fmt.Printf(info("PONG\n")) } case "PRIVMSG": if isTags { username = tags["display-name"] } if username == "" { split := strings.Split(irc["prefix"], "!") username = strings.Replace(split[0], ":", "", 1) } msg := strings.Replace(irc["trailing"], ":", "", 1) fmt.Printf("[%s] %s <%s> %s\n", time.Now().Format("15:04"), blue(irc["params"]), fmtName(tags["@color"], username, tags["subscriber"], tags["turbo"], tags["user-type"]), white(msg)) go CmdInterpreter(irc["params"], username, msg) //fmt.Printf("%q\n", irc) default: if debug { fmt.Printf("%q\n", irc) } } } }