func main() { err := gcfg.ReadFileInto(&cfg, "configAlert.gcfg") if err != nil { log.Fatalf("Failed to parse gcfg data: %s", err) } if color != "y" { fmt.Println("+=====================================+") fmt.Println("| Checking for page update......... |") fmt.Println("+=====================================+") fmt.Println("Use CTRL+C to Exit") } else { fmt.Println(CLR_G + "+=====================================+" + CLR_N) fmt.Println(CLR_G + "| Checking for page update......... |" + CLR_N) fmt.Println(CLR_G + "+=====================================+" + CLR_N) fmt.Println(CLR_R + "Use CTRL+C to Exit" + CLR_N) } c := cron.New() c.AddFunc(cfg.Pushbullet.Cron, func() { getPackPubFree() }) t := cron.New() t.AddFunc(cfg.Pushbullet.Traffic, func() { getTUTrafficPic() }) go c.Start() go t.Start() sig := make(chan os.Signal) signal.Notify(sig, os.Interrupt, os.Kill) <-sig }
// New creates a new scheduler to manage task scheduling and states func New(queueUri string, cacheUri string) (Metre, error) { if cacheUri == "" { cacheUri = LOCALHOST + ":" + CACHEPORT } else if strings.Index(cacheUri, ":") == 0 { cacheUri = LOCALHOST + ":" + cacheUri } if queueUri == "" { queueUri = LOCALHOST + ":" + QUEUEPORT } else if strings.Index(queueUri, ":") == 0 { queueUri = LOCALHOST + ":" + queueUri } cron := *cron.New() c, cErr := NewCache(cacheUri) if cErr != nil { return Metre{}, cErr } q, qErr := NewQueue(queueUri) if qErr != nil { return Metre{}, qErr } s := NewScheduler(q, c) m := make(map[string]Task) return Metre{cron, q, c, s, m}, nil }
// New creates a new scheduler to manage task scheduling and states func New(queueUri string, trackQueueUri string, maxParallel int) (*Metre, error) { if queueUri == "" { queueUri = LOCALHOST + ":" + QUEUEPORT } else if strings.Index(queueUri, ":") == 0 { queueUri = LOCALHOST + ":" + queueUri } if trackQueueUri == "" { trackQueueUri = LOCALHOST + ":" + TRACKQUEUEPORT } else if strings.Index(trackQueueUri, ":") == 0 { trackQueueUri = LOCALHOST + ":" + trackQueueUri } cron := *cron.New() q, qErr := NewQueue(queueUri) if qErr != nil { return nil, qErr } t, tErr := NewQueue(trackQueueUri) if tErr != nil { return nil, tErr } limitChan := make(chan int, maxParallel) m := make(map[string]*Task) s := NewScheduler(q, m) msgChan := make(chan string) return &Metre{cron, q, t, s, m, msgChan, limitChan}, nil }
func main() { manualCorrection, _ := strconv.Atoi(os.Getenv("CORRECTION")) c := cron.New() c.AddFunc("0 35 13 * * *", func() { fmt.Println("Determine timeout") duration, err := determineTimeout(200, time.Millisecond*50) if err != nil { log.Fatal(err) return } startTime := now.MustParse("13:37:00").Add(-(*duration)).Add(time.Millisecond * time.Duration(manualCorrection)) <-time.After(startTime.Sub(time.Now())) gorequest.New().Post(URL).Type("form").Send(requestBody{ Action: "new", Data: os.Getenv("USERNAME"), }).End() fmt.Println("Posted!") }) c.Start() fmt.Printf("1337 Bot cron started at %s\n", time.Now()) // Never quit... select {} }
// main starts two cron jobs (Hoge at second 5 of every minute, Fuga every
// 5 seconds) and serves HTTP endpoints to report health and to start/stop
// the scheduler.
func main() {
	c := cron.New()
	c.AddFunc("5 * * * * *", Hoge)   // at second 5 of every minute
	c.AddFunc("*/5 * * * * *", Fuga) // every 5 seconds
	c.Start()
	// NOTE(review): isRunning is read and written from concurrent HTTP
	// handlers without synchronization — this is a data race; guard it
	// with a mutex or replace it with an atomic flag.
	isRunning := true
	kami.Get("/", func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("OK"))
	})
	kami.Get("/start", func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
		if !isRunning {
			c.Start()
			isRunning = true
		}
		w.Write([]byte("START OK"))
	})
	kami.Get("/stop", func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
		if isRunning {
			c.Stop()
			isRunning = false
		}
		w.Write([]byte("STOP OK"))
	})
	// Stop the scheduler on exit (only reached if kami.Serve returns).
	defer func() {
		if isRunning {
			c.Stop()
			isRunning = false
		}
	}()
	kami.Serve()
}
// main opens the MySQL connection, tees logging to debug.log and stdout,
// runs an initial snapUser pass, schedules snapUser twice daily via cron,
// and serves HTTP on a random port in [4000, 4100).
func main() {
	var err error
	// NOTE(review): credentials are hard-coded in the DSN — move them to
	// configuration or the environment before sharing this code.
	db, err = sql.Open("mysql", "root:y1w2j35217@tcp(localhost:3306)/zhihu?charset=utf8")
	if err != nil {
		panic(err)
	}
	db.SetMaxOpenConns(200)
	db.SetMaxIdleConns(100)
	defer db.Close()
	f, err := os.OpenFile("debug.log", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Fatalln("os.Open failed, err:", err)
	}
	defer f.Close()
	// Write log output to both the file and stdout.
	w := io.MultiWriter(f, os.Stdout)
	log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)
	log.SetOutput(w)
	log.Println("go working...at", time.Now())
	snapUser()
	//doSavePage()
	log.Println("creating cron task...")
	c := cron.New()
	// Spec is sec/min/hour/...: fires at 08:12:00 and 20:12:00 daily.
	c.AddFunc("0 12 8,20 * * * ", func() { snapUser() })
	c.Start()
	rand.Seed(time.Now().UTC().UnixNano())
	port := strconv.Itoa(4000 + rand.Intn(100))
	log.Println(http.ListenAndServe("0.0.0.0:"+port, nil))
}
func (this *StateEngine) startScheduler() { this.Scheduler = cron.New() for _, sched := range this.schedules { this.Scheduler.AddJob(sched.When, &cronJob{this, sched}) } this.Scheduler.Start() }
// addCronTab creates a cron server for the given cronTab and // and begins managing it. func addCronTab(c cronTab) error { // We create a new cron server here since there is no way // in the github.com/robfig/cron package to remove entries. server := cron.New() // Support only traditional cronspec // Don't support seconds spec := c.Spec.Schedule if !strings.HasPrefix(c.Spec.Schedule, "@") { spec = "0 " + c.Spec.Schedule } err := server.AddFunc(spec, func() { if err := runCronJob(c); err != nil { log.Printf("Error running cron job: %v", err) } }) if err != nil { return fmt.Errorf("error adding crontab: %v", err) } cronServers[c.ObjectMeta.UID] = cronServer{ Server: server, Object: c, } server.Start() log.Printf("Added crontab: %s", c.ObjectMeta.Name) return nil }
func loopStat(ch chan Message, cBroad chan Message, db *mgo.Database) { followed := []string{} loop := true liveBroadcast := make(map[string]time.Time) c := cron.New() c.AddFunc("0 * * * * *", func() { computeStat(db, followed, 01*time.Minute) }) c.AddFunc("0 */5 * * * *", func() { computeStat(db, followed, 05*time.Minute) }) c.AddFunc("0 */15 * * * *", func() { computeStat(db, followed, 15*time.Minute) }) c.AddFunc("@hourly", func() { computeStat(db, followed, time.Hour) }) c.AddFunc("0 0 */12 * * *", func() { computeStat(db, followed, 12*time.Hour) }) c.AddFunc("@daily", func() { computeStat(db, followed, 24*time.Hour) }) c.Start() for loop { select { case msg := <-ch: followed, loop = followedHandler(followed, msg) case msg := <-cBroad: if msg.s == StartBroadcast { addBroadcast(liveBroadcast, msg.v) } else if msg.s == EndBroadcast { processBroadcast(db, liveBroadcast, msg.v) } } } }
func init() { sec := setting.Cfg.Section("database") var err error x, err = xorm.NewEngine("mysql", fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8", sec.Key("USER").String(), sec.Key("PASSWD").String(), sec.Key("HOST").String(), sec.Key("NAME").String())) if err != nil { log.FatalD(4, "Fail to init new engine: %v", err) } x.SetLogger(nil) x.SetMapper(core.GonicMapper{}) if err = x.Sync(new(PkgInfo), new(PkgRef)); err != nil { log.FatalD(4, "Fail to sync database: %v", err) } numOfPackages, _ = x.Count(new(PkgInfo)) c := cron.New() c.AddFunc("@every 5m", func() { numOfPackages, _ = x.Count(new(PkgInfo)) }) c.Start() }
// New creates a GoCelery instance with given config func New(config *Config) (*GoCelery, error) { if config.LogLevel == "" { config.LogLevel = "info" } if config.BrokerURL == "" { config.BrokerURL = "amqp://localhost" } gocelery := &GoCelery{ config: config, workerManager: &workerManager{ brokerURL: config.BrokerURL, }, cron: cron.New(), } // set up log level setupLogLevel(config) // try connect to worker if err := gocelery.workerManager.Connect(); err != nil { return nil, err } // start cron work gocelery.cron.Start() return gocelery, nil }
func startIsmonitor(daemonMode bool) { configFile, err := ioutil.ReadFile("config.json") if err != nil { log.Fatalln(err) } var config config err = json.Unmarshal(configFile, &config) if err != nil { log.Fatalln(err) } if daemonMode && config.CronSchedule == nil { fmt.Println("Daemon mode but no cron schedule specified. Quitting.") os.Exit(1) } if !daemonMode && config.CronSchedule != nil { fmt.Println("Daemon mode not specified but a cron schedule specified. Quitting.") os.Exit(1) } if config.CronSchedule != nil { cron := cron.New() cron.AddJob(*config.CronSchedule, monitorJob{&config}) cron.Start() defer cron.Stop() select {} } else { runIsmonitor(config) } }
// 后台运行的任务 func ServeBackGround() { if db.MasterDB == nil { return } // 初始化 七牛云存储 logic.DefaultUploader.InitQiniu() // 常驻内存的数据 go loadData() c := cron.New() // 每天对非活跃用户降频 c.AddFunc("@daily", decrUserActiveWeight) // 两分钟刷一次浏览数(TODO:重启丢失问题?信号控制重启?) c.AddFunc("@every 2m", logic.Views.Flush) if global.OnlineEnv() { // 每天生成 sitemap 文件 c.AddFunc("@daily", logic.GenSitemap) // 给用户发邮件,如通知网站最近的动态,每周的晨读汇总等 c.AddFunc("0 0 4 * * 1", logic.DefaultEmail.EmailNotice) } c.Start() }
func init() { var err error x, err = xorm.NewEngine("mysql", fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8", setting.Cfg.Section("database").Key("USER").String(), setting.Cfg.Section("database").Key("PASSWD").String(), setting.Cfg.Section("database").Key("HOST").String(), setting.Cfg.Section("database").Key("NAME").String())) if err != nil { log.Fatal(4, "Fail to init new engine: %v", err) } x.SetLogger(nil) if err = x.Sync(new(Package), new(Revision), new(Downloader), new(Block), new(BlockRule)); err != nil { log.Fatal(4, "Fail to sync database: %v", err) } statistic() c := cron.New() c.AddFunc("@every 5m", statistic) c.AddFunc("@every 1h", cleanExpireRevesions) c.Start() go cleanExpireRevesions() if setting.ProdMode { go uploadArchives() ticker := time.NewTicker(time.Hour) go func() { for _ = range ticker.C { uploadArchives() } }() } }
func SetCron(count int, cronOnly bool) { if !cronOnly { ScrapeOrgs() ScrapeHb(count) } c := cron.New() c.AddFunc("@every 3h", func() { log.Println("start scrapeHb") ScrapeHb(count) log.Println("end scrapeHb") }) c.AddFunc("@midnight", func() { log.Println("start scrapeOrgs") ScrapeOrgs() log.Println("end scrapeOrgs") }) c.Start() for { time.Sleep(10000000000000) fmt.Println("sleep") } }
// StartScheduler will start the scheduler process and handle requests
// in and out.
func StartScheduler(conf *config.Config) {
	// The cron scheduler starts empty; entries would be loaded from the
	// schedule DB (see the commented-out View block below).
	c := cron.New()
	c.Start()
	db, err := scheduleDB(conf)
	if err != nil {
		log.Fatalln(err)
	}
	// Ensure the configured bucket exists before any later reads.
	err = db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte(conf.Scheduler.SchedulerDBBucket))
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		log.Fatalln(err)
	}
	/*err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("iops"))
		b.ForEach(func(k, v []byte) error {
			c.AddFunc(string(k), func() {
				"some sort of rally execution code..."
			})
			return nil
		})
		return nil
	})
	if err != nil {
		log.Fatalln(err)
	}*/
}
// main wires the database-backed accessors and controllers, schedules the
// weekday leaderboard webhook, and serves the HTTP API via goji.
func main() {
	// Construct the dsn used for the database
	dsn := os.Getenv("DATABASE_USERNAME") + ":" + os.Getenv("DATABASE_PASSWORD") + "@tcp(" + os.Getenv("DATABASE_HOST") + ":" + os.Getenv("DATABASE_PORT") + ")/" + os.Getenv("DATABASE_NAME")

	// Construct a new AccessorGroup and connects it to the database
	ag := new(accessors.AccessorGroup)
	ag.ConnectToDB("mysql", dsn)

	// Constructs a new ControllerGroup and gives it the AccessorGroup
	cg := new(controllers.ControllerGroup)
	cg.Accessors = ag

	c := cron.New()
	// NOTE(review): the spec "0 0 20 * * 1-5" fires at 20:00, but the
	// original inline comment claimed 2:00pm MST / 21:00 UTC — the spec
	// and the comment disagree by an hour; confirm which is intended.
	c.AddFunc("0 0 20 * * 1-5", func() { // Run at 2:00pm MST (which is 21:00 UTC) Monday through Friday
		helpers.Webhook(helpers.ReportLeaders(ag))
	})
	c.Start()

	goji.Get("/health", cg.Health)
	goji.Get("/leaderboard", cg.ReportLeaders)
	goji.Post("/slack", cg.Slack) // The main endpoint that Slack hits
	goji.Post("/play", cg.User)
	goji.Post("/portfolio", cg.Portfolio)
	goji.Get("/check/:symbol", cg.Check)
	goji.Post("/buy/:quantity/:symbol", cg.Buy)
	goji.Post("/sell/:quantity/:symbol", cg.Sell)
	goji.Serve()
}
func (ctx *cronContext) RefreshCron(messageSender plugins.MessageSender, channel string) { if ctx.cronClient[channel] != nil { ctx.cronClient[channel].Stop() ctx.cronClient[channel] = nil } c := cron.New() for _, activeCron := range ctx.getTaskMap(channel) { if !activeCron.Active { continue } cmd := activeCron.Command c.AddFunc(activeCron.Command.CronSpec, func() { message := cmd.Message() switch cmd.Action { case RandomAddAction: idx := rd.Intn(len(cmd.Args) - 1) log.Println(len(cmd.Args), idx, cmd.Args[idx]) message = cmd.Args[idx] } messageSender.SendMessage(message, channel) }) } c.Start() ctx.cronClient[channel] = c if ctx.repository != nil { ctx.repository.Save(ctx.cronTaskMap) } }
// main parses flags and config, builds the notificator pool and metadata
// client, registers every enabled backup job with the cron scheduler, and
// (unless only validating config) runs the scheduler forever.
func main() {
	flag.Parse()
	err := bakapy.SetupLogging(*LOG_LEVEL)
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(1)
	}
	config, err := bakapy.ParseConfig(*CONFIG_PATH)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Configuration error: %s\n", err)
		os.Exit(1)
	}
	logger.Debug(string(config.PrettyFmt()))

	scriptPool := bakapy.NewDirectoryScriptPool(config)
	metaman := bakapy.NewMetaManClient(config.MetadataAddr, config.Secret)

	// One scripted notificator per configured entry.
	notificators := bakapy.NewNotificatorPool()
	for _, ncConfig := range config.Notificators {
		nc := bakapy.NewScriptedNotificator(scriptPool, ncConfig.Name, ncConfig.Params)
		notificators.Add(nc)
	}

	scheduler := cron.New()
	for jobName, jobConfig := range config.Jobs {
		runSpec := jobConfig.RunAt.SchedulerString()
		if jobConfig.Disabled {
			logger.Warning("job %s disabled, skipping", jobName)
			continue
		}
		// Every job must reference a storage defined in the config.
		storageAddr, exist := config.Storages[jobConfig.Storage]
		if !exist {
			logger.Critical("cannot find storage %s definition in config", jobConfig.Storage)
			os.Exit(1)
		}
		executor := bakapy.NewBashExecutor(jobConfig.Args, jobConfig.Host, jobConfig.Port, jobConfig.Sudo)
		job := bakapy.NewJob(
			jobName, jobConfig, storageAddr,
			scriptPool, executor, metaman,
			notificators,
		)
		logger.Info("adding job %s{%s} to scheduler", jobName, runSpec)
		err := scheduler.AddJob(runSpec, job)
		if err != nil {
			logger.Critical("cannot schedule job %s: %s", jobName, err)
			os.Exit(1)
		}
	}

	// Config-check mode: exit before starting the scheduler.
	if *TEST_CONFIG_ONLY {
		return
	}
	scheduler.Start()
	// Block forever; the scheduler runs its jobs in its own goroutines.
	<-(make(chan int))
}
func main() { sigCh := make(chan os.Signal, 1) signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT) api := anaconda.NewTwitterApi(c.AccessToken, c.AccessTokenSecret) c := cron.New() c.AddFunc("0 0 7 * * *", func() { log.Print("**** START ****") f := weather.GetWeatherForecast() if needUmbrella(f) { m := USER + " 傘持って行ってね♡" tweet, _ := api.PostTweet(m, nil) log.Print(tweet.Text) } log.Print("**** END ****") }) c.Start() select { case s := <-sigCh: log.Printf("Recv: %v\n", s) os.Exit(0) } }
// NewCronSchedule creates a new, disabled cron schedule for the given
// entry. Note: despite the original comment, the underlying cron instance
// is NOT started here — callers must start it separately.
func NewCronSchedule(entry string) *CronSchedule {
	schedule := cron.New()
	return &CronSchedule{
		entry:    entry,
		schedule: schedule,
		enabled:  false,
	}
}
// New configures a new bot instance func New(h *Handlers) *Bot { b := &Bot{ handlers: h, cron: cron.New(), } b.startPeriodicCommands() return b }
func NewScheduler(pool *redis.Pool) *Scheduler { sched := &Scheduler{ cron: cron.New(), pool: pool, } return sched }
func init() { MainCron = cron.New() workPermits = make(chan struct{}, DEFAULT_JOB_POOL_SIZE) MainCron.Start() }
func main() { fmt.Println("Begin Send") c := cron.New() spec := "0 2 13 * * ?" c.AddFunc(spec, doSend) c.Start() select {} }
// LoadSchedules reloads all backup schedules from the cluster admin
// database, replaces the global CRONInstance with a fresh scheduler,
// registers a job for every enabled schedule, and starts it.
func LoadSchedules() error {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		logit.Error.Println("BackupNow: error " + err.Error())
		return err
	}
	defer dbConn.Close()
	logit.Info.Println("LoadSchedules called")

	var schedules []BackupSchedule
	schedules, err = GetSchedules(dbConn)
	if err != nil {
		logit.Error.Println("LoadSchedules error " + err.Error())
		return err
	}

	if CRONInstance != nil {
		logit.Info.Println("stopping current cron instance...")
		CRONInstance.Stop()
	}
	//kill off the old cron, garbage collect it
	CRONInstance = nil

	//create a new cron
	logit.Info.Println("creating cron instance...")
	CRONInstance = cron.New()
	var cronexp string
	for i := 0; i < len(schedules); i++ {
		cronexp = getCron(schedules[i])
		logit.Info.Println("would have loaded schedule..." + cronexp)
		if schedules[i].Enabled == "YES" {
			logit.Info.Println("schedule " + schedules[i].ID + " was enabled so adding it")
			// Copy the schedule's identifiers into a backup job request.
			x := DefaultJob{}
			x.request = BackupRequest{}
			x.request.ScheduleID = schedules[i].ID
			x.request.ServerID = schedules[i].ServerID
			x.request.ServerName = schedules[i].ServerName
			x.request.ServerIP = schedules[i].ServerIP
			x.request.ContainerName = schedules[i].ContainerName
			x.request.ProfileName = schedules[i].ProfileName
			// NOTE(review): AddJob's error is ignored — a bad cron
			// expression would fail silently.
			CRONInstance.AddJob(cronexp, x)
		} else {
			logit.Info.Println("schedule " + schedules[i].ID + " NOT enabled so dropping it")
		}
	}
	logit.Info.Println("starting new CRONInstance")
	CRONInstance.Start()
	return err
}
// Init unmarshals the service config, opens the food-price database,
// schedules the price push/update cron jobs, and wires the command and
// Chinese-alias lookup tables. The cron is created but not started here.
func (self *FoodPriceService) Init(configRawMsg *json.RawMessage, pushCh chan<- *service.PushMessage) error {
	self.started = false
	var c config
	err := json.Unmarshal(*configRawMsg, &c)
	if err != nil {
		return err
	}
	self.config = &c
	dbhelper, err := NewFoodPriceDbHelper(c.DbFile)
	if err != nil {
		return err
	}
	self.dbHelper = dbhelper
	l4g.Debug("Open food price DB successful: %s", c.DbFile)
	self.pushMsgChannel = pushCh
	// NOTE(review): AddFunc errors are ignored — a malformed cron spec in
	// the config would fail silently.
	self.cron = cron.New()
	self.cron.AddFunc(c.PricePushCron, func() { self.pushFoodPriceToUser() })
	self.cron.AddFunc(c.PriceUpdateCron, func() { self.updateFoodPriceData() })
	// Command name -> handler method.
	self.commandMap = map[string]processFunc{
		"foodprice":  (*FoodPriceService).getFoodPrice,
		"subprice":   (*FoodPriceService).subFoodPrice,
		"unsubprice": (*FoodPriceService).unsubFoodPrice,
	}
	// Chinese aliases for the commands above.
	self.aliasCommandMap = map[string]string{
		"菜价":   "foodprice",
		"订阅菜价": "subprice",
		"退订菜价": "unsubprice",
	}
	// Chinese city names -> romanized identifiers.
	self.cityCNNameMap = map[string]string{
		"上海": "shanghai",
	}
	// Chinese district names -> romanized identifiers.
	self.districtCNNameMap = map[string]string{
		"浦东": "pudong",
		"黄浦": "huangpu",
		"徐汇": "xuhui",
		"长宁": "changning",
		"静安": "jingan",
		"普陀": "putuo",
		"闸北": "zhabei",
		"虹口": "hongkou",
		"杨浦": "yangpu",
		"宝山": "baoshan",
		"闵行": "minhang",
		"嘉定": "jiading",
		"金山": "jinshan",
		"松江": "songjiang",
		"青浦": "qingpu",
		"奉贤": "fengxian",
		"崇明": "chongming",
	}
	return nil
}
// Create a new robot instance func New() *Robot { session, err := db.OpenSession("") if err != nil { logger.Error("Initialize robot failed, because %s", err.Error()) return nil } return &Robot{cron: cron.New(), Signal: make(chan os.Signal, 1), db: session.DB("")} }
func (s *Scheduler) ReStart(entries []*Entry) { log.Println("Restarting scheduler ...") s.Instance.Stop() s.Instance = cron.New() s.Instance.Start() for _, e := range entries { s.AddEntry(e) } }
// main schedules check to run every 8.5 hours and blocks forever on the
// package-level WaitGroup.
func main() {
	wg.Add(1)
	c := cron.New()
	c.AddFunc("@every 8h30m", check)
	c.Start()
	// NOTE(review): nothing visible here calls wg.Done(), so Wait blocks
	// forever — presumably intentional to keep the process alive; a bare
	// `select {}` would state that intent more directly. Confirm no other
	// code in the package calls Done before changing it.
	wg.Wait()
}