// exec runs a single migration inside a transaction. If the migration defines a
// condition, the migration is skipped unless the condition query returns rows.
func (mg *Migrator) exec(m Migration) error {
	if mg.LogLevel <= log.INFO {
		log.Info("Migrator: exec migration id: %v", m.Id())
	}

	err := mg.inTransaction(func(sess *xorm.Session) error {
		condition := m.GetCondition()
		if condition != nil {
			sql, args := condition.Sql(mg.dialect)
			results, err := sess.Query(sql, args...)
			if err != nil || len(results) == 0 {
				log.Info("Migrator: skipping migration id: %v, condition not fulfilled", m.Id())
				return sess.Rollback()
			}
		}

		_, err := sess.Exec(m.Sql(mg.dialect))
		if err != nil {
			log.Error(3, "Migrator: exec FAILED migration id: %v, err: %v", m.Id(), err)
			return err
		}

		// run additional migration code.
		return m.ExecOnSuccess(sess)
	})

	if err != nil {
		return err
	}

	return nil
}
// Publish serialises the event, increments its attempt counter and writes it to
// the publish channel, logging an error every two seconds while the write blocks.
func Publish(e Event, attempts int) error {
	if handlers == nil {
		// not initialized.
		return nil
	}

	raw, err := NewRawEventFromEvent(e)
	if err != nil {
		return err
	}
	raw.Attempts = attempts + 1

	body, err := json.Marshal(raw)
	if err != nil {
		return err
	}

	msg := Message{
		RoutingKey: e.Type(),
		Payload:    body,
	}

	ticker := time.NewTicker(2 * time.Second)
	pre := time.Now()

WAITLOOP:
	for {
		select {
		case <-ticker.C:
			log.Error(3, "blocked writing to event publish channel for %f seconds", time.Since(pre).Seconds())
		case pubChan <- msg:
			ticker.Stop()
			break WAITLOOP
		}
	}

	return nil
}
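// The WAITLOOP above implements a "block but warn" send: the goroutine keeps
// trying to write to pubChan and logs every two seconds while the consumer is
// not keeping up. A minimal, self-contained sketch of that pattern; the names
// blockingSendWithWarning, ch and msg are illustrative, not from this codebase.
package main

import (
	"log"
	"time"
)

func blockingSendWithWarning(ch chan<- string, msg string, warnEvery time.Duration) {
	ticker := time.NewTicker(warnEvery)
	defer ticker.Stop()
	start := time.Now()
	for {
		select {
		case ch <- msg:
			// the send finally went through.
			return
		case <-ticker.C:
			// still blocked: the reader has not caught up yet.
			log.Printf("blocked writing to channel for %.0f seconds", time.Since(start).Seconds())
		}
	}
}

func main() {
	ch := make(chan string) // unbuffered, so the send blocks until a reader arrives
	done := make(chan struct{})
	go func() {
		time.Sleep(5 * time.Second) // simulate a slow consumer
		log.Println("received:", <-ch)
		close(done)
	}()
	blockingSendWithWarning(ch, "hello", 2*time.Second)
	<-done
}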
func handleJobs(c chan jobqueue.Message, jobQueue chan<- *m.AlertingJob) {
	for message := range c {
		go func(msg jobqueue.Message) {
			j := &m.AlertingJob{}
			err := json.Unmarshal(msg.Payload, j)
			if err != nil {
				log.Error(3, "unable to unmarshal Job. %s", err)
				return
			}
			jobQueue <- j
		}(message)
	}
}
func loadSpecifedConfigFile(configFile string) {
	if configFile == "" {
		configFile = filepath.Join(HomePath, "conf/custom.ini")
		// return without error if custom file does not exist
		if !pathExists(configFile) {
			return
		}
	}

	userConfig, err := ini.Load(configFile)
	if err != nil {
		log.Fatal(3, "Failed to parse %v, %v", configFile, err)
	}
	userConfig.BlockMode = false

	for _, section := range userConfig.Sections() {
		for _, key := range section.Keys() {
			if key.Value() == "" {
				continue
			}

			defaultSec, err := Cfg.GetSection(section.Name())
			if err != nil {
				log.Error(3, "Unknown config section %s defined in %s", section.Name(), configFile)
				continue
			}
			defaultKey, err := defaultSec.GetKey(key.Name())
			if err != nil {
				log.Error(3, "Unknown config key %s defined in section %s, in file %s", key.Name(), section.Name(), configFile)
				continue
			}

			defaultKey.SetValue(key.Value())
		}
	}

	configFiles = append(configFiles, configFile)
}
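// loadSpecifedConfigFile above merges a user file into already-loaded defaults:
// non-empty user values replace matching default keys, while unknown sections or
// keys are only logged. A tiny standalone demonstration of those override
// semantics, assuming the go-ini package (gopkg.in/ini.v1); the sections, keys
// and values here are made up for illustration.
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	defaults, err := ini.Load([]byte("[server]\nhttp_port = 3000\ndomain = localhost\n"))
	if err != nil {
		log.Fatal(err)
	}
	custom, err := ini.Load([]byte("[server]\nhttp_port = 8080\ndomain =\n"))
	if err != nil {
		log.Fatal(err)
	}

	for _, section := range custom.Sections() {
		for _, key := range section.Keys() {
			if key.Value() == "" {
				continue // empty user values never override defaults
			}
			defaults.Section(section.Name()).Key(key.Name()).SetValue(key.Value())
		}
	}

	fmt.Println(defaults.Section("server").Key("http_port").String()) // 8080, overridden
	fmt.Println(defaults.Section("server").Key("domain").String())    // localhost, kept
}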
func handleMessages(c chan Message) {
	for m := range c {
		go func(msg Message) {
			e := RawEvent{}
			err := json.Unmarshal(msg.Payload, &e)
			if err != nil {
				log.Error(3, "unable to unmarshal event Message. %s", err)
				return
			}
			log.Debug("processing event of type %s", e.Type)

			// broadcast the event to listeners.
			for _, ch := range handlers.GetListeners(e.Type) {
				ch <- e
			}
		}(m)
	}
}
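// handleMessages above fans each decoded RawEvent out to every channel registered
// for its type via handlers.GetListeners. A self-contained sketch of what such a
// listener registry could look like; the Handlers type and its methods here are
// illustrative stand-ins, not the package's own implementation.
package main

import (
	"fmt"
	"sync"
)

type RawEvent struct {
	Type    string
	Payload []byte
}

// Handlers maps an event type to the channels subscribed to it.
type Handlers struct {
	mu        sync.RWMutex
	listeners map[string][]chan RawEvent
}

func NewHandlers() *Handlers {
	return &Handlers{listeners: make(map[string][]chan RawEvent)}
}

// AddListener registers a channel to receive every event of the given type.
func (h *Handlers) AddListener(eventType string, ch chan RawEvent) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.listeners[eventType] = append(h.listeners[eventType], ch)
}

// GetListeners returns the channels currently subscribed to the given event type.
func (h *Handlers) GetListeners(eventType string) []chan RawEvent {
	h.mu.RLock()
	defer h.mu.RUnlock()
	return h.listeners[eventType]
}

func main() {
	handlers := NewHandlers()
	ch := make(chan RawEvent, 1)
	handlers.AddListener("alert.triggered", ch)

	// broadcast, as handleMessages does.
	e := RawEvent{Type: "alert.triggered", Payload: []byte(`{}`)}
	for _, l := range handlers.GetListeners(e.Type) {
		l <- e
	}
	fmt.Println("got event of type", (<-ch).Type)
}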
func processMailQueue() {
	for {
		select {
		case msg := <-mailQueue:
			num, err := buildAndSend(msg)
			tos := strings.Join(msg.To, "; ")
			info := ""
			if err != nil {
				if len(msg.Info) > 0 {
					info = ", info: " + msg.Info
				}
				log.Error(4, fmt.Sprintf("Async: sent %d emails, failed sending to: %s%s err: %s", num, tos, info, err))
			} else {
				log.Trace(fmt.Sprintf("Async: sent %d emails to: %s%s", num, tos, info))
			}
		}
	}
}
func (mg *Migrator) Start() error {
	if mg.LogLevel <= log.INFO {
		log.Info("Migrator: Starting DB migration")
	}

	logMap, err := mg.GetMigrationLog()
	if err != nil {
		return err
	}

	for _, m := range mg.migrations {
		_, exists := logMap[m.Id()]
		if exists {
			if mg.LogLevel <= log.DEBUG {
				log.Debug("Migrator: Skipping migration: %v, Already executed", m.Id())
			}
			continue
		}

		sql := m.Sql(mg.dialect)

		record := MigrationLog{
			MigrationId: m.Id(),
			Sql:         sql,
			Timestamp:   time.Now(),
		}

		if mg.LogLevel <= log.DEBUG {
			log.Debug("Migrator: Executing SQL: \n %v \n", sql)
		}

		if err := mg.exec(m); err != nil {
			log.Error(3, "Migrator: error: \n%s:\n%s", err, sql)
			record.Error = err.Error()
			mg.x.Insert(&record)
			return err
		} else {
			record.Success = true
			mg.x.Insert(&record)
		}
	}

	return nil
}
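// exec and Start above only rely on four methods of a migration: Id, Sql,
// GetCondition and ExecOnSuccess. A hedged sketch of that implied interface and
// one hypothetical migration; Dialect, MigrationCondition and the example type
// are placeholders and will differ from the package's real definitions.
package migrations

import "github.com/go-xorm/xorm"

// Dialect stands in for the per-database SQL helper passed to Sql.
type Dialect interface{}

// MigrationCondition gates a migration: it only runs if the query returns rows.
type MigrationCondition interface {
	Sql(d Dialect) (string, []interface{})
}

type Migration interface {
	Id() string
	Sql(d Dialect) string
	GetCondition() MigrationCondition
	ExecOnSuccess(sess *xorm.Session) error
}

// addUserEmailIndex is a made-up migration used only to illustrate the interface.
type addUserEmailIndex struct{}

func (m *addUserEmailIndex) Id() string { return "add index on user.email" }

func (m *addUserEmailIndex) Sql(d Dialect) string {
	return "CREATE INDEX idx_user_email ON `user` (email)"
}

// A nil condition means the migration always runs.
func (m *addUserEmailIndex) GetCondition() MigrationCondition { return nil }

// ExecOnSuccess runs extra Go code in the same transaction after the SQL succeeds.
func (m *addUserEmailIndex) ExecOnSuccess(sess *xorm.Session) error { return nil }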