Example #1
// Run starts the app and blocks until a shutdown signal is received.
func Run() (err error) {
	var wg sync.WaitGroup
	log.Info("Starting gomasticate")
	conf, err := NewConf("conf.yaml")
	if nil != err {
		log.Error(err)
		return
	}
	log.Info(conf)
	chewChan := make(chan *messaging.Food, 2000)
	swallowChan := make(chan *messaging.Food, 4000)

	done := make(chan interface{})

	wg.Add(2)
	go lips.OpenWide(chewChan, done, &wg, conf.LipsPort())
	go chew.Chew(chewChan, swallowChan, &wg)

	sw := swallow.NewSwallow(conf.EsHost(), swallowChan, 10)

	//handle signals
	c := make(chan os.Signal, 1)
	s := make(chan int, 1)
	signal.Notify(c)
	go Death(c, s)
	death := <-s //time for shutdown
	log.Debug("Death return code: ", death)
	close(done)
	sw.Close()
	log.Info("Waiting for goroutines to finish...")
	wg.Wait()
	log.Info("Exiting")
	return
}
Example #2
func CheckIn(path string) {
	log.Info("Starting db checking background script: " + path)
	ticker := time.NewTicker(10 * time.Second)
	quit := make(chan struct{})
	go func() {
		for {
			select {
			case <-ticker.C:
				nodeinfo.UpdateConnection()
				log.Info("Checking all changed stuff in db for: " + path)
				listener := utils.GetListenerFromDir(path)
				items, err := datastore.CheckIn(listener)
				if err != nil {
					log.Infof("Error occurred getting data for %s (%s): %+v", listener, err.Error(), err)
				}
				cfg := utils.GetConfig()
				handleDataChanges(items, cfg.Listeners[listener], listener)
			// @TODO: check that the db knows I'm alive.
			case <-quit:
				ticker.Stop()
				return
			}
		}
	}()
}
Example #3
func loadNotifiers(app *ApplicationContext) error {
	// Set up the Emailer, if configured
	if len(app.Config.Email) > 0 {
		log.Info("Configuring Email notifier")
		emailer, err := NewEmailer(app)
		if err != nil {
			log.Criticalf("Cannot configure email notifier: %v", err)
			return err
		}
		app.Emailer = emailer
	}

	// Set up the HTTP Notifier, if configured
	if app.Config.Httpnotifier.Url != "" {
		log.Info("Configuring HTTP notifier")
		httpnotifier, err := NewHttpNotifier(app)
		if err != nil {
			log.Criticalf("Cannot configure HTTP notifier: %v", err)
			return err
		}
		app.HttpNotifier = httpnotifier
	}

	return nil
}
Example #4
func (m *Management) Shutdown() {
	log.Info("Management: shutdown")
	close(m.shutdown)

	m.waitGroup.Wait()
	log.Info("Management: shutdown done")
}
Example #5
func main() {
	f, err := os.Open("ok.txt")
	if err != nil {
		log.Info(err)
		return
	}
	defer f.Close()
	stat, err := f.Stat()
	if err != nil {
		log.Error("stat err: ", err)
		return
	}
	data := make([]byte, stat.Size())
	_, err = f.Read(data)
	if err != nil {
		log.Error("read err: ", err)
		return
	}
	dataStr := string(data)

	log.Info(dataStr)

	dirInfo()

	demoList()

}
Example #6
// PusherProto is a simple pusher for testing.
func PusherProto(count int, finished chan int, msg *messaging.Food, port int) {
	log.Info("Starting pusher")
	socket, err := nano.NewPushSocket()
	if nil != err {
		log.Error(err)
		return
	}
	defer socket.Close()
	socket.SetSendTimeout(500 * time.Millisecond)
	sport := strconv.Itoa(port)
	_, err = socket.Connect("tcp://localhost:" + sport)
	if nil != err {
		log.Error(err)
		return
	}
	log.Info("Connected and ready to send data")
	tot := 0
	for {
		bytes, _ := msg.Marshal()
		_, err := socket.Send(bytes, 0) //blocking
		if nil != err {
			log.Error(err)
			continue
		} else {
			tot++
		}
		if tot >= count {
			break
		}
	}
	log.Info("Finished sending data exiting")
	finished <- tot
}
Example #7
func InitialSync() {
	cfg := utils.GetConfig()
	log.Info("Verifying DB Tables")
	datastore.CreateDB()
	log.Info("Initial sync starting...")

	for key, listener := range cfg.Listeners {

		// First check whether the table is empty; false means it already has data.
		if !datastore.CheckEmpty(key) {
			// Database is not empty so pull the updates and match locally
			items := datastore.FetchAll(key)
			handleDataChanges(items, listener, key)
		} else {
			// Database is empty so lets import
			fsItems := utils.ListFilesInDir(listener.Directory)
			for _, item := range fsItems {
				success := datastore.Insert(key, item)
				if !success {
					log.Infof("An error occurred inserting %v into the database", item)
				}
				if !item.IsDir {
					storage.PutFile(item.Filename, key)
				}

			}
		}

	}

	log.Info("Initial sync completed...")
}
Example #8
func (ss *Session) heartBeat() chan bool {
	done := make(chan bool)
	ss.heartTicker = done
	go func() {
		t := time.Duration(ss.server.Config.HeartbeatInterval) * time.Second
		ticker := time.NewTicker(t)
		loop := true
		for loop {
			select {
			case <-ticker.C:
				err := ss.defaultNS.sendPacket(new(heartbeatPacket))
				if err != nil {
					log.Error("failed to send heartbeat packet: ", err)
				}
				log.Info("sent heart beat missed = ", ss.missedHeartbeats)
				ss.missedHeartbeats++
				// TODO: Configurable
				if ss.missedHeartbeats > 2 {
					log.Info("heartBeat missedHeartbeats ", ss.SessionId)
					ss.Close("")
					loop = false
				}
			case <-done:
				log.Infof("[%s] stop heartBeat", ss.SessionId)
				ticker.Stop()
				//ss.heartTicker = nil
				return
			}
		}
	}()
	return done
}
Example #9
func (ss *Session) RawMessage(msg []byte) error {
	log.Trace("RawMessage ", string(msg))
	packet, err := decodePacket(msg)
	if err != nil {
		log.Info("decodePacket error ", err, string(msg))
		return nil
	}
	if packet == nil {
		log.Info("packet == nil ")
		return nil
	}

	if packet.EndPoint() == "" {
		if err := ss.onPacket(packet); err != nil {
			log.Error(err)
			return nil
		}
	}

	ns := ss.Of(packet.EndPoint())
	if ns == nil {
		return nil
	}
	ns.onPacket(packet)
	return nil
}
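Example #10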
func calcDailySummary(now time.Time, config StartupConfig, runningConfig RunningConfig) {
	log.Infof("lastSummaryTime is %v", runningConfig.LastSummaryTime)
	if runningConfig.LastSummaryTime.Day() != now.Day() {
		startTime := now.Truncate(24 * time.Hour).Add(-24 * time.Hour)
		endTime := startTime.Add(24 * time.Hour)
		log.Info("Summarizing from ", startTime, " (", startTime.Unix(), ") to ", endTime, " (", endTime.Unix(), ")")

		// influx connection
		influxClient, err := influxConnect(config, runningConfig)
		if err != nil {
			log.Error("Could not connect to InfluxDb to get daily summary stats!!")
			errHndlr(err, ERROR)
			return
		}

		bp, _ := influx.NewBatchPoints(influx.BatchPointsConfig{
			Database:        "daily_stats",
			Precision:       "s",
			RetentionPolicy: config.DailySummaryRetentionPolicy,
		})

		calcDailyMaxGbps(influxClient, bp, startTime, endTime, config)
		calcDailyBytesServed(influxClient, bp, startTime, endTime, config)
		log.Info("Collected daily stats @ ", now)
	}
}
Example #11
// WatchNodeInfoMap watches the node list for changes.
func (self *NodeInfoMaps) WatchNodeInfoMap() {
	_, _, ch, err := self.zk.GetZkConn().ChildrenW("/yundis/nodes")
	if err != nil {
		log.Errorf("Can not watch path /yundis/nodes, err:%s", err)
		return
	}
	go func() {
		for {
			event := <-ch
			log.Infof("node list change, %+v", event)
			children, _, ch1, err1 := self.zk.GetZkConn().ChildrenW("/yundis/nodes")
			if err1 == nil {
				ch = ch1
				//handle the node list change event
				log.Infof("node list changed : %s", children)
				infoMap := self.getNodeInfoMapFromZk()
				//change the slotinfo state.
				log.Info("The node list changed, begin to change the affected slot's info.")
				self.SetNodeInfoMap(infoMap) //refresh nodeinfo map by new zk data.
				self.ModifySlotState(infoMap)
				log.Info("Refresh nodeinfo map by new zk data.")
			} else {
				log.Errorf("Can not watching the children of /yundis/nodes, err:%s", err1)
				break
			}
			time.Sleep(time.Second)
		}
	}()
}
Example #12
func Chew(chewChan <-chan *messaging.Food, swallowChan chan *messaging.Food, wg *sync.WaitGroup) {
	log.Info("Let the chewing begin!")
	defer close(swallowChan)
	r := rep.NewReporter()
	r.RegisterStatWIndex("chew", "good")
	for msg := range chewChan {
		if nil != msg {
			// Parsing work happens here; this will probably change what our message type looks like when swallowed.

			date := time.Unix(0, msg.GetTimeNano()).UTC()
			fmtDate := date.Format("2006-01-02")
			indexType := "all"
			customerId := "id" //should exist eventually
			index := "documents-" + customerId + "-" + fmtDate
			msg.Index = &index
			msg.IndexType = &indexType
			r.AddStatWIndex("chew", 1, "good")
			swallowChan <- msg
		}
	}
	log.Info("Done chewing")
	log.Flush()
	wg.Done()

}
Example #13
func RunReceiverRPC() {
	log.Info("Starting Receiver RPC\n")
	ln, err := net.ListenUnix("unix", appAddr)
	if err != nil {
		log.Errorf("Failed to start RPC server: %v", err)
		return
	}

	Receiver = receiverrpc.Receiver{}
	rpc.Register(&Receiver)
	log.Infof("RPC server is listening on %s\n", appAddr.String())

	defer func() {
		// close the listener sock
		log.Info("Closing listener socket.\n")
		ln.Close()
	}()

	for {
		ln.SetDeadline(time.Now().Add(time.Second))
		conn, err := ln.AcceptUnix()
		if err != nil {
			if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
				// just accept timeout, not an error
				continue
			}
			log.Error("Failed to accept: %s", err.Error())
			continue
		}
		rpc.ServeConn(conn)
	}
}
Example #14
func (s *RtmpNetStream) SendVideo(video *StreamPacket) error {
	log.Debug(video)
	if s.vkfsended {
		video.Timestamp -= s.vsend_time - uint32(s.bufferTime)
		s.vsend_time += video.Timestamp
		return sendVideo(s.conn, video)
	}
	if !video.isKeyFrame() {
		//log.Info("No Video Key Frame,Ignore Video ", video)
		//video = s.dispatcher.producer.lastVideoKeyFrame
		return nil
	}
	fkf := s.dispatcher.producer.firstVideoKeyFrame
	if fkf == nil {
		log.Info("No Video Configurate Record,Ignore Video ", video)
		return nil
	}
	fkf.Timestamp = 0
	log.Info("Send Video Configurate Record ", fkf)
	//log.Infof(" Payload %02X", fkf.Payload)
	ver := fkf.Payload[4+1]
	avcProfile := fkf.Payload[4+2]
	profileCompatibility := fkf.Payload[4+3]
	avcLevel := fkf.Payload[4+4]
	reserved := fkf.Payload[4+5] >> 2
	lengthSizeMinusOne := fkf.Payload[4+5] & 0x03
	reserved2 := fkf.Payload[4+6] >> 5
	numOfSPS := fkf.Payload[4+6] & 31
	spsLength := util.BigEndian.Uint16(fkf.Payload[4+7:])
	sps := fkf.Payload[4+9 : 4+9+int(spsLength)]
	numOfPPS := fkf.Payload[4+9+int(spsLength)]
	ppsLength := util.BigEndian.Uint16(fkf.Payload[4+9+int(spsLength)+1:])
	pps := fkf.Payload[4+9+int(spsLength)+1+2:]
	log.Infof("  cfgVersion(%v) | avcProfile(%v) | profileCompatibility(%v) |avcLevel(%v) | reserved(%v) | lengthSizeMinusOne(%v) | reserved(%v) | numOfSPS(%v) |spsLength(%v) | sps(%02X) | numOfPPS(%v) | ppsLength(%v) | pps(%02X) ",
		ver,
		avcProfile,
		profileCompatibility,
		avcLevel,
		reserved,
		lengthSizeMinusOne,
		reserved2,
		numOfSPS,
		spsLength,
		sps,
		numOfPPS,
		ppsLength,
		pps)
	err := sendFullVideo(s.conn, fkf)
	if err != nil {
		return err
	}
	s.vkfsended = true
	s.vsend_time = video.Timestamp
	video.Timestamp = 0
	log.Info("Send I Frame ", video)
	log.Infof(" Payload %v/%v", video.Payload[9]&0x1f, video.Payload[10])

	return sendFullVideo(s.conn, video)
}
Example #15
func main() {
	log.Info("Initialize!")

	var proxy = service.Proxy{}
	var refresh = service.Refresh{}

	log.Info("Run!")

	refresh.Run(&proxy)
}
Example #16
func CheckEmpty(table string) bool {
	setdbstoreEngine()
	empty := dbstore.CheckEmpty(table)
	if empty {
		log.Info("Database is EMPTY, starting creation")
	} else {
		log.Info("Using existing table: " + table)
	}
	return empty
}
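Example #17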
func libWithSealogMain() {
	defer library.FlushLog()
	defer log.Flush()
	loadAppConfig()
	log.Info("App started")
	log.Info("Config loaded")

	// Disable library log
	log.Info("* Disabled library log test")
	library.DisableLog()
	calcF2()
	log.Info("* Disabled library log tested")

	// Use a special logger for library
	log.Info("* Special output test")
	specialOutputConfig()
	calcF2()
	log.Info("* Special output tested")

	// Use the same logger for both app and library
	log.Info("* Same output test")
	sameOutputConfig()
	calcF2()
	log.Info("* Same output tested")

	log.Info("App finished")
}
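Example #18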
func (fw *TrecFileWriter) WriteAllTokens() {
	log.Debugf("Monitoring the writer channel")
	for t := range fw.StringChan {
		log.Debugf("Received %s. Writing it out to disk.", *t)
		fw.file.WriteString(*t + "\n")
	}
	fw.file.Close()
	log.Info("Sending exit signal")
	fw.Done <- true
	log.Info("Writer, out!")
}
Example #19
func (c *Conn) writeLoop() {
	for {
		select {
		case <-c.exitChan:
			clog.Info("breaking out of writeLoop")
			// Indicate drainReady because we will not pull any more off msgResponseChan
			close(c.drainReady)
			goto exit
		case cmd := <-c.cmdChan:
			err := c.WriteCommand(cmd)
			if err != nil {
				clog.Errorf("error sending command %s - %s", cmd, err)
				c.close()
				continue
			}
		case resp := <-c.msgResponseChan:
			// Decrement this here so it is correct even if we can't respond to nsqd
			msgsInFlight := atomic.AddInt64(&c.messagesInFlight, -1)

			if resp.success {
				clog.Tracef("FIN %s", resp.msg.ID)
				c.delegate.OnMessageFinished(c, resp.msg)
				if resp.backoff {
					c.delegate.OnResume(c)
				}
			} else {
				clog.Tracef("REQ %s", resp.msg.ID)
				c.delegate.OnMessageRequeued(c, resp.msg)
				if resp.backoff {
					c.delegate.OnBackoff(c)
				}
			}

			err := c.WriteCommand(resp.cmd)
			if err != nil {
				clog.Errorf("error sending command %s - %s", resp.cmd, err)
				c.close()
				continue
			}

			if msgsInFlight == 0 &&
				atomic.LoadInt32(&c.closeFlag) == 1 {
				c.close()
				continue
			}
		}
	}

exit:
	c.wg.Done()
	clog.Info("writeLoop exiting")
}
Example #20
// run starts the asynchronous run-loop connecting to RabbitMQ
func (t *rabbitTransport) run() {
	initConn := func() *RabbitConnection {
		conn := NewRabbitConnection()
		t.connM.Lock()
		defer t.connM.Unlock()
		t.conn = conn
		select {
		case <-t.connReady:
			// Only swap connReady if it's already closed
			t.connReady = make(chan struct{})
		default:
		}
		return conn
	}
	conn := initConn()

	t.tomb.Go(func() error {
		defer func() {
			t.killListeners()
			conn.Close()
			log.Info("[Typhon:RabbitTransport] Dead; connection closed")
		}()

	runLoop:
		for {
			log.Info("[Typhon:RabbitTransport] Run loop connecting…")
			select {
			case <-t.tomb.Dying():
				return nil

			case <-conn.Init():
				log.Info("[Typhon:RabbitTransport] Run loop connected")
				t.listenReplies()

				select {
				case <-t.tomb.Dying():
					// Do not loop again
					return nil
				default:
					conn.Close()
					conn = initConn()
					continue runLoop
				}

			case <-time.After(connectTimeout):
				log.Criticalf("[Typhon:RabbitTransport] Run loop timed out after %s waiting to connect",
					connectTimeout.String())
				return ErrCouldntConnect
			}
		}
	})
}
Example #21
func (p *Broadcast) start() {
	//p.terminal = newTerminal()
	go func(p *Broadcast) {
		defer func() {
			if e := recover(); e != nil {
				log.Critical(e)
			}
			log.Info("Broadcast " + p.path + " stopped")
		}()
		log.Info("Broadcast " + p.path + " started")
		for {
			select {
			case amsg := <-p.producer.audiochan:
				for _, s := range p.consumers {
					err := s.SendAudio(amsg.Clone())
					if err != nil {
						notifyError(s, err)
					}
				}
			case vmsg := <-p.producer.videochan:
				for _, s := range p.consumers {
					err := s.SendVideo(vmsg.Clone())
					if err != nil {
						notifyError(s, err)
					}
				}
			case obj := <-p.control:
				if c, ok := obj.(*RtmpNetStream); ok {
					if c.closed {
						delete(p.consumers, c.conn.remoteAddr)
						log.Debugf("Broadcast %v consumers %v", p.path, len(p.consumers))
					} else {
						p.consumers[c.conn.remoteAddr] = c
						log.Debugf("Broadcast %v consumers %v", p.path, len(p.consumers))
					}
				} else if v, ok := obj.(string); ok && "stop" == v {
					for k, ss := range p.consumers {
						delete(p.consumers, k)
						ss.Close()
					}
					return
				}
			case <-time.After(time.Second * 90):
				log.Warn("Broadcast " + p.path + " Video | Audio Buffer Empty,Timeout 30s")
				p.stop()
				p.producer.Close()
				return
			}
		}
	}(p)

}
Example #22
func stopNotifiers(app *ApplicationContext) {
	// Ignore errors on unlock - we're quitting anyway, and it might not be locked
	app.NotifierLock.Unlock()

	if app.Emailer != nil {
		log.Info("Stopping Email notifier")
		app.Emailer.Stop()
	}
	if app.HttpNotifier != nil {
		log.Info("Stopping HTTP notifier")
		app.HttpNotifier.Stop()
	}
}
Example #23
// getBytes gets the []byte array for a given path on the to session.
// It returns the raw body.
func (to *Session) getBytes(path string) ([]byte, error) {
	var body []byte
	resp, err := to.UserAgent.Get(to.Url + path)
	if err != nil {
		log.Info(err)
		return body, err
	}

	body, err = ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Info(err)
	}
	return body, err
}
Example #24
func InitializeWithPath(databaseFile string) (*Database, error) {
	dbExists, err := checkDatabaseExists(databaseFile)
	if err != nil {
		return nil, common.NewError("Cannot access database file", err)
	}

	db, err := sql.Open("sqlite3", databaseFile)
	if err != nil {
		return nil, common.NewError("Cannot open database", err)
	}
	dbmap := modl.NewDbMap(db, modl.SqliteDialect{})

	database := &Database{dbmap}

	if !dbExists {
		log.Info("Creating tables")
		if err = database.createTables(); err != nil {
			return nil, common.NewError("Cannot create tables", err)
		}
	}

	database.prepareTables()

	return database, nil
}
Example #25
// StartSupervised starts the ECS Agent and ensures it stays running, except for terminal errors (indicated by an agent exit code of 5)
func (e *Engine) StartSupervised() error {
	agentExitCode := -1
	for agentExitCode != terminalSuccessAgentExitCode && agentExitCode != terminalFailureAgentExitCode {
		err := e.docker.RemoveExistingAgentContainer()
		if err != nil {
			return engineError("could not remove existing Agent container", err)
		}

		log.Info("Starting Amazon EC2 Container Service Agent")
		agentExitCode, err = e.docker.StartAgent()
		if err != nil {
			return engineError("could not start Agent", err)
		}
		log.Infof("Agent exited with code %d", agentExitCode)
		if agentExitCode == upgradeAgentExitCode {
			err = e.upgradeAgent()
			if err != nil {
				log.Error("could not upgrade agent", err)
			}
		}
	}
	if agentExitCode == terminalFailureAgentExitCode {
		return errors.New("agent exited with terminal exit code")
	}
	return nil
}
Example #26
// WatchSlotInfoMap watches the slot list for changes.
func (self *SlotInfoMaps) WatchSlotInfoMap() {
	_, _, ch, err := self.zk.GetZkConn().GetW("/yundis/ids")
	if err != nil {
		log.Errorf("Can not watch path /yundis/ids, err:%s", err)
		return
	}

	go func() {
		for {
			event := <-ch
			log.Infof("Slotinfo list changed event, %+v", event)
			data, _, ch1, err1 := self.zk.GetZkConn().GetW("/yundis/ids")
			if err1 == nil {
				ch = ch1
				//handle the node list change event
				log.Infof("Slotinfo list changed : %s", data)
				infoMap := self.GetSlotInfoMapFromZk()
				//change the slotinfo state.
				self.SetSlotInfoMap(infoMap) //refresh nodeinfo map by new zk data.
				log.Info("Refresh slotinfo map by new zk data.")
			} else {
				log.Errorf("Can not watching the children of /yundis/ids, err:%s", err1)
				break
			}
			time.Sleep(time.Second)
		}
	}()
}
Example #27
func processFile(req uploadRequest, db *database.DB, store *storage.Store) {
	defer req.file.Close()

	epub, err := openMultipartEpub(req.file)
	if err != nil {
		log.Warn("Not valid epub uploaded file ", req.filename, ": ", err)
		return
	}
	defer epub.Close()

	book, id := parseFile(epub, store)
	req.file.Seek(0, 0)
	size, err := store.Store(id, req.file, EPUB_FILE)
	if err != nil {
		log.Error("Error storing book (", id, "): ", err)
		return
	}

	book["filesize"] = size
	err = db.AddBook(book)
	if err != nil {
		log.Error("Error storing metadata (", id, "): ", err)
		return
	}
	log.Info("File uploaded: ", req.filename)
}
Example #28
func (c *serverConn) pingLoop() chan bool {
	ticker := time.NewTicker(c.pingInterval - 2*time.Second)
	ping := make(chan bool)
	go func() {
		for {
			select {
			case <-ticker.C:
				{
					c.defaultNS.sendPacket(new(heartbeatPacket))
					n := atomic.AddInt32(&c.missedHeartbeats, 1)

					// TODO: Configurable
					if n > 2 {
						log.Info("heartBeat missedHeartbeats ", c.Id())
						ticker.Stop()
						c.Close()
						return
					}
				}

			case <-ping:
				ticker.Stop()
				return
			}
		}
	}()
	return ping

}
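Example #29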
// poll all known lookup servers every LookupdPollInterval
func (r *Consumer) lookupdLoop() {
	// add some jitter so that multiple consumers discovering the same topic,
	// when restarted at the same time, don't all connect at once.
	jitter := time.Duration(int64(r.rng.Float64() *
		r.config.LookupdPollJitter * float64(r.config.LookupdPollInterval)))
	ticker := time.NewTicker(r.config.LookupdPollInterval)

	select {
	case <-time.After(jitter):
	case <-r.exitChan:
		goto exit
	}

	for {
		select {
		case <-ticker.C:
			r.queryLookupd()
		case <-r.lookupdRecheckChan:
			r.queryLookupd()
		case <-r.exitChan:
			goto exit
		}
	}

exit:
	ticker.Stop()
	clog.Info("exiting lookupdLoop")
	r.wg.Done()
}
Example #30
func setup_logging() {
	level := "info"
	if *g_debug {
		level = "debug"
	}
	config := fmt.Sprintf(`
<seelog type="sync" minlevel="%s">
	<outputs formatid="main">
		<console/>
	</outputs>
	<formats>
		<format id="main" format="%%Date %%Time [%%LEVEL] %%File|%%FuncShort|%%Line: %%Msg%%n"/>
	</formats>
</seelog>`, level)

	logger, err := log.LoggerFromConfigAsBytes([]byte(config))

	if err != nil {
		fmt.Println("Failed to config logging:", err)
		os.Exit(1)
	}

	log.ReplaceLogger(logger)

	log.Info("Logging config is successful")
}