Example #1
func (migrator *Migrator) migrateCase(c *desk.Case) error {
	log.Infof("Migrating case %d.", c.Id)

	err := migrator.checkCase(c)
	if err != nil {
		return err
	}

	err = migrator.checkCustomer(c)
	if err != nil {
		return err
	}

	log.Infof("Migrating customer.")
	requester, err := migrator.migrateCustomer(c.Customer)
	if err != nil || requester.Email == "" {
		return fmt.Errorf("Can't migrate customer: %v. Aborting.", err)
	}
	log.Infof("Customer %s migrated.", requester.Email)

	assigneeId := migrator.migrateAssignee(c)
	log.Infof("Setting assignee %d.", assigneeId)

	err = migrator.migrateTicket(c, requester, assigneeId)
	if err != nil {
		return err
	}

	log.Infof("Finished case %d.", c.Id)
	return nil
}
Example #2
func (imageManager *dockerImageManager) deleteImage(imageID string, imageState *image.ImageState) {
	if imageID == "" {
		seelog.Errorf("Image ID to be deleted is null")
		return
	}
	seelog.Infof("Removing Image: %s", imageID)
	err := imageManager.client.RemoveImage(imageID, removeImageTimeout)
	if err != nil {
		if err.Error() == imageNotFoundForDeletionError {
			seelog.Errorf("Image already removed from the instance: %v", err)
		} else {
			seelog.Errorf("Error removing Image %v - %v", imageID, err)
			delete(imageManager.imageStatesConsideredForDeletion, imageState.Image.ImageID)
			return
		}
	}
	seelog.Infof("Image removed: %v", imageID)
	imageState.RemoveImageName(imageID)
	if len(imageState.Image.Names) == 0 {
		delete(imageManager.imageStatesConsideredForDeletion, imageState.Image.ImageID)
		imageManager.removeImageState(imageState)
		imageManager.state.RemoveImageState(imageState)
		imageManager.saver.Save()
	}
}
Example #3
func GetNodeCopy(item utils.DataTable, listener string, uid, gid int, perms string) bool {
	cfg := utils.GetConfig()
	aliveNodes := nodeinfo.GetNodes()
	for _, node := range aliveNodes {
		log.Infof("Trying download from: %s", node.NodeIPs)
		nIPs := strings.Split(node.NodeIPs, ",")
		for _, ipAddress := range nIPs {
			resp, err := getData(ipAddress, cfg.ServerConfig.ListenPort, listener, utils.GetRelativePath(listener, item.Path))
			if err != nil {
				return false
			}
			defer resp.Body.Close()
			// resp.Status is a string like "404 Not Found"; compare the
			// numeric StatusCode instead (needs net/http imported).
			if resp.StatusCode == http.StatusNotFound {
				log.Infof("File not found: %s", item.Path)
				return false
			}
			size, err := utils.FileWrite(item.Path, resp.Body, uid, gid, perms)
			if err != nil {
				log.Infof("Cannot write file: %s", item.Path)
				return false
			}
			log.Infof("%s with %v bytes downloaded", item.Path, size)
			return true
		}
	}
	return false
}
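The getData helper is not shown; a minimal sketch of what it presumably does, an HTTP GET against a peer node (the URL layout and the port's type are assumptions; imports fmt and net/http):
func getData(ip string, port int, listener, relPath string) (*http.Response, error) {
	// Fetch the file for a listener from a peer node over plain HTTP.
	url := fmt.Sprintf("http://%s:%d/%s/%s", ip, port, listener, relPath)
	return http.Get(url)
}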
Example #4
// Start TriggerTasks instance.
// NOTICE: every task runs in its own goroutine.
func (this *TimerTask) Start() {
	this.initTickerTask() // init timer tickers

	this.State = Running
	for _, task := range this.tickerTask {
		go func(task *TickerTask) { // start by goroutine
			for this.State == Running {
				select {
				case t := <-task.Tk.C:
					Log.Infof("Ticker task %s @(%s)", task.Name, t.Format("2006-01-02 15:04:05"))

					s := time.Now().UnixNano()
					task.TickerHd()
					e := time.Now().UnixNano()

					// Convert in floating point so sub-millisecond durations survive.
					usage := strconv.FormatFloat(float64(e-s)/1e6, 'f', 2, 64)
					Log.Infof("Task %s finished, time: %s ms", task.Name, usage)
				default:
					time.Sleep(DefaultSleepDur)
				}
			}
		}(task)
	}
	Log.Infof("Tricker Thread, start")
}
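Two caveats in the loop above: this.State is read by every ticker goroutine without synchronization (a data race), and the default branch busy-polls. A hedged alternative sketch that stops tickers with a closed channel instead; startOne and done are assumptions, not the project's API:
func (t *TimerTask) startOne(task *TickerTask, done <-chan struct{}) {
	go func() {
		for {
			select {
			case <-done:
				return // owner close()s done once to stop every ticker
			case tm := <-task.Tk.C:
				Log.Infof("Ticker task %s @(%s)", task.Name, tm.Format("2006-01-02 15:04:05"))
				start := time.Now()
				task.TickerHd()
				Log.Infof("Task %s finished, time: %.2f ms", task.Name,
					float64(time.Since(start).Nanoseconds())/1e6)
			}
		}
	}()
}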
Example #5
func Do_push(raw_data []byte) error {
	msg := &XPush.CommonMsg{}
	err := proto.Unmarshal(raw_data, msg)
	if err != nil {
		log.Warnf("Do_push | proto Unmarshal failed, err:%s", err.Error())
		return err
	}

	ios_msg := msg.GetIosMsg()
	appid := ios_msg.GetAppid()
	log.Infof("Do_push |  appid:%s", appid)

	// Pack the message to be sent
	m, err := pkg_apns_msg(msg)
	if err != nil {
		log.Warnf("Do_push | pkg_apns_msg err:%s", err.Error())
		return err
	}
	log.Infof("Do_push | msg:%s", m)

	// Push the message to APNs.
	// Application environment: 1 for production, 2 for development.
	env := ios_msg.GetEnvironment()
	push_to_apns(appid, env, m)

	return nil
}
Example #6
func (fr *TrecFileReader) Read() Document {
	go fr.read_to_chan(1)
	log.Infof("Waiting to read from channel")
	doc := <-fr.documents
	log.Infof("Read Document %s from channel", doc.Identifier())
	return doc
}
Example #7
File: peer.go Project: xtfly/gofd
// This func is designed to be run as a goroutine. It
// listens for messages from the peer and forwards them to a channel.
func (p *peer) peerReader(msgChan chan peerMessage) {
	log.Infof("[%s] Reading messages from peer[%s]", p.taskID, p.address)
	for {
		n, err := readNBOUint32(p.conn)
		if err != nil {
			break
		}
		if n > maxBlockLen {
			log.Error("[", p.taskID, "] Message size too large: ", n)
			break
		}

		var buf []byte
		if n == 0 {
			// keep-alive - we want an empty message
			buf = make([]byte, 1)
		} else {
			buf = make([]byte, n)
		}

		_, err = io.ReadFull(p.conn, buf)
		if err != nil {
			break
		}
		msgChan <- peerMessage{p, buf}
	}

	msgChan <- peerMessage{p, nil}
	log.Infof("[%s] Exiting reading messages from peer[%s]", p.taskID, p.address)
}
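readNBOUint32 is not shown; a minimal sketch of the framing helper this loop assumes, reading a 4-byte network-byte-order (big-endian) length prefix (the io.Reader parameter is an assumption; imports encoding/binary and io):
func readNBOUint32(r io.Reader) (uint32, error) {
	var buf [4]byte
	if _, err := io.ReadFull(r, buf[:]); err != nil {
		return 0, err
	}
	return binary.BigEndian.Uint32(buf[:]), nil
}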
Example #8
/**
 * watch the node list change.
 */
func (self *NodeInfoMaps) WatchNodeInfoMap() {
	_, _, ch, err := self.zk.GetZkConn().ChildrenW("/yundis/nodes")
	if err != nil {
		log.Errorf("Cannot watch path /yundis/nodes, err:%s", err)
		return // without a watch channel the goroutine below would block forever
	}
	go func() {
		for {
			event := <-ch
			log.Infof("node list change, %+v", event)
			children, _, ch1, err1 := self.zk.GetZkConn().ChildrenW("/yundis/nodes")
			if err1 == nil {
				ch = ch1
				//handle the node list change event
				log.Infof("node list changed : %s", children)
				infoMap := self.getNodeInfoMapFromZk()
				//change the slotinfo state.
				log.Info("The node list changed, begin to change the affected slot's info.")
				self.SetNodeInfoMap(infoMap) //refresh nodeinfo map by new zk data.
				self.ModifySlotState(infoMap)
				log.Info("Refresh nodeinfo map by new zk data.")
			} else {
				log.Errorf("Can not watching the children of /yundis/nodes, err:%s", err1)
				break
			}
			time.Sleep(time.Second)
		}
	}()
}
Example #9
File: server.go Project: houcy/push
func (this *Server) Stop() {
	// After close, every receive on ctrl returns immediately with the zero value (false)
	log.Infof("stopping comet server")
	close(this.ctrl)
	this.wg.Wait()
	log.Infof("comet server stopped")
}
Example #10
// Monitor sentinel
func MonitorSentinel() {
	redisConn := gRedisPool.Get()
	defer redisConn.Close()

	psc := redis.PubSubConn{Conn: redisConn}
	if err := psc.PSubscribe("*"); err != nil {
		log.Error("PSubscribe failed: ", err)
		return
	}
	runflag := true
	for runflag {
		switch v := psc.Receive().(type) {
		case redis.Message:
			log.Infof("Type Message>>channel %s, message: %s", v.Channel, v.Data)
		case redis.Subscription:
			log.Infof("Type Subscribe>>channel %s, kind %s, count %d", v.Channel, v.Kind, v.Count)
			gRecoveryChan <- RECOVERY_TYPE_REDIS
		case error:
			log.Error("MonitorSentinel ERROR")
			runflag = false
			// Should re-PSubscribe on a fresh connection; see the sketch below.
		case redis.PMessage:
			log.Infof("Type PMessage>>channel %s, pattern %s, data %s", v.Channel, v.Pattern, v.Data)
			ParsePMessage(v)
		default:
			log.Warnf("Unkown Message Type of psubscribe")
		}
	}
}
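The error case above ends the loop without re-subscribing, as its comment notes. A minimal supervisor sketch that restarts the monitor on a fresh pooled connection; the function name and the one-second back-off are assumptions:
func MonitorSentinelForever() {
	for {
		MonitorSentinel() // returns only after a receive error
		log.Warnf("MonitorSentinel exited, re-subscribing in 1s")
		time.Sleep(time.Second)
	}
}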
Example #11
File: comet.go Project: houcy/push
func pushMessage(appId string, app *RegApp, rawMsg *storage.RawMessage, header *Header, body []byte) bool {
	//if len(app.SendIds) != 0 {
	// regapp with sendids
	log.Infof("msgid %d: before push to (device %s) (regid %s)", rawMsg.MsgId, app.DevId, app.RegId)
	if rawMsg.SendId != "" {
		found := false
		for _, sendid := range app.SendIds {
			if sendid == rawMsg.SendId {
				found = true
				break
			}
		}
		if !found {
			log.Debugf("msgid %d: check sendid (%s) failed", rawMsg.MsgId, rawMsg.SendId)
			return false
		}
	}

	x := DevicesMap.Get(app.DevId)
	if x == nil {
		log.Debugf("msgid %d: device %s offline", rawMsg.MsgId, app.DevId)
		return false
	}
	client := x.(*Client)
	client.SendMessage2(header, body)
	log.Infof("msgid %d: after push to (device %s) (regid %s)", rawMsg.MsgId, app.DevId, app.RegId)
	storage.Instance.MsgStatsSend(rawMsg.MsgId)
	storage.Instance.AppStatsSend(rawMsg.AppId)
	return true
}
Example #12
File: letv.go Project: houcy/push
func (this *LetvAuth) Auth(token string) (bool, string) {
	url := fmt.Sprintf("%s/%s", this.url, token)
	//log.Infof("letv auth: url(%s)", url)
	res, err := http.Get(url)
	if err != nil {
		log.Warnf("http get failed: %s", err)
		return false, ""
	}
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		log.Warnf("ioutil readall failed: %s", err)
		res.Body.Close()
		return false, ""
	}
	res.Body.Close()
	//log.Infof("sso response (%s)", body)
	var tr tokenResult
	err = json.Unmarshal(body, &tr)
	if err != nil {
		log.Warnf("json unmarshal failed: %s (%s)", err, body)
		return false, ""
	}
	if tr.Status != "1" || tr.ErrCode != "0" {
		log.Infof("sso result failed: (%s) (%s)", tr.Status, tr.ErrCode)
		return false, ""
	}
	// Use comma-ok assertions so a malformed response can't panic the handler.
	m, ok := tr.Bean.(map[string]interface{})
	if !ok {
		log.Infof("unexpected 'bean' type")
		return false, ""
	}
	result, ok := m["result"]
	if !ok {
		log.Infof("missing 'bean.result'")
		return false, ""
	}
	uid, ok := result.(string)
	if !ok {
		log.Infof("'bean.result' is not a string")
		return false, ""
	}
	return true, "letv_" + uid
}
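For reference, a hedged sketch of the tokenResult shape this code unmarshals into, inferred from the fields it checks; the JSON tags are assumptions:
type tokenResult struct {
	Status  string      `json:"status"`
	ErrCode string      `json:"errCode"`
	Bean    interface{} `json:"bean"`
}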
Example #13
File: peer.go Project: xtfly/gofd
// This func is designed to be run as a goroutine. It
// listens for messages on a channel and sends them to a peer.
func (p *peer) peerWriter(errorChan chan peerMessage) {
	log.Infof("[%s] Writing messages to peer[%s]", p.taskID, p.address)
	var lastWriteTime time.Time

	for msg := range p.writeChan {
		now := time.Now()
		if len(msg) == 0 {
			// This is a keep-alive message.
			if now.Sub(lastWriteTime) < 2*time.Minute {
				continue
			}
			log.Tracef("[%s] Sending keep alive to peer[%s]", p.taskID, p.address)
		}
		lastWriteTime = now

		//log.Debugf("[%s] Sending message to peer[%s], length=%v", p.taskID, p.address, uint32(len(msg)))
		err := writeNBOUint32(p.flowctrlWriter, uint32(len(msg)))
		if err != nil {
			log.Error(err)
			break
		}
		_, err = p.flowctrlWriter.Write(msg)
		if err != nil {
			log.Errorf("[%s] Failed to write a message to peer[%s], length=%v, err=%v", p.taskID, p.address, len(msg), err)
			break
		}
	}

	log.Infof("[%s] Exiting Writing messages to peer[%s]", p.taskID, p.address)
	errorChan <- peerMessage{p, nil}
}
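writeNBOUint32 is the counterpart of the reader in Example #7; a minimal sketch under the same assumptions:
func writeNBOUint32(w io.Writer, n uint32) error {
	// Write the 4-byte big-endian length prefix that peerReader expects.
	var buf [4]byte
	binary.BigEndian.PutUint32(buf[:], n)
	_, err := w.Write(buf[:])
	return err
}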
Example #14
func main() {
	//err := LoadConfig("/system/etc/conf.json")
	err := LoadConfig("./conf.json")
	if err != nil {
		fmt.Printf("LoadConfig failed: (%s)", err)
		os.Exit(1)
	}

	//logger, err := log.LoggerFromConfigAsFile("/system/etc/log.xml")
	logger, err := log.LoggerFromConfigAsFile("./log.xml")
	if err != nil {
		fmt.Printf("Load log config failed: (%s)\n", err)
		os.Exit(1)
	}
	log.ReplaceLogger(logger)

	//wg := &sync.WaitGroup{}
	agent := NewAgent()
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)

	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		agent.Run()
		log.Infof("agent quited")
		wg.Done()
	}()
	sig := <-c
	log.Infof("Received signal '%v', exiting\n", sig)
	agent.Stop()
	wg.Wait()
}
Example #15
func writeSitemaps(outdir string, c crawler.Crawler) error {

	// Build sitemap and write to output file
	xmlout := fmt.Sprintf("%s/%s-sitemap.xml", outdir, c.Target().Host)
	xmlSitemap, err := sitemap.BuildXMLSitemap(c.AllPages())
	if err != nil {
		log.Criticalf("Failed to generate sitemap to %s", xmlout)
		os.Exit(1)
	}

	if err := ioutil.WriteFile(xmlout, xmlSitemap, 0644); err != nil {
		log.Criticalf("Failed to write sitemap to %s", xmlout)
		os.Exit(1)
	}
	log.Infof("Wrote XML sitemap to %s", xmlout)

	// Build JSON site description
	siteout := fmt.Sprintf("%s/%s-sitemap.json", outdir, c.Target().Host)

	b, err := sitemap.BuildJSONSiteStructure(c.Target(), c.AllPages())
	if err != nil {
		log.Criticalf("Failed to build JSON site structure for %s", siteout)
		os.Exit(1)
	}

	if err := ioutil.WriteFile(siteout, b, 0644); err != nil {
		log.Criticalf("Failed to write sitemap to %s", siteout)
		os.Exit(1)
	}
	log.Infof("Wrote JSON sitemap to %s", siteout)

	return nil
}
Example #16
func (this *Server) Stop() {
	// After close, every receive on exitCh returns immediately with the zero value (false)
	log.Infof("stopping comet server")
	close(this.exitCh)
	this.waitGroup.Wait()
	log.Infof("comet server stopped")
}
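Examples #9 and #16 rely on the same trick: one close() acts as a broadcast, since receives on a closed channel never block. A hedged sketch of the receiving side; worker and jobs are assumptions:
func (this *Server) worker(jobs <-chan func()) {
	defer this.waitGroup.Done()
	for {
		select {
		case <-this.exitCh:
			return // Stop() closed exitCh; every worker unblocks at once
		case job := <-jobs:
			job()
		}
	}
}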
Example #17
/**
 * watch the slot list change.
 */
func (self *SlotInfoMaps) WatchSlotInfoMap() {
	_, _, ch, err := self.zk.GetZkConn().GetW("/yundis/ids")
	if err != nil {
		log.Errorf("Cannot watch path /yundis/ids, err:%s", err)
		return // without a watch channel the goroutine below would block forever
	}

	go func() {
		for {
			event := <-ch
			log.Infof("Slotinfo list changed event, %+v", event)
			data, _, ch1, err1 := self.zk.GetZkConn().GetW("/yundis/ids")
			if err1 == nil {
				ch = ch1
				//handle the node list change event
				log.Infof("Slotinfo list changed : %s", data)
				infoMap := self.GetSlotInfoMapFromZk()
				//change the slotinfo state.
				self.SetSlotInfoMap(infoMap) //refresh nodeinfo map by new zk data.
				log.Info("Refresh slotinfo map by new zk data.")
			} else {
				log.Errorf("Can not watching the children of /yundis/ids, err:%s", err1)
				break
			}
			time.Sleep(time.Second)
		}
	}()
}
Example #18
func main() {
	// Set up a done channel, that's shared by the whole pipeline.
	// Closing this channel will kill all pipeline goroutines
	//done := make(chan struct{})
	//defer close(done)

	// Set up logging
	initializeLogging()

	// Flush the log before we shutdown
	defer log.Flush()

	// Parse the command line flags
	config := parseCommandLineFlags()
	gamq.SetConfig(&config)

	if config.ProfilingEnabled {
		defer profile.Start(profile.CPUProfile).Stop()
	}

	log.Infof("Broker started on port: %d", gamq.Configuration.Port)
	log.Infof("Executing on: %d threads", runtime.GOMAXPROCS(-1))

	connectionManager := gamq.NewConnectionManager()
	connectionManager.Start()
}
Example #19
func (s *RtmpNetStream) SendVideo(video *StreamPacket) error {
	log.Debug(video)
	if s.vkfsended {
		video.Timestamp -= s.vsend_time - uint32(s.bufferTime)
		s.vsend_time += video.Timestamp
		return sendVideo(s.conn, video)
	}
	if !video.isKeyFrame() {
		//log.Info("No Video Key Frame,Ignore Video ", video)
		//video = s.dispatcher.producer.lastVideoKeyFrame
		return nil
	}
	fkf := s.dispatcher.producer.firstVideoKeyFrame
	if fkf == nil {
		log.Info("No Video Configurate Record,Ignore Video ", video)
		return nil
	}
	fkf.Timestamp = 0
	log.Info("Send Video Configurate Record ", fkf)
	//log.Infof(" Payload %02X", fkf.Payload)
	ver := fkf.Payload[4+1]
	avcProfile := fkf.Payload[4+2]
	profileCompatibility := fkf.Payload[4+3]
	avcLevel := fkf.Payload[4+4]
	reserved := fkf.Payload[4+5] >> 2
	lengthSizeMinusOne := fkf.Payload[4+5] & 0x03
	reserved2 := fkf.Payload[4+6] >> 5
	numOfSPS := fkf.Payload[4+6] & 31
	spsLength := util.BigEndian.Uint16(fkf.Payload[4+7:])
	sps := fkf.Payload[4+9 : 4+9+int(spsLength)]
	numOfPPS := fkf.Payload[4+9+int(spsLength)]
	ppsLength := util.BigEndian.Uint16(fkf.Payload[4+9+int(spsLength)+1:])
	pps := fkf.Payload[4+9+int(spsLength)+1+2:]
	log.Infof("  cfgVersion(%v) | avcProfile(%v) | profileCompatibility(%v) |avcLevel(%v) | reserved(%v) | lengthSizeMinusOne(%v) | reserved(%v) | numOfSPS(%v) |spsLength(%v) | sps(%02X) | numOfPPS(%v) | ppsLength(%v) | pps(%02X) ",
		ver,
		avcProfile,
		profileCompatibility,
		avcLevel,
		reserved,
		lengthSizeMinusOne,
		reserved2,
		numOfSPS,
		spsLength,
		sps,
		numOfPPS,
		ppsLength,
		pps)
	err := sendFullVideo(s.conn, fkf)
	if err != nil {
		return err
	}
	s.vkfsended = true
	s.vsend_time = video.Timestamp
	video.Timestamp = 0
	log.Info("Send I Frame ", video)
	log.Infof(" Payload %v/%v", video.Payload[9]&0x1f, video.Payload[10])

	return sendFullVideo(s.conn, video)
}
Example #20
func confirmOne(ack, nack chan uint64) {
	select {
	case tag := <-ack:
		log.Infof("confirmed delivery with delivery tag: %d", tag)
	case tag := <-nack:
		log.Infof("failed delivery of delivery tag: %d", tag)
	}
}
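confirmOne assumes the channel is already in confirm mode. A hedged wiring sketch against the streadway/amqp API; publishWithConfirm, the publish callback, and the buffer sizes are assumptions:
func publishWithConfirm(channel *amqp.Channel, publish func() error) error {
	// Put the channel into confirm mode, then register the ack/nack
	// channels that confirmOne selects on.
	if err := channel.Confirm(false); err != nil {
		return fmt.Errorf("channel could not be put into confirm mode: %s", err)
	}
	ack, nack := channel.NotifyConfirm(make(chan uint64, 1), make(chan uint64, 1))
	defer confirmOne(ack, nack)
	return publish() // publish exactly one message on this channel
}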
Example #21
func runFsInspector(args []string) int {
	if err := fsFlagset.Parse(args); err != nil {
		log.Critical(err)
		return 1
	}

	if _fsFlags.OriginalDir == "" {
		log.Critical("original-dir is not set")
		return 1
	}

	if _fsFlags.Mountpoint == "" {
		log.Critical("mount-point is not set")
		return 1
	}

	if _fsFlags.AutopilotConfig != "" && _fsFlags.OrchestratorURL != ocutil.LocalOrchestratorURL {
		log.Critical("non-default orchestrator url set for autopilot orchestration mode")
		return 1
	}

	if logutil.Debug {
		// log level: 0..2
		hookfs.SetLogLevel(1)
	} else {
		hookfs.SetLogLevel(0)
	}

	if _fsFlags.AutopilotConfig != "" {
		cfg, err := config.NewFromFile(_fsFlags.AutopilotConfig)
		if err != nil {
			panic(log.Critical(err))
		}
		autopilotOrchestrator, err := ocutil.NewAutopilotOrchestrator(cfg)
		if err != nil {
			panic(log.Critical(err))
		}
		log.Info("Starting autopilot-mode orchestrator")
		go autopilotOrchestrator.Start()
	}

	hook := &inspector.FilesystemInspector{
		OrchestratorURL: _fsFlags.OrchestratorURL,
		EntityID:        _fsFlags.EntityID,
	}

	fs, err := hookfs.NewHookFs(_fsFlags.OriginalDir, _fsFlags.Mountpoint, hook)
	if err != nil {
		panic(log.Critical(err))
	}
	log.Infof("Serving %s", fs)
	log.Infof("Please run `fusermount -u %s` after using this, manually", _fsFlags.Mountpoint)
	if err = fs.Serve(); err != nil {
		panic(log.Critical(err))
	}
	// NOTREACHED
	return 0
}
Example #22
// NewMKafka starts the Kafka monitoring service
func NewMKafka() ([]MKafka, time.Duration) {
	flag.Parse()

	// Resolve the config file path; relative paths are taken relative to cmdPath.
	confFile := *configFile
	if !filepath.IsAbs(confFile) {
		confFile = fmt.Sprintf("%s/../%s", cmdPath, confFile)
	}
	yamlFile, err := ioutil.ReadFile(confFile)
	if err != nil {
		panic(err)
	}

	err = yaml.Unmarshal(yamlFile, &c)
	if err != nil {
		panic(err)
	}

	if len(c.Monitor) == 0 {
		fmt.Printf("Not found monitor cluser ,Please check conf %s\n", confFile)
		os.Exit(1)
	}

	var infos []MKafka

	for _, m := range c.Monitor {

		zkcon, err := NewZk(c.Cluster[m].ZKList)
		//TODO: disable panic
		checkErr(1, err)
		if err != nil {
			panic(err)
		}

		lags := make(map[string]int64)
		for _, v := range c.Cluster[m].SetLag {
			lags[v.Consumer] = v.Lag
		}

		f := MKafka{
			ZKList:       c.Cluster[m].ZKList,
			ZNode:        c.Cluster[m].ZNode,
			ToolDir:      c.ENV.KafkaToolDir,
			LogFormat:    c.LOG.LogFormat,
			zkc:          zkcon,
			SetLag:       lags,
			Cluster:      m,
			TimeWait:     c.ENV.TimeWait,
			TimeInterval: c.ENV.TimeInterval,
			Concurrent:   c.ENV.Concurrent,
		}
		log.Infof("Load yaml conf %+v", f)
		infos = append(infos, f)
	}

	log.Infof("parse config file : %v", infos)
	return infos, c.ENV.Collect
}
Example #23
func NewRpcClient(amqpURI, exchange string) (*RpcClient, error) {
	client := &RpcClient{
		exchange:     exchange,
		requestId:    0,
		rpcTimeout:   10,
		requestTable: make(map[uint32]chan []byte),
		lock:         new(sync.RWMutex),
	}

	var err error
	client.conn, err = amqp.Dial(amqpURI)
	if err != nil {
		log.Errorf("Dial: %s", err)
		return nil, err
	}

	log.Infof("Got Connection to %s, getting Channel", amqpURI)
	client.channel, err = client.conn.Channel()
	if err != nil {
		log.Errorf("Channel: %s", err)
		return nil, err
	}

	log.Infof("Got Channel, declaring %q Exchange (%q)", rpcExchangeType, exchange)

	if err := client.channel.ExchangeDeclare(
		exchange,        // name
		rpcExchangeType, // type
		true,            // durable
		false,           // auto-deleted
		false,           // internal
		false,           // noWait
		nil,             // arguments
	); err != nil {
		log.Errorf("Exchange Declare: %s", err)
		return nil, err
	}

	callbackQueue, err := client.channel.QueueDeclare(
		"",    // name
		false, // durable
		true,  // autoDelete
		true,  // exclusive
		false, // noWait
		nil,   // args
	)
	if err != nil {
		log.Errorf("callbackQueue Declare error: %s", err)
		return nil, err
	}
	client.callbackQueue = callbackQueue.Name
	log.Infof("declared callback queue [%s]", client.callbackQueue)

	go client.handleResponse()

	return client, nil
}
Example #24
func NewConsumer(amqpURI, exchange string, qos int) (*Consumer, error) {
	queueName := utils.GetLocalIP()
	ctag := queueName + "_tag"
	c := &Consumer{
		conn:    nil,
		channel: nil,
		queue:   queueName,
		tag:     ctag,
		done:    make(chan error),
	}

	var err error

	log.Infof("dialing %q", amqpURI)
	c.conn, err = amqp.Dial(amqpURI)
	if err != nil {
		return nil, fmt.Errorf("Dial: %s", err)
	}

	/*go func() {
		log.Infof("closing: %s", <-c.conn.NotifyClose(make(chan *amqp.Error)))
	}()*/

	log.Infof("got Connection, getting Channel")
	c.channel, err = c.conn.Channel()
	if err != nil {
		return nil, fmt.Errorf("Channel: %s", err)
	}

	queue, err := c.channel.QueueDeclare(
		queueName, // name of the queue
		true,      // durable
		false,     // delete when unused
		false,     // exclusive
		false,     // noWait
		nil,       // arguments
	)
	if err != nil {
		return nil, fmt.Errorf("Queue Declare: %s", err)
	}

	log.Infof("declared Queue (%q %d messages, %d consumers), binding to Exchange (%s)",
		queue.Name, queue.Messages, queue.Consumers, exchange)

	if err = c.channel.QueueBind(
		queue.Name, // name of the queue
		"",         // bindingKey
		exchange,   // sourceExchange
		false,      // noWait
		nil,        // arguments
	); err != nil {
		return nil, fmt.Errorf("Queue Bind: %s", err)
	}

	if err = c.channel.Qos(qos, 0, false); err != nil {
		return nil, fmt.Errorf("Qos: %s", err)
	}
	return c, nil
}
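NewConsumer declares and binds the queue but never starts delivery; a hedged follow-up sketch of the step that would typically sit just before the return (handleDeliveries is a hypothetical handler):
	deliveries, err := c.channel.Consume(
		c.queue, // name of the queue
		c.tag,   // consumer tag
		false,   // autoAck
		false,   // exclusive
		false,   // noLocal
		false,   // noWait
		nil,     // arguments
	)
	if err != nil {
		return nil, fmt.Errorf("Queue Consume: %s", err)
	}
	go handleDeliveries(deliveries, c.done)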
Example #25
func calcDailyBytesServed(client influx.Client, bp influx.BatchPoints, startTime time.Time, endTime time.Time, config StartupConfig) {
	bytesToTerabytes := 1000000000.00
	sampleTimeSecs := 60.00
	bitsTobytes := 8.00
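	// Unit note: the 1-minute samples appear to be kilobits/sec (compare the
	// kbps-to-Gbps conversion in Example #28), so value*sampleTimeSecs/bitsTobytes
	// below yields kilobytes, and bytesToTerabytes (1e9 KB per TB) scales the sum to TB.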
	queryString := fmt.Sprintf(`select mean(value) from "monthly"."bandwidth.cdn.1min" where time > '%s' and time < '%s' group by time(1m), cdn`, startTime.Format(time.RFC3339), endTime.Format(time.RFC3339))
	log.Infof("queryString = %v\n", queryString)
	res, err := queryDB(client, queryString, "cache_stats")
	if err != nil {
		log.Error("An error occured getting max bandwidth!\n")
		return
	}
	if len(res) > 0 && len(res[0].Series) > 0 {
		for _, row := range res[0].Series {
			bytesServed := float64(0)
			cdn := row.Tags["cdn"]
			for _, record := range row.Values {
				if record[1] != nil {
					value, err := record[1].(json.Number).Float64()
					if err != nil {
						log.Errorf("Couldn't parse value from record %v\n", record)
						continue
					}
					bytesServed += value * sampleTimeSecs / bitsTobytes
				}
			}
			bytesServedTB := bytesServed / bytesToTerabytes
			log.Infof("TBytes served for cdn %v = %v", cdn, bytesServedTB)
			//write to Traffic Ops
			var statsSummary traffic_ops.StatsSummary
			statsSummary.CDNName = cdn
			statsSummary.DeliveryService = "all"
			statsSummary.StatName = "daily_bytesserved"
			statsSummary.StatValue = strconv.FormatFloat(bytesServedTB, 'f', 2, 64)
			statsSummary.SummaryTime = time.Now().Format(time.RFC3339)
			statsSummary.StatDate = startTime.Format("2006-01-02")
			go writeSummaryStats(config, statsSummary)
			//write to Influxdb
			tags := map[string]string{"cdn": cdn, "deliveryservice": "all"}
			fields := map[string]interface{}{
				"value": bytesServedTB, //converted to TB
			}
			pt, err := influx.NewPoint(
				"daily_bytesserved",
				tags,
				fields,
				startTime,
			)
			if err != nil {
				log.Errorf("error adding creating data point for max Gbps...%v\n", err)
				continue
			}
			bp.AddPoint(pt)
		}
		config.BpsChan <- bp
	}
}
Example #26
func main() {

	var (
		flConfig = flag.String("c", "./etc/conf.json", "Config file")
	)

	flag.Parse()

	err := conf.LoadConfig(*flConfig)
	if err != nil {
		fmt.Printf("LoadConfig (%s) failed: (%s)\n", *flConfig, err)
		os.Exit(1)
	}

	logger, err := log.LoggerFromConfigAsFile("./etc/log.xml")
	if err != nil {
		fmt.Printf("Load log config failed: (%s)\n", err)
		os.Exit(1)
	}

	log.ReplaceLogger(logger)

	waitGroup := &sync.WaitGroup{}
	cometServer := comet.NewServer()

	listener, err := cometServer.Init(conf.Config.Comet)
	if err != nil {
		log.Criticalf("Failed to start comet server: %s", err.Error())
		os.Exit(1)
	}

	cometServer.SetAcceptTimeout(time.Duration(conf.Config.AcceptTimeout) * time.Second)
	cometServer.SetReadTimeout(time.Duration(conf.Config.ReadTimeout) * time.Second)
	cometServer.SetHeartbeatTimeout(time.Duration(conf.Config.HeartbeatTimeout) * time.Second)

	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
	waitGroup.Add(1) // registered up front so the signal goroutine's Done can't fire first
	go func() {
		sig := <-c
		log.Infof("Received signal '%v', exiting\n", sig)
		// utils.RemovePidFile(srv.runtime.config.Pidfile)
		cometServer.Stop()
		log.Infof("leave 1")
		waitGroup.Done()
		log.Infof("leave 2")
	}()

	go func() {
		cometServer.Run(listener)
	}()

	go api.StartHttp(conf.Config.Web)
	waitGroup.Wait()
}
Example #27
func (imageManager *dockerImageManager) getUnusedImageForDeletion() *image.ImageState {
	imageManager.updateLock.RLock()
	defer imageManager.updateLock.RUnlock()
	candidateImageStatesForDeletion := imageManager.getCandidateImagesForDeletion()
	if len(candidateImageStatesForDeletion) < 1 {
		seelog.Infof("No eligible images for deletion for this cleanup cycle")
		return nil
	}
	seelog.Infof("Found %d eligible images for deletion", len(candidateImageStatesForDeletion))
	return imageManager.getLeastRecentlyUsedImage(candidateImageStatesForDeletion)
}
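A hedged sketch of how this selector and the deleteImage method from Example #2 could combine into one cleanup pass; removeUnusedImages and its loop bound are assumptions, not the agent's actual API:
func (imageManager *dockerImageManager) removeUnusedImages(maxPerCycle int) {
	for i := 0; i < maxPerCycle; i++ {
		imageState := imageManager.getUnusedImageForDeletion()
		if imageState == nil {
			return // nothing eligible this cleanup cycle
		}
		// deleteImage removes the image state once its last name is gone.
		for _, imageName := range imageState.Image.Names {
			imageManager.deleteImage(imageName, imageState)
		}
	}
}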
Example #28
func calcDailyMaxGbps(client influx.Client, bp influx.BatchPoints, startTime time.Time, endTime time.Time, config StartupConfig) {
	kilobitsToGigabits := 1000000.00
	queryString := fmt.Sprintf(`select time, cdn, max(value) from "monthly"."bandwidth.cdn.1min" where time > '%s' and time < '%s' group by cdn`, startTime.Format(time.RFC3339), endTime.Format(time.RFC3339))
	log.Infof("queryString = %v\n", queryString)
	res, err := queryDB(client, queryString, "cache_stats")
	if err != nil {
		log.Errorf("An error occured getting max bandwidth! %v\n", err)
		return
	}
	if len(res) > 0 && len(res[0].Series) > 0 {
		for _, row := range res[0].Series {
			for _, record := range row.Values {
				t := record[0].(string)
				if record[1] != nil {
					cdn := record[1].(string)
					value, err := record[2].(json.Number).Float64()
					if err != nil {
						log.Errorf("Couldn't parse value from record %v\n", record)
						continue
					}
					value = value / kilobitsToGigabits
					statTime, _ := time.Parse(time.RFC3339, t)
					log.Infof("max gbps for cdn %v = %v", cdn, value)
					var statsSummary traffic_ops.StatsSummary
					statsSummary.CDNName = cdn
					statsSummary.DeliveryService = "all"
					statsSummary.StatName = "daily_maxgbps"
					statsSummary.StatValue = strconv.FormatFloat(value, 'f', 2, 64)
					statsSummary.SummaryTime = time.Now().Format(time.RFC3339)
					statsSummary.StatDate = statTime.Format("2006-01-02")
					go writeSummaryStats(config, statsSummary)

					//write to influxdb
					tags := map[string]string{"cdn": cdn, "deliveryservice": "all"}
					fields := map[string]interface{}{
						"value": value,
					}
					pt, err := influx.NewPoint(
						"daily_maxgbps",
						tags,
						fields,
						statTime,
					)
					if err != nil {
						fmt.Printf("error adding creating data point for max Gbps...%v\n", err)
						continue
					}
					bp.AddPoint(pt)
				}
			}
		}
	}
	config.BpsChan <- bp
}
Example #29
func startHttp(addr string) {
	log.Infof("web server routine start")
	// push API
	http.HandleFunc("/sync/message", messageHandler)
	err := http.ListenAndServe(addr, nil)
	if err != nil {
		log.Criticalf("http listen: ", err)
		os.Exit(1)
	}
	log.Infof("web server routine stop")
}
Example #30
// parameters:
//  - interval(duration): interval (default: 0 msecs)
//
// should support dynamic reloading
func (d *Dumb) LoadConfig(cfg config.Config) error {
	log.Debugf("CONFIG: %s", cfg.AllSettings())
	paramInterval := "explorepolicyparam.interval"
	if cfg.IsSet(paramInterval) {
		d.Interval = cfg.GetDuration(paramInterval)
		log.Infof("Set interval=%s", d.Interval)
	} else {
		log.Infof("Using default interval=%s", d.Interval)
	}
	return nil
}