Example #1
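// connect initializes COM, creates a WbemScripting.SWbemLocator object and
// stores the SWbemServices dispatch returned by ConnectServer on the collector.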
func (c *win_wmi_collector) connect() (err error) {
	// initialize COM using the COINIT_MULTITHREADED apartment model
	ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)

	unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator")
	if err != nil {
		logging.Critical("oleutil.CreateObject Failed: ", err)
		return err
	}
	defer unknown.Release()

	wmi, err := unknown.QueryInterface(ole.IID_IDispatch)
	if err != nil {
		logging.Critical("QueryInterface Failed: ", err)
		return err
	}
	defer wmi.Release()

	serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer")
	if err != nil {
		logging.Critical("ConnectServer Failed: ", err)
		return err
	}

	c.service = serviceRaw.ToIDispatch()
	return nil
}
Example #2
File: ping.go Project: homingway/hickwall
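// NewSinglePingCollector builds a ping_collector for a single target; an empty
// target is logged as critical, and packet count, interval and timeout fall
// back to defaults when not configured.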
func NewSinglePingCollector(name, prefix string, conf config.Config_single_pinger) *ping_collector {

	if conf.Target == "" {
		logging.Critical("CRITICAL: cannot ping an empty target.")
	}

	tags := conf.Tags.Copy()
	tags["target"] = conf.Target

	if conf.Packets <= 0 {
		conf.Packets = 1
	}

	c := &ping_collector{
		name:    name,
		enabled: true,
		config:  conf,
		prefix:  prefix,

		interval: conf.Interval.MustDuration(time.Second),
		timeout:  conf.Timeout.MustDuration(time.Millisecond * 500),
		tags:     tags,
	}
	return c
}
Example #3
// run is the entry point of the long-running daemon
func run(isDebug bool, daemon bool) {
	if !config.IsCoreConfigLoaded() {
		err := config.LoadCoreConfig()
		if err != nil {
			logging.Critical("Failed to load core config: ", err)
			return
		}
	}

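	// RSS watchdog: once per second, compare the process resident set size
	// against the configured limit and exit if it is exceeded.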
	go func() {
		for {
			rss_mb := float64(gs.GetCurrentRSS()) / 1024 / 1024 // MB
			logging.Tracef("current rss: %f", rss_mb)
			if rss_mb > float64(config.CoreConf.Rss_limit_mb) {
				logging.Criticalf("Suicide. CurrentRSS above limit: %f >= %d Mb", rss_mb, config.CoreConf.Rss_limit_mb)
				os.Exit(1)
			}
			time.Sleep(time.Second)
		}
	}()

	if daemon {
		runService(isDebug)
	} else {
		runWithoutService()
	}
}
Example #4
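// MustNewWinWmiCollector constructs a win_wmi_collector and connects to WMI
// immediately; a failed connection is logged as critical but the collector is
// still returned.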
func MustNewWinWmiCollector(name, prefix string, opts config.Config_win_wmi) *win_wmi_collector {
	c := &win_wmi_collector{
		name:     name,
		enabled:  true,
		prefix:   prefix,
		interval: opts.Interval.MustDuration(time.Second * 15),

		config: opts,
	}

	err := c.connect()
	if err != nil {
		logging.Critical("CRITICAL: win_wmi_collector cannot connect: ", err)
	}

	return c
}
Example #5
File: kafka.go Project: homingway/hickwall
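// loop is the kafkaBackend event loop: it connects to Kafka (retrying every
// second until a producer is available), forwards incoming data points to the
// async producer, and shuts down when a close is requested.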
func (b *kafkaBackend) loop() {
	var (
		startConsuming    <-chan newcore.MultiDataPoint
		try_connect_first chan bool
		try_connect_tick  <-chan time.Time
	)
	startConsuming = b.updates
	logging.Info("kafkaBackend.loop started")

	for {
		if b.producer == nil && try_connect_first == nil && try_connect_tick == nil {
			startConsuming = nil // disable consuming

			try_connect_first = make(chan bool)
			logging.Debug("trying to connect to kafka first time.")

			// trying to connect to kafka first time
			go func() {
				err := b.connect()
				if b.producer != nil && err == nil {
					logging.Debugf("connect kafka first time OK: %v", b.producer)
					try_connect_first <- true
				} else {
					logging.Criticalf("connect to kafka failed %s", err)
					try_connect_first <- false
				}
			}()
		}
		if startConsuming != nil {
			logging.Trace("kafkaBackend consuming started")
		}

		select {
		case md := <-startConsuming:
			for idx, p := range md {
				b.producer.Input() <- &sarama.ProducerMessage{
					Topic: b.conf.Topic_id,
					Key:   sarama.StringEncoder(p.Metric),
					Value: p,
				}
				_d, _ := p.Encode()
				logging.Tracef("kafka producer ---> %d,  %s", idx, _d)
			}
			logging.Debugf("kafkaBackend consuming finished: count: %d", len(md))
		case connected := <-try_connect_first:
			try_connect_first = nil // disable this branch
			if !connected {
				// the first connection attempt failed; keep retrying at a
				// fixed interval until a connection is established.
				logging.Critical("connect first time failed, try to connect with interval of 1s")
				try_connect_tick = time.Tick(time.Second * 1)
			} else {
				logging.Debug("kafka connected the first time.")
				startConsuming = b.updates
			}
		case <-try_connect_tick:
			// try to connect with interval
			err := b.connect()
			if b.producer != nil && err == nil {
				// finally connected.
				try_connect_tick = nil
				startConsuming = b.updates
			} else {
				logging.Criticalf("kafka backend trying to connect but failed: %s", err)
			}
		case errc := <-b.closing:
			logging.Info("kafkaBackend.loop closing")
			startConsuming = nil // stop consuming
			errc <- nil
			close(b.updates)
			logging.Info("kafkaBackend.loop closed")
			return
		}
	}
}
Example #6
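// loop is the influxdbBackend event loop: it creates the influxdb client
// (retrying every second until it succeeds), writes incoming data points as
// batch points, and shuts down when a close is requested.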
func (b *influxdbBackend) loop() {
	var (
		startConsuming         <-chan newcore.MultiDataPoint
		try_create_client_once chan bool
		try_create_client_tick <-chan time.Time
	)
	startConsuming = b.updates
	logging.Debug("influxdb backend loop started ")

	for {
		if b.output == nil && try_create_client_once == nil && try_create_client_tick == nil {
			startConsuming = nil // disable consuming
			try_create_client_once = make(chan bool)
			// asynchronously try to create the influxdb client for the first time.
			go func() {
				err := b.newInfluxdbClientFromConf()
				if err == nil {
					try_create_client_once <- true
				} else {
					try_create_client_once <- false
				}
			}()
		}

		//TODO: Flush_interval and Max_batch_size
		select {
		case md := <-startConsuming:
			if b.output != nil {
				points := []client.Point{}
				for _, p := range md {
					// logging.Debug(p.Metric.Clean())
					// logging.Debug(utils.Convert(p.Value))
					points = append(points, client.Point{
						Measurement: p.Metric.Clean(),
						Time:        p.Timestamp,
						Fields: map[string]interface{}{
							"value": utils.Convert(p.Value),
						},
						Tags: p.Tags, //TODO: Tags
					})
				}
				write := client.BatchPoints{
					Database:        b.conf.Database,
					RetentionPolicy: b.conf.RetentionPolicy,
					Points:          points,
				}
				// logging.Debugf("write: count: %d", len(md))

				//FIXME: connection timeout?
				resp, err := b.output.Write(write)
				if err != nil {
					logging.Errorf("failed to write into influxdb: %v, %+v", err, resp)
				}
			}
		case opened := <-try_create_client_once:
			try_create_client_once = nil // disable this branch
			if !opened {
				// the first attempt to create the client failed; keep retrying
				// at a fixed interval until it succeeds.
				logging.Debug("open the first time failed, try to open with interval of 1s")
				try_create_client_tick = time.Tick(time.Second * 1)
			} else {
				startConsuming = b.updates
			}
		case <-try_create_client_tick:
			// try to open with interval
			err := b.newInfluxdbClientFromConf()
			if b.output != nil && err == nil {
				// finally opened.
				try_create_client_tick = nil
				startConsuming = b.updates
			} else {
				logging.Criticalf("influxdb backend trying to create client but failed: %s", err)
			}
		case errc := <-b.closing:
			// fmt.Println("errc <- b.closing")
			logging.Debug("influxdbBackend.loop closing")
			startConsuming = nil // stop consuming
			errc <- nil
			close(b.updates)
			logging.Debug("influxdbBackend.loop stopped")
			return
		}
	}
}
Example #7
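// runAsPrimaryService is the Windows service handler: it starts hickwall,
// reports the Running state, and services Interrogate/Stop/Shutdown control
// requests until told to stop.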
func runAsPrimaryService(args []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (ssec bool, errno uint32) {
	logging.Debug("runAsPrimaryService started")

	defer utils.Recover_and_log()

	const cmdsAccepted = svc.AcceptStop | svc.AcceptShutdown
	changes <- svc.Status{State: svc.StartPending}
	changes <- svc.Status{State: svc.Running, Accepts: cmdsAccepted}

	//http://localhost:6060/debug/pprof/
	// utils.HttpPprofServe(6060)

	//	after := time.After(time.Duration(8) * time.Minute)
	// f, _ := os.Create("d:\\cpu-" + strconv.Itoa(pid) + ".pprof")
	// pprof.StartCPUProfile(f)
	// defer pprof.StopCPUProfile()

	//	cfg := profile.Config{
	//		MemProfile:     true,
	//		ProfilePath:    "./pprofs/", // store profiles in current directory
	//		NoShutdownHook: true,        // do not hook SIGINT
	//	}
	//	p := profile.Start(&cfg)
	//
	//	defer p.Stop()

	// utils.StartCPUProfile()
	// defer utils.StopCPUProfile()

	// go func() {
	// 	for {
	// 		<-time.After(time.Second * time.Duration(15))
	// 		debug.FreeOSMemory()
	// 	}
	// }()

	err := hickwall.Start()
	if err != nil {
		logging.Criticalf("Failed to start hickwall: %v", err)
		return
	}
	defer hickwall.Stop()

	logging.Debug("service event handling loop started ")
	// major loop for signal processing.
loop:
	for {
		select {
		case c := <-r:
			switch c.Cmd {
			case svc.Interrogate:
				changes <- c.CurrentStatus
				// testing deadlock from https://code.google.com/p/winsvc/issues/detail?id=4
				time.Sleep(100 * time.Millisecond)
				changes <- c.CurrentStatus
			case svc.Stop, svc.Shutdown:
				break loop
			default:
				logging.Errorf("unexpected control request #%d", c)
			}
		}
	}
	changes <- svc.Status{State: svc.StopPending}
	logging.Debug("runAsPrimaryService stopped")
	return
}