Example #1
func MustNewWinPdhCollector(name, prefix string, opts config.Config_win_pdh_collector) *win_pdh_collector {

	c := &win_pdh_collector{
		name:        name,
		enabled:     true,
		prefix:      prefix,
		interval:    opts.Interval.MustDuration(time.Second),
		config:      opts,
		hPdh:        pdh.NewPdhCollector(),
		map_queries: make(map[string]config.Config_win_pdh_query),
	}

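	// register each configured query with the PDH collector; entries without a metric name are skipped and every query ends up with a tag set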
	for _, q := range opts.Queries {
		if q.Metric == "" {
			logging.Errorf("Error: Pdh Collector metric is empty: %# v", pretty.Formatter(q))
			continue
		}

		c.hPdh.AddEnglishCounter(q.Query)
		if q.Tags == nil {
			q.Tags = newcore.AddTags.Copy()
		}

		if opts.Query_to_tag || q.Query_to_tag {
			q.Tags["query"] = q.Query
		}

		c.map_queries[q.Query] = q
	}
	logging.Tracef("MustNewWinPdhCollector:opts.Queries: %# v", pretty.Formatter(opts.Queries))
	logging.Tracef("MustNewWinPdhCollector c.map_queries: %# v", pretty.Formatter(c.map_queries))
	return c
}
Example #2
func (b *DummyBackend) loop() {
	var (
		startConsuming <-chan MultiDataPoint
	)

	startConsuming = b.updates

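	// receiving from a nil channel blocks forever, so setting startConsuming to nil in the closing case disables further consumption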
	for {
		select {
		case md := <-startConsuming:
			if b.printting {
			if b.detail {
					for _, dp := range md {
						fmt.Printf("dummy(%s) --> %+v \n", b.name, dp)
						logging.Tracef("dummy(%s) --> %+v \n", b.name, dp)
					}
				} else {
					fmt.Printf("dummy(%s), consuming md length %d  \n", b.name, len(md))
					logging.Tracef("dummy(%s), consuming md length %d \n", b.name, len(md))
				}

			}
			if b.jamming > 0 {
				fmt.Println("jamming: ", b.jamming)
				time.Sleep(b.jamming)
			}
		case errc := <-b.closing:
			// fmt.Println("errc <- b.closing")
			startConsuming = nil // stop consuming
			errc <- nil
			close(b.updates)
			return
		}
	}
}
Example #3
File: wmi.go Project: homingway/hickwall
func QueryWmiFields(query string, fields []string) ([]map[string]string, error) {

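	// run the WQL query and flatten every row of the result set into a map of field name -> stringified value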
	if len(fields) == 1 && fields[0] == "*" {
		logging.Errorf("`select *` is not supported; fields must be listed explicitly")
		return nil, fmt.Errorf("`select *` is not supported; fields must be listed explicitly")
	}

	resultRaw, err := oleutil.CallMethod(wmi_service, "ExecQuery", query)
	if err != nil {
		logging.Error("ExecQuery Failed: ", err)
		return nil, fmt.Errorf("ExecQuery Failed: %v", err)
	}
	result := resultRaw.ToIDispatch()
	defer result.Release()

	countVar, err := oleutil.GetProperty(result, "Count")
	if err != nil {
		logging.Errorf("Get result count Failed: %v", err)
		return nil, fmt.Errorf("Get result count Failed: %v", err)
	}
	count := int(countVar.Val)

	resultMap := []map[string]string{}

	for i := 0; i < count; i++ {
		itemMap := make(map[string]string)

		itemRaw, err := oleutil.CallMethod(result, "ItemIndex", i)
		if err != nil {
			return nil, fmt.Errorf("ItemIndex Failed: %v", err)
		}

		item := itemRaw.ToIDispatch()
		defer item.Release()

		for _, field := range fields {
			asString, err := oleutil.GetProperty(item, field)

			if err == nil {
				itemMap[field] = fmt.Sprintf("%v", asString.Value())
			} else {
				fmt.Println(err)
			}
		}

		resultMap = append(resultMap, itemMap)
		logging.Tracef("wmi query result: %+v", itemMap)
	}
	logging.Tracef("wmi query result count: %d", len(resultMap))
	return resultMap, nil
}
Example #4
func (c *win_wmi_collector) query(query string, fields []string) ([]map[string]string, error) {
	if c.service != nil {
		resultRaw, err := oleutil.CallMethod(c.service, "ExecQuery", query)
		if err != nil {
			logging.Error("ExecQuery Failed: ", err)
			return nil, fmt.Errorf("ExecQuery Failed: %v", err)
		}
		result := resultRaw.ToIDispatch()
		defer result.Release()

		countVar, err := oleutil.GetProperty(result, "Count")
		if err != nil {
			logging.Error("Get result count Failed: ", err)
			return nil, fmt.Errorf("Get result count Failed: %v", err)
		}
		count := int(countVar.Val)

		resultMap := []map[string]string{}

		for i := 0; i < count; i++ {
			itemMap := make(map[string]string)

			itemRaw, err := oleutil.CallMethod(result, "ItemIndex", i)
			if err != nil {
				return nil, fmt.Errorf("ItemIndex Failed: %v", err)
			}

			item := itemRaw.ToIDispatch()
			defer item.Release()

			for _, field := range fields {
				asString, err := oleutil.GetProperty(item, field)

				if err == nil {
					itemMap[field] = fmt.Sprintf("%v", asString.Value())
				} else {
					logging.Errorf("cannot find field in SWbemObject: %v", err)
				}
			}

			resultMap = append(resultMap, itemMap)
			logging.Tracef("wmi query result: %+v", itemMap)
		}
		logging.Tracef("wmi query result count: %d", len(resultMap))
		return resultMap, nil
	} else {
		logging.Error("win_wmi_collector c.service is nil")
		return nil, fmt.Errorf("win_wmi_collector c.service is nil")
	}
}
Example #5
func (c *win_pdh_collector) CollectOnce() newcore.CollectResult {
	logging.Debug("win_pdh_collector.CollectOnce Started")

	var items newcore.MultiDataPoint

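	// turn every successful PDH sample into a data point, using the matching query config for the metric name and tags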
	for _, pd := range c.hPdh.CollectData() {
		if pd.Err == nil {
			query, ok := c.map_queries[pd.Query]
			if ok {
				logging.Tracef("query: %+v, \n %+v", query.Metric, query)
				items = append(items, newcore.NewDP(c.prefix, query.Metric.Clean(), pd.Value, query.Tags, "", "", ""))
			}
		} else {
			if !strings.Contains(pd.Err.Error(), `\Process(hickwall)\Working Set - Private`) {
				logging.Errorf("win_pdh_collector ERROR: %v", pd.Err)
			}
		}
	}

	logging.Debugf("win_pdh_collector.CollectOnce Finished. count: %d", len(items))
	return newcore.CollectResult{
		Collected: items,
		Next:      time.Now().Add(c.interval),
		Err:       nil,
	}
}
Example #6
// run is the entry point of the long-running daemon.
func run(isDebug bool, daemon bool) {
	if !config.IsCoreConfigLoaded() {
		err := config.LoadCoreConfig()
		if err != nil {
			logging.Critical("Failed to load core config: ", err)
			return
		}
	}

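	// watchdog goroutine: check RSS once per second and exit the process if it exceeds the configured limit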
	go func() {
		for {
			rss_mb := float64(gs.GetCurrentRSS()) / 1024 / 1024 // Mb
			logging.Tracef("current rss: %f", rss_mb)
			if rss_mb > float64(config.CoreConf.Rss_limit_mb) {
				logging.Criticalf("Suicide. CurrentRSS above limit: %f >= %d Mb", rss_mb, config.CoreConf.Rss_limit_mb)
				os.Exit(1)
			}
			<-time.After(time.Second)
		}
	}()

	if daemon {
		runService(isDebug)
	} else {
		runWithoutService()
	}
}
Example #7
func (f *fanout) consuming(idx int, closing chan chan error) {
	var (
		first   MultiDataPoint
		pub     chan<- MultiDataPoint
		pending <-chan MultiDataPoint
	)

	first = nil
	pending = nil
	pub = nil

	logging.Tracef("fanout.consuming: -started- idx: %d, closing: 0x%X\n", idx, closing)

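	// each select case below is enabled or disabled by swapping its channel between a real channel and nil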
	for {
		if pending == nil && pub == nil {
			pending = f.pending[idx] // enable read from pending chan
		}
		logging.Tracef("fanout.consuming -1- idx: %d, first: %x, pending: %x, pub: %x\n", idx, &first, pending, pub)

		select {
		case first = <-pending:
			logging.Tracef("fanout.consuming -2- idx: %d, first: %x, pending: %x, pub: %x\n", idx, &first, pending, pub)
			pending = nil          // disable read from pending chan
			pub = f.chan_pubs[idx] // enable send to pub chan
		case pub <- first:
			logging.Debugf("fanout.consuming -3- send data Finished: %s idx: %d, sent cnt: %d, pub: %x\n", f.bks[idx].Name(), idx, len(first), pub)
			pub = nil   // disable send to pub chan
			first = nil // clear first
		case errc := <-closing:
			logging.Tracef("fanout.consuming -4.Start- closing idx: %d, first: %x, pending: %x, pub: %x\n", idx, &first, pending, pub)

			pending = nil // nil startSend channel
			pub = nil

			f.chan_pubs[idx] = nil // nil pub channel

			f.pending[idx] = nil

			errc <- nil // response to closing channel

			logging.Tracef("fanout.consuming -4.End- closing idx: %d, first: %x, pending: %x, pub: %x\n", idx, &first, pending, pub)
			return
		}
	}
}
Example #8
func CmdServiceStart(c *cli.Context) {
	logging.Trace("CmdServiceStart")
	// -----------------------------------
	err := HelperService.StartService()
	if err != nil {
		fmt.Println("error: ", err)
		logging.Tracef("error: %v", err)
	} else {
		fmt.Printf("service %s started\n", HelperService.Name())
		logging.Tracef("service %s started\n", HelperService.Name())
	}

	err = PrimaryService.StartService()
	if err != nil {
		fmt.Println("error: ", err)
		logging.Tracef("error: %v", err)
	} else {
		fmt.Printf("service %s started\n", PrimaryService.Name())
		logging.Tracef("service %s started\n", PrimaryService.Name())
	}
}
Example #9
func CmdServiceStatus(c *cli.Context) {
	logging.Trace("CmdServiceStatus")

	// -----------------------------------
	state, err := HelperService.Status()
	if err != nil {
		fmt.Println("error: ", err)
		logging.Tracef("error: %v", err)
	} else {
		fmt.Printf("service %s is %s\n", HelperService.Name(), servicelib.StateToString(state))
		logging.Tracef("service %s is %s\n", HelperService.Name(), servicelib.StateToString(state))
	}

	state, err = PrimaryService.Status()
	if err != nil {
		fmt.Println("error: ", err)
		logging.Tracef("error: %v", err)

	} else {
		fmt.Printf("service %s is %s\n", PrimaryService.Name(), servicelib.StateToString(state))
		logging.Tracef("service %s is %s\n", PrimaryService.Name(), servicelib.StateToString(state))
	}

}
Example #10
func _must_win_sys_wmi(name, prefix, interval string) newcore.Collector {
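	// build a WMI collector from an inline YAML config, substituting the interval into the template below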
	wmi_config_str_tpl := `
interval: {-----}
queries:
    -
        query: "select Name, NumberOfCores from Win32_Processor"
        metrics:
            -
                value_from: "Name"
                metric: "win.wmi.cpu.name"
            -
                value_from: "NumberOfCores"
                metric: "win.wmi.cpu.numberofcores"
    -
        query: "select Name, Size from Win32_LogicalDisk where MediaType=11 or mediatype=12"
        metrics:
            -
                value_from: "Size"
                metric: "win.wmi.logicaldisk.total_size_bytes"
                tags: {
                    "mount": "{{.Name}}",
                }
`
	wmi_config_str := strings.Replace(wmi_config_str_tpl, "{-----}", interval, 1)

	logging.Tracef("wmi_config_str: %s", wmi_config_str)

	wmi_viper := viper.New()
	wmi_viper.SetConfigType("yaml")
	wmi_viper.ReadConfig(bytes.NewBuffer([]byte(wmi_config_str)))

	var tmp_wmi_conf c_conf.Config_win_wmi
	wmi_viper.Marshal(&tmp_wmi_conf)

	return MustNewWinWmiCollector(name, prefix, tmp_wmi_conf)
}
Example #11
func _must_win_sys_pdh(name, prefix, interval string) newcore.Collector {
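	// build a PDH collector for a fixed set of Windows system counters from an inline YAML config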
	pdh_config_str_tpl := `
interval: {-----}
queries:
    -
        # CPU load
        query: "\\System\\Processor Queue Length"
        metric: "win.pdh.processor_queue_length"
    -
        query: "\\Processor(_Total)\\% Processor Time"
        metric: "win.pdh.pct_processor_time"
    -
        query: "\\Memory\\Available KBytes"
        metric: "win.pdh.memory.available_kbytes"
    -
        query: "\\Memory\\% Committed Bytes In Use"
        metric: "win.pdh.memory.pct_committed_bytes_in_use"
    -
        query: "\\PhysicalDisk(_Total)\\Avg. Disk sec/Read"
        metric: "win.pdh.physical_disk.avg_disk_sec_read"
    -
        query: "\\PhysicalDisk(_Total)\\Avg. Disk sec/Write"
        metric: "win.pdh.physical_disk.avg_disk_sec_write"
    -
        query: "\\TCPv4\\Connections Established"
        metric: "win.pdh.tcpv4.connections_established"
    -
        query: "\\System\\System Calls/sec"
        metric: "win.pdh.system.system_calls_sec"
    -
        query: "\\PhysicalDisk(_Total)\\Avg. Disk Bytes/Transfer"
        metric: "win.pdh.physical_disk.avg_disk_bytes_transfer"
    -
        query: "\\PhysicalDisk(_Total)\\Avg. Disk Queue Length"
        metric: "win.pdh.physical_disk.avg_disk_queue_length"
    -
        query: "\\PhysicalDisk(_Total)\\% Disk Time"
        metric: "win.pdh.physical_disk.pct_disk_time"
    -
        query: "\\LogicalDisk(C:)\\% Free Space"
        metric: "win.pdh.logicaldisk.pct_free_space_c"
    -
        query: "\\LogicalDisk(C:)\\Free Megabytes"
        metric: "win.pdh.logicaldisk.free_mbytes_c"
    -
        query: "\\LogicalDisk(D:)\\% Free Space"
        metric: "win.pdh.logicaldisk.pct_free_space_d"
    -
        query: "\\LogicalDisk(D:)\\Free Megabytes"
        metric: "win.pdh.logicaldisk.free_mbytes_d"
    -
        query: "\\TCPv4\\Connections Reset"
        metric: "win.pdh.tcpv4.connections_reset"
    -
        query: "\\TCPv4\\Connection Failures"
        metric: "win.pdh.tcpv4.connections_failures"

`
	pdh_config_str := strings.Replace(pdh_config_str_tpl, "{-----}", interval, 1)

	logging.Tracef("pdh_config_str: %s", pdh_config_str)

	pdh_viper := viper.New()
	pdh_viper.SetConfigType("yaml")
	pdh_viper.ReadConfig(bytes.NewBuffer([]byte(pdh_config_str)))
	var tmp_pdh_conf c_conf.Config_win_pdh_collector
	pdh_viper.Marshal(&tmp_pdh_conf)

	return MustNewWinPdhCollector(name, prefix, tmp_pdh_conf)
}
Example #12
// create the topology of our running core.
//
//  (collector -> subscription)s -> merged subscription -> fanout -> publications(backends)
//
func create_running_core_hooked(rconf *config.RuntimeConfig, ishook bool) (newcore.PublicationSet, *newcore.HookBackend, error) {
	var hook *newcore.HookBackend
	var subs []newcore.Subscription
	var heartbeat_exists bool

	if rconf == nil {
		return nil, nil, fmt.Errorf("RuntimeConfig is nil")
	}

	// create backends --------------------------------------------------------------------
	bks, err := backends.UseConfigCreateBackends(rconf)
	if err != nil {
		return nil, nil, err
	}

	if len(bks) <= 0 {
		return nil, nil, fmt.Errorf("no backends configured. program will do nothing.")
	}

	for _, bk := range bks {
		logging.Debugf("loaded backend: %s", bk.Name())
		logging.Tracef("loaded backend: %s -> %+v", bk.Name(), bk)
	}

	// create collectors ------------------------------------------------------------------
	clrs, err := collectors.UseConfigCreateCollectors(rconf)
	if err != nil {
		return nil, nil, err
	}

	// make sure heartbeat collector always created.
	for _, c := range clrs {
		if c.Name() == "heartbeat" {
			heartbeat_exists = true
		}
	}
	if !heartbeat_exists {
		clrs = append(clrs, collectors.NewHeartBeat(rconf.Client.HeartBeat_Interval))
	}

	// create collector subscriptions.
	for _, c := range clrs {
		subs = append(subs, newcore.Subscribe(c, nil))
	}

	// create other subscriptions, such as kafka consumer
	_subs, err := collectors.UseConfigCreateSubscription(rconf)
	if err != nil {
		return nil, nil, err
	}
	subs = append(subs, _subs...)

	for _, s := range subs {
		logging.Debugf("loaded subscription: %s", s.Name())
		logging.Tracef("loaded subscription: %s -> %+v", s.Name(), s)
	}

	merged := newcore.Merge(subs...)

	if ishook {
		// a hooked running core is only useful for unit testing; it is not a good idea otherwise.
		hook = newcore.NewHookBackend()
		bks = append(bks, hook)
		fset := newcore.FanOut(merged, bks...)
		return fset, hook, nil
	} else {
		fset := newcore.FanOut(merged, bks...)
		return fset, nil, nil
	}
}
Example #13
func (c *win_wmi_collector) CollectOnce() (res newcore.CollectResult) {
	var items newcore.MultiDataPoint

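	// run every configured WMI query and turn each returned row into data points; when a query yields no rows, fall back to the metrics' default values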
	for _, query := range c.config.Queries {

		fields := c.get_fields_of_query(query)

		results, err := c.query(query.Query, fields)

		if err != nil {
			continue
		}

		if len(results) > 0 {
			for _, record := range results {
				for _, item := range query.Metrics {

					metric, err := c.c_win_wmi_parse_metric_key(string(item.Metric), record)
					if err != nil {
						logging.Errorf("CollectOnce: %v", err)
						continue
					}

					tags, err := c.c_win_wmi_parse_tags(item.Tags, record)
					if err != nil {
						logging.Errorf("CollectOnce: %v", err)
						continue
					}

					tags = newcore.AddTags.Copy().Merge(query.Tags).Merge(tags)

					if value, ok := record[item.Value_from]; ok {
						items = append(items, newcore.NewDP(c.prefix, metric, value, tags, "", "", ""))
					} else if item.Default != "" {
						items = append(items, newcore.NewDP(c.prefix, metric, item.Default, tags, "", "", ""))
					}
				}
			}
		} else {
			for _, item := range query.Metrics {
				if item.Default != "" {
					// no templating support when the query returned no data
					if strings.Contains(string(item.Metric), "{{") {
						continue
					}
					// also skip if any tag value still contains a template placeholder
					has_template := false
					for _, value := range item.Tags {
						if strings.Contains(value, "{{") {
							has_template = true
							break
						}
					}
					if has_template {
						continue
					}
					tags := newcore.AddTags.Copy().Merge(query.Tags).Merge(item.Tags)
					items = append(items, newcore.NewDP(c.prefix, item.Metric.Clean(), item.Default, tags, "", "",
						""))
				}
			}
		}
	} // for queries

	for _, dp := range items {
		logging.Tracef("wmi DataPoint -> %+v", dp)
	}
	return newcore.CollectResult{
		Collected: items,
		Next:      time.Now().Add(c.interval),
		Err:       nil,
	}
}
Example #14
// loop periodically fetches Items, sends them on s.updates, and exits
// when Close is called.
// CollectOnce is run asynchronously.
func (s sub) loop() {
	var (
		collectDone  chan CollectResult // if non-nil, CollectOnce is running
		pending      []MultiDataPoint
		next         time.Time
		err          error
		first        MultiDataPoint
		updates      chan MultiDataPoint
		startCollect <-chan time.Time
		collectDelay time.Duration
		now          = time.Now()
	)

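	// each pass through the loop re-arms only the select cases that are currently valid: collect when idle, send when data is pending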
	for {
		startCollect = nil
		first = nil
		updates = nil

		if now = time.Now(); next.After(now) {
			collectDelay = next.Sub(now)
		}

		if s.collector.IsEnabled() && collectDone == nil && len(pending) < s.maxPending {
			startCollect = time.After(collectDelay) // enable collect case
		}

		if len(pending) > 0 {
			first = pending[0]
			updates = s.updates // enable send case
		}

		select {
		case <-startCollect:
			collectDone = make(chan CollectResult, 1) // enable CollectOnce

			// TODO: add unittest for this.
			// CollectOnce should be called asynchronously, otherwise it will block consuming the result.
			// TODO: leaking param c
			go func() {
				// defer func() {
				// 	if r := recover(); r != nil {
				//		logging.Criticalf("---------- Recovered -------%v", r)
				// 	}
				// }()
				logging.Tracef("running collector.CollectOnce: %s", s.collector.Name())
				res := s.collector.CollectOnce()
				collectDone <- res
				logging.Debugf("finished collector.CollectOnce: %s, count: %d", s.collector.Name(), len(res.Collected))
			}()
		case result := <-collectDone:
			//  logging.Info("result := <- collectDone", result)
			collectDone = nil

			next, err = result.Next, result.Err
			if err != nil {
				// fall back to the default delay if an error happens while collecting data
				//TODO: add unittest for delay_on_error. delay_on_error vs collector.interval ???
				logging.Errorf("ERROR: collector(%s) error: %v", s.collector.Name(), err)
				next = time.Now().Add(s.delay_on_error)
				break
			}

			//TODO: add unittest
			if next.Sub(time.Now()) < minimal_next_interval {
				next = time.Now().Add(minimal_next_interval)
			}

			if result.Collected != nil {
				// don't consume a nil collected result.
				pending = append(pending, result.Collected)
			}
		case errc := <-s.closing:
			// clean up collector resource.
			errc <- s.collector.Close()
			close(s.updates)
			return
		case updates <- first:
			pending = pending[1:]
		}
	}
}
Example #15
func (c *kafka_subscription) consume() (<-chan newcore.MultiDataPoint, error) {
	logging.Info("consume")

	var out = make(chan newcore.MultiDataPoint)
	var err error
	var consumers []sarama.PartitionConsumer
	if c.master == nil {
		err = c.connect()
		if err != nil {
			return nil, err
		}
	}

	for _, pc := range c.consumers {
		pc.Close()
	}
	c.consumers = nil

	partitions, err := c.master.Partitions(c.opts.Topic)
	if err != nil {
		return nil, fmt.Errorf("Cannot get partitions: %v", err)
	}
	logging.Infof("partitions: %v", partitions)

	err = c.state.Load()
	if err != nil {
		logging.Errorf("failed to load kafka state: %v", err)
	} else {
		logging.Infof("state: %+v", c.state.State())
	}

	flush_offset := true

	for _, part := range partitions {
		offset := int64(0)
		if c.state.Length() > 0 {
			offset = c.state.Offset(c.opts.Topic, part)
			if offset < 0 {
				offset = 0
			}
		}
		consumer, err := c.master.ConsumePartition(c.opts.Topic, part, offset)
		if err != nil {
			logging.Criticalf("Cannot consume partition: %d, %v", part, err)
			return nil, fmt.Errorf("Cannot consume partition: %d, %v", part, err)
		}
		logging.Infof("created consumer: %v", consumer)

		consumers = append(consumers, consumer)

		go func(flush_offset bool, topic string, part int32, out chan newcore.MultiDataPoint, consumer sarama.PartitionConsumer) {
			logging.Infof("start goroutine to consume: part: %d,  %v", part, &consumer)

			var items newcore.MultiDataPoint
			var flush_tick = time.Tick(c.flush_interval)
			var _out chan newcore.MultiDataPoint
			var startConsume <-chan *sarama.ConsumerMessage
			var flushing bool
			var offset int64

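			// batch incoming messages and hand them to out once max_batch_size is reached or a flush tick fires; the tick also checkpoints the partition offset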
			for {
				if (flushing && len(items) > 0) || len(items) >= c.max_batch_size {
					_out = out         // enable output branch
					startConsume = nil // disable consuming branch
				} else if len(items) < c.max_batch_size {
					startConsume = consumer.Messages() // enable consuming branch
					_out = nil                         // disable output branch
				}

				select {
				case message := <-startConsume:
					offset = message.Offset
					dp, err := newcore.NewDPFromJson(message.Value)
					if err != nil {
						logging.Tracef("[ERROR] failed to parse datapoint: %v", err)
						continue // skip the datapoint that failed to parse
					}
					logging.Tracef("kafka dp --> %v", dp)
					items = append(items, dp)
				case <-flush_tick:
					flushing = true
					// every part consumer will record offset with interval
					c.state.Update(topic, part, offset)

					// only 1 goroutine will save state to disk
					if flush_offset && c.state.Changed() {
						logging.Tracef("flushing to disk: part: %d, offset: %d", part, offset)
						c.state.Save()
					}
				case _out <- items:
					items = nil                        // clear items
					_out = nil                         // disable output branch
					startConsume = consumer.Messages() // enable consuming branch
					flushing = false                   // disable flushing
				case err := <-consumer.Errors():
					logging.Infof("consumer.Errors: part:%d,  %v", part, err)
				}
			}
		}(flush_offset, c.opts.Topic, part, out, consumer)

		flush_offset = false // only 1st goroutine is responsible for flushing state back into disk
	}
	c.consumers = consumers
	return out, nil
}
Example #16
func GetSystemInfo() (SystemInfo, error) {
	var info = SystemInfo{}
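	// Win32_ComputerSystem provides host name, domain, processor counts and total physical memory; Win32_OperatingSystem provides OS caption, version and architecture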
	cs_info, err := wmi.QueryWmi("SELECT Name, Domain, NumberOfLogicalProcessors, NumberOfProcessors, TotalPhysicalMemory FROM Win32_ComputerSystem")
	logging.Tracef("err: %v, cs_info: %v", err, cs_info)
	if err != nil {
		return info, err
	}
	if len(cs_info) != 1 {
		return info, fmt.Errorf("invalid query result: %v", cs_info)
	}
	cs_info_m := cs_info[0]

	info.Name = newcore.GetHostname()

	if string_value, ok := cs_info_m["Domain"]; ok {
		info.Domain = string_value
	}

	if string_value, ok := cs_info_m["NumberOfLogicalProcessors"]; ok {
		int_value, err := strconv.Atoi(string_value)
		if err != nil {
			return info, err
		}
		info.NumberOfLogicalProcessors = int_value
	}

	if string_value, ok := cs_info_m["NumberOfProcessors"]; ok {
		int_value, err := strconv.Atoi(string_value)
		if err != nil {
			return info, err
		}
		info.NumberOfProcessors = int_value
	}

	if string_value, ok := cs_info_m["TotalPhysicalMemory"]; ok {
		int_value, err := strconv.Atoi(string_value)
		if err != nil {
			return info, err
		}
		info.TotalPhsycialMemoryKb = int_value / 1024
	}

	os_info, err := wmi.QueryWmi("Select Caption, CSDVersion, OSArchitecture, Version From Win32_OperatingSystem")
	logging.Tracef("err: %v, os_info: %v", err, os_info)
	if err != nil {
		return info, err
	}
	if len(os_info) != 1 {
		return info, fmt.Errorf("invalid query result: %v", os_info)
	}
	os_info_m := os_info[0]

	if string_value, ok := os_info_m["Caption"]; ok {
		info.OS = string_value
	}

	csdversion := ""
	if string_value, ok := os_info_m["CSDVersion"]; ok {
		csdversion = string_value
	}

	if string_value, ok := os_info_m["Version"]; ok {
		version := string_value
		info.OSVersion = fmt.Sprintf("%s - %s", csdversion, version)
	}

	if string_value, ok := os_info_m["OSArchitecture"]; ok {
		if string_value == "64-bit" {
			info.Architecture = 64
		} else {
			info.Architecture = 32
		}

	}

	//FIXME: we may not be able to get ip list.
	ipv4list, err := utils.Ipv4List()
	if err != nil {
		logging.Errorf("failed to get ipv4 list: %v", err)
		//		return info, err
	} else {
		info.IPv4 = ipv4list
	}

	return info, nil
}
Example #17
func (c *InfluxdbClient_v088) Write(bp client090.BatchPoints) (*client090.Response, error) {
	// logging.Debug("InfluxdbClient_v088.Write")
	// v0.9.0-rc7 [
	//  {
	//      Name: "a",
	//      Timestamp: "1",
	//      Fields: {"f1": "v1", "f2": "v2"},
	//      Precision: "s"
	//  }
	// ]

	// v0.8.8  [
	//   {
	//     "name": "log_lines",
	//     "columns": ["time", "sequence_number", "line"],
	//     "points": [
	//       [1400425947368, 1, "this line is first"],
	//       [1400425947368, 2, "and this is second"]
	//     ]
	//   }
	// ]

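	// convert each v0.9-style point into a v0.8.8 series: the flattened metric key becomes the series name, "time" (in ms) is the first column, then one column per field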
	var series []*client088.Series

	for _, p := range bp.Points {
		s := client088.Series{}
		// s.Name = p.Name
		name, err := newcore.FlatMetricKeyAndTags(c.flat_tpl, p.Measurement, p.Tags)
		if err != nil {
			logging.Error("FlatMetricKeyAndTags Failed!", err)
			return nil, err
		}
		s.Name = name

		point := []interface{}{}

		// time, first
		s.Columns = append(s.Columns, "time")
		point = append(point, p.Time.UnixNano()/1000000)

		// then others
		for key, value := range p.Fields {
			s.Columns = append(s.Columns, key)
			point = append(point, value)
		}

		s.Points = append(s.Points, point)

		logging.Tracef("influxdb --> %+v", s)

		series = append(series, &s)
	}

	// pretty.Println(series)

	err := c.client.WriteSeriesWithTimePrecision(series, "ms")
	if err != nil {
		logging.Errorf("InfluxdbClient_v088.Write.WriteSeriesWithTimePrecision Error: %v", err)
	} else {
		logging.Trace("InfluxdbClient_v088.Write Done No Error")
	}

	return nil, err
}
Example #18
func (b *kafkaBackend) loop() {
	var (
		startConsuming    <-chan newcore.MultiDataPoint
		try_connect_first chan bool
		try_connect_tick  <-chan time.Time
	)
	startConsuming = b.updates
	logging.Info("kafkaBackend.loop started")

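	// consuming stays disabled until the kafka producer is connected; the first connection attempt runs in a goroutine, later retries fall back to a 1s ticker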
	for {
		if b.producer == nil && try_connect_first == nil && try_connect_tick == nil {
			startConsuming = nil // disable consuming

			try_connect_first = make(chan bool)
			logging.Debug("trying to connect to kafka first time.")

			// trying to connect to kafka first time
			go func() {
				err := b.connect()
				if b.producer != nil && err == nil {
					logging.Debugf("connect kafka first time OK: %v", b.producer)
					try_connect_first <- true
				} else {
					logging.Criticalf("connect to kafka failed %s", err)
					try_connect_first <- false
				}
			}()
		}
		if startConsuming != nil {
			logging.Trace("kafkaBackend consuming started")
		}

		select {
		case md := <-startConsuming:
			for idx, p := range md {
				b.producer.Input() <- &sarama.ProducerMessage{
					Topic: b.conf.Topic_id,
					Key:   sarama.StringEncoder(p.Metric),
					Value: p,
				}
				_d, _ := p.Encode()
				logging.Tracef("kafka producer ---> %d,  %s", idx, _d)
			}
			logging.Debugf("kafkaBackend consuming finished: count: %d", len(md))
		case connected := <-try_connect_first:
			try_connect_first = nil // disable this branch
			if !connected {
				// failed to connect the first time,
				// so keep retrying on a time interval until the connection succeeds.
				logging.Critical("connect first time failed, try to connect with interval of 1s")
				try_connect_tick = time.Tick(time.Second * 1)
			} else {
				logging.Debug("kafka connected the first time.")
				startConsuming = b.updates
			}
		case <-try_connect_tick:
			// try to connect with interval
			err := b.connect()
			if b.producer != nil && err == nil {
				// finally connected.
				try_connect_tick = nil
				startConsuming = b.updates
			} else {
				logging.Criticalf("kafka backend trying to connect but failed: %s", err)
			}
		case errc := <-b.closing:
			logging.Info("kafkaBackend.loop closing")
			startConsuming = nil // stop consuming
			errc <- nil
			close(b.updates)
			logging.Info("kafkaBackend.loop closed")
			return
		}
	}
}