func (c *win_wmi_collector) query(query string, fields []string) ([]map[string]string, error) { if c.service != nil { resultRaw, err := oleutil.CallMethod(c.service, "ExecQuery", query) if err != nil { logging.Error("ExecQuery Failed: ", err) return nil, fmt.Errorf("ExecQuery Failed: %v", err) } result := resultRaw.ToIDispatch() defer result.Release() countVar, err := oleutil.GetProperty(result, "Count") if err != nil { logging.Error("Get result count Failed: ", err) return nil, fmt.Errorf("Get result count Failed: %v", err) } count := int(countVar.Val) resultMap := []map[string]string{} for i := 0; i < count; i++ { itemMap := make(map[string]string) itemRaw, err := oleutil.CallMethod(result, "ItemIndex", i) if err != nil { return nil, fmt.Errorf("ItemIndex Failed: %v", err) } item := itemRaw.ToIDispatch() defer item.Release() for _, field := range fields { asString, err := oleutil.GetProperty(item, field) if err == nil { itemMap[field] = fmt.Sprintf("%v", asString.Value()) } else { logging.Errorf("cannot find field in SWbemObject: %v", err) } } resultMap = append(resultMap, itemMap) logging.Tracef("wmi query result: %+v", itemMap) } logging.Tracef("wmi query result count: %d", len(resultMap)) return resultMap, nil } else { logging.Error("win_wmi_collector c.service is nil") return nil, fmt.Errorf("win_wmi_collector c.service is nil") } }
func protect(h httprouter.Handle, expire time.Duration, trigger string) httprouter.Handle { return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { secure := false if trigger == "read" && config.CoreConf.Secure_api_read { secure = true } else if trigger == "write" && config.CoreConf.Secure_api_write { secure = true } logging.Infof("trigger: %s, secure: %v, write: %v, read: %v\n", trigger, secure, config.CoreConf.Secure_api_write, config.CoreConf.Secure_api_read) if secure { hostname := r.URL.Query().Get("hostname") if strings.ToLower(hostname) != newcore.GetHostname() { logging.Errorf("hostname mismatch: %v", hostname) http.Error(w, "hostname mismatch", 500) return } time_str := r.URL.Query().Get("time") tm, err := utils.UTCTimeFromUnixStr(time_str) if err != nil { logging.Errorf("invalid time: %v", time_str) http.Error(w, "Invalid Time", 500) return } if time.Now().Sub(tm) > expire { // expired reqeust logging.Errorf("expired request: %v", time.Now().Sub(tm)) http.Error(w, "expired request", 500) return } // we need to verify request. // request should put signature of this agent hostname into header HICKWALL_ADMIN_SIGN load_unsigner() signed_str := r.Header.Get("HICKWALL_ADMIN_SIGN") signed, err := base64.StdEncoding.DecodeString(signed_str) if err != nil { logging.Error("cannot decode sign") http.Error(w, "cannot decode sign", 500) return } toSign := fmt.Sprintf("%s%s", hostname, time_str) logging.Trace("unsign started") err = unsigner.Unsign([]byte(toSign), signed) logging.Trace("unsign finished") if err != nil { logging.Errorf("-> invalid signature: %v <-", string(signed)) http.Error(w, "invalid signature", 500) return } } h(w, r, ps) } }
func QueryWmi(query string) ([]map[string]string, error) { fields, err := parseFieldsFromQuery(query) if err != nil { logging.Error("cannot parse fields from query: %v", err) return nil, err } return QueryWmiFields(query, fields) }
func QueryWmiFields(query string, fields []string) ([]map[string]string, error) { if len(fields) == 1 && fields[0] == "*" { logging.Errorf("`select * ` not supported, need to address fields explicitly.") return nil, fmt.Errorf("`select * ` not supported, need to address fields explicitly.") } resultRaw, err := oleutil.CallMethod(wmi_service, "ExecQuery", query) if err != nil { logging.Error("ExecQuery Failed: ", err) return nil, fmt.Errorf("ExecQuery Failed: %v", err) } result := resultRaw.ToIDispatch() defer result.Release() countVar, err := oleutil.GetProperty(result, "Count") if err != nil { logging.Errorf("Get result count Failed: %v", err) return nil, fmt.Errorf("Get result count Failed: %v", err) } count := int(countVar.Val) resultMap := []map[string]string{} for i := 0; i < count; i++ { itemMap := make(map[string]string) itemRaw, err := oleutil.CallMethod(result, "ItemIndex", i) if err != nil { return nil, fmt.Errorf("ItemIndex Failed: %v", err) } item := itemRaw.ToIDispatch() defer item.Release() for _, field := range fields { asString, err := oleutil.GetProperty(item, field) if err == nil { itemMap[field] = fmt.Sprintf("%v", asString.Value()) } else { fmt.Println(err) } } resultMap = append(resultMap, itemMap) logging.Tracef("wmi query result: %+v", itemMap) } logging.Tracef("wmi query result count: %d", len(resultMap)) return resultMap, nil }
func (b *influxdbBackend) newInfluxdbClientFromConf() error { iclient, err := NewInfluxdbClient(map[string]interface{}{ "Host": b.conf.Host, "URL": b.conf.URL, "Username": b.conf.Username, "Password": b.conf.Password, "UserAgent": "", "Database": b.conf.Database, "FlatTemplate": b.conf.FlatTemplate, }, b.version) if err != nil && iclient == nil { logging.Error("failed to create influxdb client: ", err) return fmt.Errorf("failed to create influxdb client: ", err) } b.output = iclient return nil }
// loop is the fileBackend's event loop. It lazily opens the output file:
// consumption from b.updates is disabled (startConsuming = nil) until the
// file is open. The first open attempt runs once asynchronously; if it
// fails, further attempts run on a 1s ticker until one succeeds. Nil
// channels are used to enable/disable individual select branches.
func (b *fileBackend) loop() {
	var (
		startConsuming     <-chan newcore.MultiDataPoint
		try_open_file_once chan bool
		try_open_file_tick <-chan time.Time
		buf                = bytes.NewBuffer(make([]byte, 0, 1024))
	)
	startConsuming = b.updates
	logging.Debugf("filebackend.loop started")

	for {
		// Output not open and no open attempt in flight: kick off the
		// one-shot async first attempt and stop consuming meanwhile.
		if b.output == nil && try_open_file_once == nil && try_open_file_tick == nil {
			startConsuming = nil // disable consuming

			try_open_file_once = make(chan bool)
			// log.Println("try to open file the first time.")

			// try to open file the first time async.
			go func() {
				err := b.openFile()

				if b.output != nil && err == nil {
					// log.Println("openFile first time OK", b.output)
					try_open_file_once <- true
				} else {
					logging.Errorf("filebackend trying to open file but failed: %s", err)
					try_open_file_once <- false
				}
			}()
		}

		select {
		case md := <-startConsuming:
			// Serialize each point as JSON, one object per line, via the
			// reused buffer.
			for _, p := range md {
				if b.output != nil {
					res, _ := p.MarshalJSON()
					buf.Write(res)
					buf.Write([]byte("\n"))
					b.output.Write(buf.Bytes())
					buf.Reset()
				}
			}

		case opened := <-try_open_file_once:
			try_open_file_once = nil // disable this branch
			if !opened {
				// failed open it the first time,
				// then we try to open file with time interval, until opened successfully.
				logging.Error("open the first time failed, try to open with interval of 1s")
				// NOTE(review): time.Tick's ticker is never stopped; it leaks
				// once the file opens. time.NewTicker + Stop would avoid that.
				try_open_file_tick = time.Tick(time.Second * 1)
			} else {
				logging.Debugf("file opened the first time.")
				startConsuming = b.updates
			}

		case <-try_open_file_tick:
			// try to open with interval
			err := b.openFile()
			if b.output != nil && err == nil {
				// finally opened.
				try_open_file_tick = nil
				startConsuming = b.updates
			} else {
				logging.Errorf("filebackend trying to open file but failed: %s", err)
			}

		case errc := <-b.closing:
			logging.Debug("filebackend.loop closing")
			startConsuming = nil // stop comsuming
			errc <- nil
			// NOTE(review): closing b.updates from the receiving side; if a
			// producer can still send, this may panic — confirm shutdown order
			// with the producer side.
			close(b.updates)
			logging.Debug("filebackend.loop stopped")
			return
		}
	}
}
func (this *Service) ContinueService() error { logging.Error("ServiceManager.ContinueService not supported ") return nil }
func (this *Service) PauseService() error { logging.Error("ServiceManager.PauseServicen not supported ") return nil }
func (this *Service) Status() (State, error) { logging.Error("ServiceManagement.Status not supported") return Unknown, fmt.Errorf("ServerMangement.Status not supported") }
func (c *InfluxdbClient_v088) Write(bp client090.BatchPoints) (*client090.Response, error) { // logging.Debug("InfluxdbClient_v088.Write") // v0.9.0-rc7 [ // { // Name: "a", // Timestamp: "1", // Fields: {"f1": "v1", "f2": "v2"}, // Precision: "s" // } // ] // v0.8.8 [ // { // "name": "log_lines", // "columns": ["time", "sequence_number", "line"], // "points": [ // [1400425947368, 1, "this line is first"], // [1400425947368, 2, "and this is second"] // ] // } // ] var series []*client088.Series for _, p := range bp.Points { s := client088.Series{} // s.Name = p.Name name, err := newcore.FlatMetricKeyAndTags(c.flat_tpl, p.Measurement, p.Tags) if err != nil { logging.Error("FlatMetricKeyAndTags Failed!", err) return nil, err } s.Name = name point := []interface{}{} // time, first s.Columns = append(s.Columns, "time") point = append(point, p.Time.UnixNano()/1000000) // then others for key, value := range p.Fields { s.Columns = append(s.Columns, key) point = append(point, value) } s.Points = append(s.Points, point) logging.Tracef("influxdb --> %+v", s) series = append(series, &s) } // pretty.Println(series) err := c.client.WriteSeriesWithTimePrecision(series, "ms") if err != nil { logging.Errorf("InfluxdbClient_v088.Write.WriteSeriesWithTimePrecision Error: %v", err) } else { logging.Trace("InfluxdbClient_v088.Write Done No Error") } return nil, err }