Example #1
func (this *Service) RemoveService() error {
	logging.Debug("ServiceManager.RemoveService")

	str, err := this.Remove()
	printCmdRes(str, err)
	return err
}
Example #2
func (this *Service) InstallService() error {
	logging.Debug("ServiceManager.InstallService")

	str, err := this.Install()
	printCmdRes(str, err)
	return err
}
Example #3
func new_core_from_file() (*config.RuntimeConfig, error) {
	logging.Debug("NewCoreFromFile")
	rconf, err := config.LoadRuntimeConfigFromFiles()
	if err != nil {
		logging.Errorf("NewCoreFromFile: Failed to load RuntimeConfig from files: %v", err)
		return rconf, err
	}
	logging.Debug("NewCoreFromFile: load config from file finished.")
	err = UpdateRunningCore(rconf)
	if err != nil {
		logging.Errorf("NewCoreFromFile: Failed to create running core: %v", err)
		return rconf, err
	}
	logging.Debug("NewCoreFromFile finished witout error")
	return nil, nil
}
Example #4
func (c *win_pdh_collector) CollectOnce() newcore.CollectResult {
	logging.Debug("win_pdh_collector.CollectOnce Started")

	var items newcore.MultiDataPoint

	for _, pd := range c.hPdh.CollectData() {
		if pd.Err == nil {
			query, ok := c.map_queries[pd.Query]
			if ok {
				logging.Tracef("query: %+v, \n %+v", query.Metric, query)
				items = append(items, newcore.NewDP(c.prefix, query.Metric.Clean(), pd.Value, query.Tags, "", "", ""))
			}
		} else if !strings.Contains(pd.Err.Error(), `\Process(hickwall)\Working Set - Private`) {
			logging.Errorf("win_pdh_collector ERROR: %v", pd.Err)
		}
	}

	logging.Debugf("win_pdh_collector.CollectOnce Finished. count: %d", len(items))
	return newcore.CollectResult{
		Collected: items,
		Next:      time.Now().Add(c.interval),
		Err:       nil,
	}
}
Example #5
func CmdShowConfig(c *cli.Context) {
	logging.Debug("CmdShowConfig")

	fmt.Printf("CoreConfig: %+v\n", config.CoreConf)

	//TODO: get runtime config from running core.
}
Example #6
func Test_running_core_MultipleCore(t *testing.T) {
	logging.Debug("")
	//	logging.SetLevel("debug")
	rconf, err := config.ReadRuntimeConfig(bytes.NewBuffer(configs["file"]))
	if err != nil {
		t.Errorf("err %v", err)
		return
	}

	err = UpdateRunningCore(rconf)
	if err != nil {
		t.Errorf("err %v", err)
	}
	if the_core == nil {
		t.Error("the_core is nil after UpdateRunningCore")
	}

	for i := 0; i < 100; i++ {
		err = UpdateRunningCore(rconf)
		if err != nil {
			t.Errorf("err %v", err)
		}
		if the_core == nil {
			t.Error("the_core is nil after UpdateRunningCore")
		}
	}
}
Example #7
func (this *Service) StopService() error {
	logging.Debug("ServiceManager.StopService")

	str, err := this.Stop()
	printCmdRes(str, err)
	return err
}
Example #8
func runService(isDebug bool) {
	defer utils.Recover_and_log()
	logging.Debug("runService")

	err := svc.Run(command.PrimaryService.Name(), &serviceHandler{})
	if err != nil {
		logging.Errorf("runService: failed: %v\r\n", err)
	}
}
Example #9
// DumpRuntimeConfig dumps the RuntimeConfig rawdata into a file, overwriting it if the file already exists.
func DumpRuntimeConfig(rconf *RuntimeConfig) error {
	logging.Debug("DumpRuntimeConfig")
	if len(rconf.rawdata) == 0 {
		return fmt.Errorf("runtime config rawdata is empty")
	}
	h := md5.New()
	h.Write(rconf.rawdata)
	hash := hex.EncodeToString(h.Sum(nil))
	if hash != rconf.hash {
		return fmt.Errorf("rawdata has been modified!")
	}
	err := ioutil.WriteFile(CONF_CACHE_PATH, rconf.rawdata, 0644)
	if err != nil {
		return fmt.Errorf("failed to dump RuntimeConfig: %v", err)
	}
	logging.Debug("DumpRuntimeConfig Finished")
	return nil
}
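The MD5 check above only catches tampering if rconf.hash was computed from the same rawdata when the raw config bytes were first read. A minimal sketch of that counterpart, in the same package context; the constructor name newRuntimeConfig is hypothetical, not from the source:

package config

import (
	"crypto/md5"
	"encoding/hex"
)

// newRuntimeConfig (hypothetical) pairs rawdata with its MD5 hash up front,
// so DumpRuntimeConfig's integrity check can later succeed.
func newRuntimeConfig(raw []byte) *RuntimeConfig {
	h := md5.New()
	h.Write(raw)
	return &RuntimeConfig{
		rawdata: raw,
		hash:    hex.EncodeToString(h.Sum(nil)),
	}
}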
Example #10
func (b *kafkaBackend) connect() error {
	producer, err := sarama.NewAsyncProducer(b.conf.Broker_list, b.kconf)
	if err != nil {
		logging.Errorf("failed to start producer: %v, %v", err, b.conf.Broker_list)
		return fmt.Errorf("failed to start producer: %v, %v", err, b.conf.Broker_list)
	}

	go func() {
		logging.Debug("consuming from producer.Errors()")
		for err := range producer.Errors() {
			logging.Errorf("producer error: %v", err)
		}
		logging.Debug("producer.Errors() closed")
	}()

	logging.Infof("created new producer: %v", b.conf.Broker_list)

	// save producer reference
	b.producer = producer
	return nil
}
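A note on the error-draining goroutine above: with sarama's AsyncProducer, Producer.Return.Errors defaults to true, and unread errors will eventually block the producer, so reading producer.Errors() is not optional. A minimal standalone sketch; the broker address and topic are made up for illustration:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	conf := sarama.NewConfig()
	conf.Producer.Return.Errors = true // the default; requires draining Errors()

	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, conf)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	// drain errors so the producer never blocks on an unread error
	go func() {
		for perr := range producer.Errors() {
			log.Println("producer error:", perr)
		}
	}()

	producer.Input() <- &sarama.ProducerMessage{
		Topic: "metrics",
		Value: sarama.StringEncoder("hello"),
	}
}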
Example #11
func LoadCoreConfig() error {
	data, err := ioutil.ReadFile(CORE_CONF_FILEPATH)
	if err != nil {
		return fmt.Errorf("faild to read core config: %v", err)
	}
	CoreConf = CoreConfig{}
	// we can use yaml to load config directly. only because
	// core config structure is very simple and flat.
	err = yaml.Unmarshal(data, &CoreConf)
	if err != nil {
		return fmt.Errorf("unable to unmarshal yaml: %v", err)
	}

	if CoreConf.Rss_limit_mb <= 0 {
		CoreConf.Rss_limit_mb = 50 // default rss limit
	}
	if CoreConf.Listen_port <= 0 {
		CoreConf.Listen_port = 3031
	}
	if CoreConf.Hostname != "" {
		newcore.SetHostname(CoreConf.Hostname)
	}

	logging.SetLevel(CoreConf.Log_level)

	if CoreConf.Enable_http_api && (CoreConf.Secure_api_read || CoreConf.Secure_api_write) {
		// we should check public key config.
		_, err := utils.LoadPublicKeyFromPath(CoreConf.Server_pub_key_path)
		if err != nil {
			logging.Criticalf("unable to load server public key while SecureAPIx is set to be true: %s", err)
		}
	}

	logging.Debugf("SHARED_DIR:            %s\n", SHARED_DIR)
	logging.Debugf("LOG_DIR:               %s\n", LOG_DIR)
	logging.Debugf("LOG_FILEPATH:          %s\n", LOG_FILEPATH)
	logging.Debugf("CORE_CONF_FILEPATH:    %s\n", CORE_CONF_FILEPATH)
	logging.Debugf("CONF_FILEPATH:         %s\n", CONF_FILEPATH)
	logging.Debugf("REGISTRY_FILEPATH:     %s\n", REGISTRY_FILEPATH)
	logging.Debugf("CONF_GROUP_DIRECTORY:  %s\n", CONF_GROUP_DIRECTORY)
	logging.Debugf("CoreConfig:            %+v\n", CoreConf)
	logging.Debug("CoreConfig Loaded ==============================================")

	core_conf_loaded = true
	return nil
}
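The "simple and flat" remark above is exactly what makes a direct yaml.Unmarshal workable. A self-contained sketch of the same idea with a made-up two-field config; the struct name, fields, and tags here are illustrative, not the source's:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type coreConfig struct {
	LogLevel   string `yaml:"log_level"`
	ListenPort int    `yaml:"listen_port"`
}

func main() {
	data := []byte("log_level: debug\nlisten_port: 3031\n")
	var c coreConfig
	if err := yaml.Unmarshal(data, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // {LogLevel:debug ListenPort:3031}
}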
Example #12
// Update RunningCore with provided RuntimeConfig.
func UpdateRunningCore(rconf *config.RuntimeConfig) error {
	logging.Debug("UpdateRunningCore")
	if rconf == nil {
		return fmt.Errorf("rconf is nil")
	}
	core, _, err := create_running_core_hooked(rconf, false)

	// http pprof
	// https://github.com/golang/go/issues/4674
	// we can only open http pprof, cannot close it.
	if !pprof_serving && rconf.Client.Pprof_enabled {
		if rconf.Client.Pprof_listen == "" {
			rconf.Client.Pprof_listen = ":6060"
		}
		go func() {
			pprof_serving = true
			logging.Infof("http pprof is listen and served on: %v", rconf.Client.Pprof_listen)
			err := http.ListenAndServe(rconf.Client.Pprof_listen, nil)
			logging.Errorf("pprof ListenAndServe Error: %v", err)
			pprof_serving = false
		}()
	}

	// if the registry gives us an empty config, the agent should also reflect this change.
	close_core()

	if err != nil {
		return err
	}

	the_core = core
	the_rconf = rconf
	logging.Debug("UpdateRunningCore Finished")
	return nil
}
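The pprof server in UpdateRunningCore works because http.ListenAndServe with a nil handler falls back to http.DefaultServeMux, and a blank import of net/http/pprof (presumably done elsewhere in the package) registers the /debug/pprof/* handlers on that mux. A minimal standalone sketch:

package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // side effect: registers /debug/pprof/* on the default mux
)

func main() {
	// visit http://localhost:6060/debug/pprof/ once this is running
	log.Println(http.ListenAndServe(":6060", nil))
}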
Example #13
func new_core_from_registry(stop chan error) {
	logging.Debug("new_core_from_registry started")
	if stop == nil {
		logging.Panic("stop chan is nil")
	}

	if len(config.CoreConf.Registry_urls) <= 0 {
		logging.Panic("RegistryURLs is empty!!")
	}

	resp, err := load_reg_response()
	if err != nil {
		logging.Errorf("we don't have a valid registry info cached.")
		next := time.After(0)

		// round robin registry machines
		r := ring.New(len(config.CoreConf.Registry_urls))
		for i := 0; i < r.Len(); i++ {
			r.Value = config.CoreConf.Registry_urls[i]
			r = r.Next()
		}

	registry_loop:
		for {
			select {
			case <-next:
				r = r.Next()
				resp, err = do_registry(r.Value.(string))
				if err == nil {
					logging.Info("we are registry we got a valid registry response.")
					break registry_loop
				} else {
					logging.Errorf("failed to registry: %v", err)
				}
				next = time.After(newcore.Interval(config.CoreConf.Registry_delay_on_error).MustDuration(time.Minute))
			}
		}
	}

	// TODO: handle error here. like etcd_machines are not working.
	// here we got a valid registry info. get config and start to run.
	new_core_from_etcd(resp.EtcdMachines, resp.EtcdConfigPath, stop)
}
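The container/ring round-robin above is easier to see in isolation: fill the ring once, then each Next() steps to the following registry URL, wrapping around forever. A small sketch with placeholder URLs:

package main

import (
	"container/ring"
	"fmt"
)

func main() {
	urls := []string{"http://reg-a", "http://reg-b", "http://reg-c"}
	r := ring.New(len(urls))
	for _, u := range urls {
		r.Value = u
		r = r.Next() // after the loop, r is back at the first element
	}
	for i := 0; i < 5; i++ {
		fmt.Println(r.Value.(string)) // reg-a, reg-b, reg-c, reg-a, reg-b
		r = r.Next()
	}
}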
Example #14
func do_registry(reg_url string) (*registry_response, error) {
	logging.Debug("do_registry started")
	req, err := new_reg_request()
	if err != nil {
		return nil, logging.SError(err)
	}
	hReq, err := new_hashed_reg_request(req)
	if err != nil {
		return nil, logging.SError(err)
	}

	hResp, err := goreq.Request{
		Method:      "POST",
		Uri:         reg_url,
		Body:        hReq,
		Accept:      "application/json",
		ContentType: "application/json",
		UserAgent:   "hickwall",
		Timeout:     10 * time.Second,
	}.Do()

	if err != nil {
		if serr, ok := err.(*goreq.Error); ok && serr.Timeout() {
			return nil, logging.SError(err)
		}
		return nil, logging.SErrorf("registry request failed: %v", err)
	}
	defer hResp.Body.Close()

	if hResp.StatusCode != 200 {
		return nil, logging.SErrorf("status code != 200: %d", hResp.StatusCode)
	}

	body, err := ioutil.ReadAll(hResp.Body)
	if err != nil {
		return nil, logging.SErrorf("failed to read body %v", err)
	}

	resp, err := new_reg_response_from_json(body)
	if err != nil {
		return nil, logging.SError(err)
	}

	if resp.ErrorCode != 0 {
		if resp.ErrorCode == 1001 {
			logging.Warn("this agent already registried.")
		} else {
			return nil, logging.SErrorf("registry server give this error: %d, msg: %s", resp.ErrorCode, resp.ErrorMsg)
		}

	}

	if len(resp.EtcdMachines) == 0 {
		return nil, logging.SError("EtcdMachines is empty")
	}

	for _, m := range resp.EtcdMachines {
		machine, err := url.Parse(m)
		if err != nil {
			return nil, logging.SErrorf("invalid etcd machine url: %s", err)
		}
		if machine.Scheme != "http" {
			return nil, logging.SErrorf("etcd machine url only support http : %s", m)
		}
	}

	if resp.EtcdConfigPath == "" {
		return nil, logging.SError("config path is empty")
	}

	if resp.RequestHash != hReq.Hash {
		return nil, logging.SErrorf("request hash and response hash mismatch: %s != (response)%s", hReq.Hash, resp.RequestHash)
	}
	resp.Request = req

	logging.Debug("do_registry finished")
	resp.Save()
	return resp, nil
}
Example #15
func (f *fanout) loop() {
	logging.Debug("fanout.loop() started")
	var (
		startConsuming <-chan MultiDataPoint
	)

	startConsuming = f.sub.Updates()

	for idx := range f.chan_pubs {
		closing := make(chan chan error)
		f.closing_list = append(f.closing_list, closing)
		go f.cosuming(idx, closing)
	}

main_loop:
	for {
		select {
		case md, opening := <-startConsuming:
			if !opening {
				f.Close()
				break main_loop
			}
			for idx, p := range f.pending {
				if len(p) < maxPending {
					p <- md
				} else {
					logging.Warnf("fanout.loop.main_loop: pending channel is jamming: bkname: %s\n", f.bks[idx].Name())
				}
			}
		case errc := <-f.closing:
			startConsuming = nil // stop consuming from sub

			for idx, bk := range f.bks {
				// closing consuming of each backend
				consuming_errc := make(chan error)
				f.closing_list[idx] <- consuming_errc
				<-consuming_errc

				// close backend. copy bk: the goroutine may outlive this
				// iteration if the timeout below fires.
				bk := bk
				go func() {
					consuming_errc <- bk.Close()
				}()
				timeout := time.After(time.Duration(1) * time.Second)
			wait_bk_close:
				for {
					select {
					case <-consuming_errc:
						break wait_bk_close
					case <-timeout:
						logging.Errorf("backend(%s) is blocking the fanout closing process!\n", bk.Name())
						break wait_bk_close
					}
				}

			}
			logging.Debug("fanout.loop() closed all consuming backends")
			errc <- nil
			break main_loop
		}
	}

	logging.Debug("fanout.loop() exit main_loop")

	timeout := time.After(time.Duration(1) * time.Second)
	closing_sub := make(chan error)
	go func() {
		closing_sub <- f.sub.Close()
	}()
	for {
		select {
		case <-closing_sub:
			logging.Debug("fanout.loop() returned")
			return
		case <-timeout:
			logging.Errorf("Subscription(%s) is blocking the fanout closing process! forced return with timeout\n", f.sub.Name())
			return
		}
	}
}
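fanout and the backend loops below all stop through the same handshake: Close() sends a fresh reply channel into closing, and the loop answers on it once teardown is done, so Close() blocks until the loop has actually finished. Reduced to its essentials; worker is a stand-in type, not from the source:

package main

import "fmt"

type worker struct {
	closing chan chan error
}

func (w *worker) loop() {
	for {
		select {
		case errc := <-w.closing:
			// release resources here, then acknowledge the shutdown
			errc <- nil
			return
		}
	}
}

// Close blocks until loop has acknowledged the shutdown.
func (w *worker) Close() error {
	errc := make(chan error)
	w.closing <- errc
	return <-errc
}

func main() {
	w := &worker{closing: make(chan chan error)}
	go w.loop()
	fmt.Println(w.Close()) // <nil>
}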
Example #16
func (b *kafkaBackend) loop() {
	var (
		startConsuming    <-chan newcore.MultiDataPoint
		try_connect_first chan bool
		try_connect_tick  <-chan time.Time
	)
	startConsuming = b.updates
	logging.Info("kafkaBackend.loop started")

	for {
		if b.producer == nil && try_connect_first == nil && try_connect_tick == nil {
			startConsuming = nil // disable consuming

			try_connect_first = make(chan bool)
			logging.Debug("trying to connect to kafka first time.")

			// trying to connect to kafka first time
			go func() {
				err := b.connect()
				if b.producer != nil && err == nil {
					logging.Debugf("connect kafka first time OK: %v", b.producer)
					try_connect_first <- true
				} else {
					logging.Criticalf("connect to kafka failed %s", err)
					try_connect_first <- false
				}
			}()
		}
		if startConsuming != nil {
			logging.Trace("kafkaBackend consuming started")
		}

		select {
		case md := <-startConsuming:
			for idx, p := range md {
				b.producer.Input() <- &sarama.ProducerMessage{
					Topic: b.conf.Topic_id,
					Key:   sarama.StringEncoder(p.Metric),
					Value: p,
				}
				_d, _ := p.Encode()
				logging.Tracef("kafka producer ---> %d,  %s", idx, _d)
			}
			logging.Debugf("kafkaBackend consuming finished: count: %d", len(md))
		case connected := <-try_connect_first:
			try_connect_first = nil // disable this branch
			if !connected {
				// failed to connect the first time,
				// so retry on an interval until the connection succeeds.
				logging.Critical("connect first time failed, try to connect with interval of 1s")
				try_connect_tick = time.Tick(time.Second * 1)
			} else {
				logging.Debug("kafka connected the first time.")
				startConsuming = b.updates
			}
		case <-try_connect_tick:
			// try to connect with interval
			err := b.connect()
			if b.producer != nil && err == nil {
				// finally connected.
				try_connect_tick = nil
				startConsuming = b.updates
			} else {
				logging.Criticalf("kafka backend trying to connect but failed: %s", err)
			}
		case errc := <-b.closing:
			logging.Info("kafkaBackend.loop closing")
			startConsuming = nil // stop consuming
			errc <- nil
			close(b.updates)
			logging.Info("kafkaBackend.loop closed")
			return
		}
	}
}
Example #17
func (b *fileBackend) loop() {
	var (
		startConsuming     <-chan newcore.MultiDataPoint
		try_open_file_once chan bool
		try_open_file_tick <-chan time.Time
		buf                = bytes.NewBuffer(make([]byte, 0, 1024))
	)
	startConsuming = b.updates
	logging.Debugf("filebackend.loop started")

	for {
		if b.output == nil && try_open_file_once == nil && try_open_file_tick == nil {
			startConsuming = nil // disable consuming
			try_open_file_once = make(chan bool)
			// log.Println("try to open file the first time.")

			// try to open file the first time async.
			go func() {
				err := b.openFile()

				if b.output != nil && err == nil {
					// log.Println("openFile first time OK", b.output)
					try_open_file_once <- true
				} else {
					logging.Errorf("filebackend trying to open file but failed: %s", err)
					try_open_file_once <- false
				}
			}()
		}

		select {
		case md := <-startConsuming:
			for _, p := range md {
				if b.output != nil {
					res, _ := p.MarshalJSON()
					buf.Write(res)
					buf.Write([]byte("\n"))
					b.output.Write(buf.Bytes())
					buf.Reset()
				}
			}

		case opened := <-try_open_file_once:
			try_open_file_once = nil // disable this branch
			if !opened {
				// failed to open the file the first time,
				// so retry on an interval until the open succeeds.
				logging.Error("open the first time failed, try to open with interval of 1s")
				try_open_file_tick = time.Tick(time.Second * 1)
			} else {
				logging.Debugf("file opened the first time.")
				startConsuming = b.updates
			}
		case <-try_open_file_tick:
			// try to open with interval
			err := b.openFile()
			if b.output != nil && err == nil {
				// finally opened.
				try_open_file_tick = nil
				startConsuming = b.updates
			} else {
				logging.Errorf("filebackend trying to open file but failed: %s", err)
			}
		case errc := <-b.closing:
			logging.Debug("filebackend.loop closing")
			startConsuming = nil // stop consuming
			errc <- nil
			close(b.updates)
			logging.Debug("filebackend.loop stopped")
			return
		}
	}
}
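The file, kafka, and influxdb backends share one skeleton: consuming stays disabled (a nil channel never fires in a select) until the resource is ready, tried once asynchronously and then on a ticker. A distilled, runnable sketch; open and updates are stand-ins for openFile/connect and b.updates:

package main

import (
	"errors"
	"log"
	"time"
)

func retryLoop(open func() error, updates <-chan string) {
	var (
		consume <-chan string    // nil: consuming disabled
		tryOnce chan bool        // result of the single async attempt
		tryTick <-chan time.Time // retry ticker
	)
	tryOnce = make(chan bool)
	go func() { tryOnce <- open() == nil }()

	for {
		select {
		case s := <-consume:
			log.Println("consumed:", s)
		case ok := <-tryOnce:
			tryOnce = nil // disable this branch
			if ok {
				consume = updates
			} else {
				tryTick = time.Tick(time.Second)
			}
		case <-tryTick:
			if open() == nil {
				tryTick = nil
				consume = updates
			}
		}
	}
}

func main() {
	attempts := 0
	open := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("not ready yet")
		}
		return nil
	}
	updates := make(chan string, 1)
	updates <- "hello"
	go retryLoop(open, updates)
	time.Sleep(3 * time.Second) // let the retries and one consume happen
}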
Example #18
func runAsPrimaryService(args []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (ssec bool, errno uint32) {
	logging.Debug("runAsPrimaryService started")

	defer utils.Recover_and_log()

	const cmdsAccepted = svc.AcceptStop | svc.AcceptShutdown
	changes <- svc.Status{State: svc.StartPending}
	changes <- svc.Status{State: svc.Running, Accepts: cmdsAccepted}

	//http://localhost:6060/debug/pprof/
	// utils.HttpPprofServe(6060)

	//	after := time.After(time.Duration(8) * time.Minute)
	// f, _ := os.Create("d:\\cpu-" + strconv.Itoa(pid) + ".pprof")
	// pprof.StartCPUProfile(f)
	// defer pprof.StopCPUProfile()

	//	cfg := profile.Config{
	//		MemProfile:     true,
	//		ProfilePath:    "./pprofs/", // store profiles in current directory
	//		NoShutdownHook: true,        // do not hook SIGINT
	//	}
	//	p := profile.Start(&cfg)
	//
	//	defer p.Stop()

	// utils.StartCPUProfile()
	// defer utils.StopCPUProfile()

	// go func() {
	// 	for {
	// 		<-time.After(time.Second * time.Duration(15))
	// 		debug.FreeOSMemory()
	// 	}
	// }()

	err := hickwall.Start()
	if err != nil {
		logging.Criticalf("Failed to start hickwall: %v", err)
		return
	}
	defer hickwall.Stop()

	logging.Debug("service event handling loop started ")
	// major loop for signal processing.
loop:
	for {
		select {
		case c := <-r:
			switch c.Cmd {
			case svc.Interrogate:
				changes <- c.CurrentStatus
				// testing deadlock from https://code.google.com/p/winsvc/issues/detail?id=4
				time.Sleep(100 * time.Millisecond)
				changes <- c.CurrentStatus
			case svc.Stop, svc.Shutdown:
				break loop
			default:
				logging.Errorf("unexpected control request #%d", c)
			}
		}
	}
	changes <- svc.Status{State: svc.StopPending}
	logging.Debug("runAsPrimaryService stopped")
	return
}
Example #19
func main() {
	defer utils.Recover_and_log()
	logging.Debug("hickwall main ------- sub packages init process finished")

	app := cli.NewApp()
	app.Name = "hickwall"
	app.Usage = "collect metrics effortlessly."
	app.Version = fmt.Sprintf("%s - %s", Version, Build)

	app.Commands = []cli.Command{
		//TODO: configuration test, reload
		// {
		// 	Name:      "config",
		// 	ShortName: "",
		// 	Usage:     "config",
		// 	Subcommands: []cli.Command{
		// 		{
		// 			Name:      "test",
		// 			ShortName: "",
		// 			Usage:     "test",
		// 			Action:    command.CmdConfigTest,
		// 		},
		// 		{
		// 			Name:      "reload",
		// 			ShortName: "",
		// 			Usage:     "reload",
		// 			Action:    command.CmdConfigReload,
		// 		},
		// 	},
		// },
		{
			Name:      "service",
			ShortName: "s",
			Usage:     "service",
			Subcommands: []cli.Command{
				{
					Name:      "status",
					ShortName: "s",
					Usage:     "status",
					Action:    command.CmdServiceStatus,
				},
				{
					Name:   "statuscode",
					Usage:  "statuscode(internal use only.)",
					Action: command.CmdServiceStatusCode,
				},
				{
					Name:      "install",
					ShortName: "i",
					Usage:     "install service",
					Action:    command.CmdServiceInstall,
				},
				{
					Name:      "remove",
					ShortName: "d",
					Usage:     "remove service",
					Action:    command.CmdServiceRemove,
				},
				{
					Name:      "start",
					ShortName: "g",
					Usage:     "start service.",
					Action:    command.CmdServiceStart,
				},
				{
					Name:      "stop",
					ShortName: "x",
					Usage:     "stop service.",
					Action:    command.CmdServiceStop,
				},
				{
					Name:      "restart",
					ShortName: "n",
					Usage:     "restart service",
					Action:    command.CmdServiceRestart,
				},
			},
		},
		{
			Name:      "version",
			ShortName: "v",
			Usage:     "show version info",
			Action: func(c *cli.Context) {
				fmt.Printf("%s version: %s\n", app.Name, app.Version)
			},
		},
		{
			Name:      "daemon",
			ShortName: "d",
			Usage:     "run as daemon",
			Action: func(c *cli.Context) {
				run(false, false)
			},
		},
		{
			Name:   "config",
			Usage:  "show config info",
			Action: command.CmdShowConfig,
		},
	}

	if len(os.Args) >= 2 {

		logging.Debug("executing commands")
		app.Run(os.Args)

	} else {

		isIntSess, err := servicelib.IsAnInteractiveSession()
		if err != nil {
			logging.Errorf("failed to determine if we are running in an interactive session or not: %v", err)
			return
		}

		if !isIntSess {
			logging.Debug("running as service")
			run(false, true)
			return
		}

		//print help here.
		app.Run(os.Args)
	}
	return
}
Example #20
func (b *influxdbBackend) loop() {
	var (
		startConsuming         <-chan newcore.MultiDataPoint
		try_create_client_once chan bool
		try_create_client_tick <-chan time.Time
	)
	startConsuming = b.updates
	logging.Debug("influxdb backend loop started ")

	for {
		if b.output == nil && try_create_client_once == nil && try_create_client_tick == nil {
			startConsuming = nil // disable consuming
			try_create_client_once = make(chan bool)
			// try to create influxdb client the first time async.
			go func() {
				err := b.newInfluxdbClientFromConf()
				if err == nil {
					try_create_client_once <- true
				} else {
					try_create_client_once <- false
				}
			}()
		}

		//TODO: Flush_interval and Max_batch_size
		select {
		case md := <-startConsuming:
			if b.output != nil {
				points := []client.Point{}
				for _, p := range md {
					// logging.Debug(p.Metric.Clean())
					// logging.Debug(utils.Convert(p.Value))
					points = append(points, client.Point{
						Measurement: p.Metric.Clean(),
						Time:        p.Timestamp,
						Fields: map[string]interface{}{
							"value": utils.Convert(p.Value),
						},
						Tags: p.Tags, //TODO: Tags
					})
				}
				write := client.BatchPoints{
					Database:        b.conf.Database,
					RetentionPolicy: b.conf.RetentionPolicy,
					Points:          points,
				}
				// logging.Debugf("write: count: %d", len(md))

				//FIXME: connection timeout?
				resp, err := b.output.Write(write)
				if err != nil {
					logging.Errorf("failed to write into influxdb: %v, %+v", err, resp)
				}
			}
		case opened := <-try_create_client_once:
			try_create_client_once = nil // disable this branch
			if !opened {
				// failed to create the client the first time,
				// so retry on an interval until it succeeds.
				logging.Debug("create client the first time failed, retry with interval of 1s")
				try_create_client_tick = time.Tick(time.Second * 1)
			} else {
				startConsuming = b.updates
			}
		case <-try_create_client_tick:
			// retry creating the client on an interval
			err := b.newInfluxdbClientFromConf()
			if b.output != nil && err == nil {
				// finally created.
				try_create_client_tick = nil
				startConsuming = b.updates
			} else {
				logging.Criticalf("influxdb backend trying to create client but failed: %s", err)
			}
		case errc := <-b.closing:
			logging.Debug("influxdbBackend.loop closing")
			startConsuming = nil // stop consuming
			errc <- nil
			close(b.updates)
			logging.Debug("influxdbBackend.loop stopped")
			return
		}
	}
}