Code Example #1
File: api_server.go  Project: homingway/hickwall
func serve_api() {
	router := httprouter.New()
	router.GET("/sys_info", protect_read(serveSysInfo, time.Second))

	router.DELETE("/registry/revoke", protect_write(serveRegistryRevoke, time.Second))

	//	router.GET("/registry", serveRegistryGet)
	//	router.POST("/registry/accept", serveRegistryAccept)

	//	router.PUT("/registry/renew", serveRegistryRenew)
	//	router.PUT("/config/renew", serveConfigRenew)

	addr := ":3031"
	if config.CoreConf.Listen_port > 0 {
		addr = fmt.Sprintf(":%d", config.CoreConf.Listen_port)
	}
	logging.Infof("api served at: %s", addr)

	api_srv_running = true
	err := http.ListenAndServe(addr, router)
	api_srv_running = false
	if err != nil {
		logging.Criticalf("api server is not running!: %v", err)
	} else {
		logging.Info("api server stopped")
	}
}
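The protect_read and protect_write wrappers are not part of this excerpt; judging from the secure_api_read/secure_api_write options in code example #3 they likely also verify request signatures. As a rough sketch of just the timeout-guard part (the wrapper body below is an assumption for illustration, not hickwall's actual implementation):

// Hypothetical sketch of a guard like protect_read: attach a deadline
// to the request context so the wrapped handler can bail out in time.
// This is an assumption about the wrapper, not the project's code.
func protect_read(h httprouter.Handle, timeout time.Duration) httprouter.Handle {
	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
		ctx, cancel := context.WithTimeout(r.Context(), timeout)
		defer cancel()
		h(w, r.WithContext(ctx), ps)
	}
}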
Code Example #2
File: run_service_win.go  Project: homingway/hickwall
// run is the entry point of the long-running daemon.
func run(isDebug bool, daemon bool) {
	if !config.IsCoreConfigLoaded() {
		err := config.LoadCoreConfig()
		if err != nil {
			logging.Critical("Failed to load core config: ", err)
			return
		}
	}

	go func() {
	loop:
		rss_mb := float64(gs.GetCurrentRSS()) / 1024 / 1024 // MB
		logging.Tracef("current rss: %f", rss_mb)
		if rss_mb > float64(config.CoreConf.Rss_limit_mb) {
			logging.Criticalf("Suicide. CurrentRSS above limit: %f > %d MB", rss_mb, config.CoreConf.Rss_limit_mb)
			os.Exit(1)
		}
		next := time.After(time.Second)
		<-next
		goto loop
	}()

	if daemon {
		runService(isDebug)
	} else {
		runWithoutService()
	}
}
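The goto-based loop above works, but the same watchdog reads more naturally as a ticker loop. An equivalent, goto-free sketch (the helper name startRSSWatchdog is illustrative, not from the project):

// Equivalent sketch of the RSS watchdog above, written as a helper.
// startRSSWatchdog is a hypothetical name for illustration.
func startRSSWatchdog(limitMB int) {
	go func() {
		for range time.Tick(time.Second) {
			rssMB := float64(gs.GetCurrentRSS()) / 1024 / 1024 // MB
			if rssMB > float64(limitMB) {
				logging.Criticalf("Suicide. CurrentRSS above limit: %f > %d MB", rssMB, limitMB)
				os.Exit(1)
			}
		}
	}()
}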
Code Example #3
File: core_config.go  Project: homingway/hickwall
func LoadCoreConfig() error {
	data, err := ioutil.ReadFile(CORE_CONF_FILEPATH)
	if err != nil {
		return fmt.Errorf("faild to read core config: %v", err)
	}
	CoreConf = CoreConfig{}
	// We can unmarshal the YAML directly into the struct only because
	// the core config structure is very simple and flat.
	err = yaml.Unmarshal(data, &CoreConf)
	if err != nil {
		return fmt.Errorf("unable to unmarshal yaml: %v", err)
	}

	if CoreConf.Rss_limit_mb <= 0 {
		CoreConf.Rss_limit_mb = 50 // default rss limit
	}
	if CoreConf.Listen_port <= 0 {
		CoreConf.Listen_port = 3031
	}
	if CoreConf.Hostname != "" {
		newcore.SetHostname(CoreConf.Hostname)
	}

	logging.SetLevel(CoreConf.Log_level)

	if CoreConf.Enable_http_api && (CoreConf.Secure_api_read || CoreConf.Secure_api_write) {
		// Verify up front that the server public key is readable.
		_, err := utils.LoadPublicKeyFromPath(CoreConf.Server_pub_key_path)
		if err != nil {
			logging.Criticalf("unable to load server public key while secure_api_read/secure_api_write is enabled: %v", err)
		}
	}

	logging.Debugf("SHARED_DIR:            %s\n", SHARED_DIR)
	logging.Debugf("LOG_DIR:               %s\n", LOG_DIR)
	logging.Debugf("LOG_FILEPATH:          %s\n", LOG_FILEPATH)
	logging.Debugf("CORE_CONF_FILEPATH:    %s\n", CORE_CONF_FILEPATH)
	logging.Debugf("CONF_FILEPATH:         %s\n", CONF_FILEPATH)
	logging.Debugf("REGISTRY_FILEPATH:     %s\n", REGISTRY_FILEPATH)
	logging.Debugf("CONF_GROUP_DIRECTORY:  %s\n", CONF_GROUP_DIRECTORY)
	logging.Debugf("CoreConfig:            %+v\n", CoreConf)
	logging.Debug("CoreConfig Loaded ==============================================")

	core_conf_loaded = true
	return nil
}
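The CoreConfig struct itself is not included in this excerpt. Reconstructed from the fields referenced in LoadCoreConfig and the other examples, its shape is roughly the following; the field names match the code, while the yaml tags and field types are assumptions:

// Sketch of CoreConfig reconstructed from usage in these examples.
// yaml tags and types are assumptions, not the project's definition.
type CoreConfig struct {
	Hostname            string `yaml:"hostname"`            // overrides the reported hostname
	Listen_port         int    `yaml:"listen_port"`         // HTTP API port, default 3031
	Rss_limit_mb        int    `yaml:"rss_limit_mb"`        // memory watchdog limit, default 50
	Log_level           string `yaml:"log_level"`
	Enable_http_api     bool   `yaml:"enable_http_api"`
	Secure_api_read     bool   `yaml:"secure_api_read"`
	Secure_api_write    bool   `yaml:"secure_api_write"`
	Server_pub_key_path string `yaml:"server_pub_key_path"`
}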
Code Example #4
File: wmi.go  Project: homingway/hickwall
func init() {
	if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
		logging.Criticalf("CoInitializeEx failed: %v", err)
		panic(err)
	}

	unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator")
	if err != nil {
		logging.Criticalf("oleutil.CreateObject Failed: %v", err)
		panic(err)
	}
	defer unknown.Release()

	wmi, err := unknown.QueryInterface(ole.IID_IDispatch)
	if err != nil {
		logging.Criticalf("QueryInterface Failed: %v", err)
		panic(err)
	}
	defer wmi.Release()

	// Connect to the local WMI service and keep the IDispatch for later queries.
	serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer")
	if err != nil {
		logging.Criticalf("ConnectServer failed: %v", err)
		panic(err)
	}
	wmi_service = serviceRaw.ToIDispatch()
}
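With wmi_service initialized, later collectors can issue WQL queries through the same IDispatch handle. A minimal sketch (the helper name and query string are illustrative; the caller must Release the returned object):

// Hypothetical helper: run a WQL query through the wmi_service handle
// created in init(), via the standard SWbemServices.ExecQuery method.
func queryWMI(wql string) (*ole.IDispatch, error) {
	resultRaw, err := oleutil.CallMethod(wmi_service, "ExecQuery", wql)
	if err != nil {
		return nil, err
	}
	return resultRaw.ToIDispatch(), nil // caller must Release()
}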
Code Example #5
func (c *kafka_subscription) consume() (<-chan newcore.MultiDataPoint, error) {
	logging.Info("consume")

	var out = make(chan newcore.MultiDataPoint)
	var err error
	var consumers []sarama.PartitionConsumer
	if c.master == nil {
		err = c.connect()
		if err != nil {
			return nil, err
		}
	}

	// Close any consumers left over from a previous call. The loop
	// variable is named cons to avoid shadowing the receiver c.
	for _, cons := range c.consumers {
		cons.Close()
	}
	c.consumers = nil

	partitions, err := c.master.Partitions(c.opts.Topic)
	if err != nil {
		return nil, fmt.Errorf("Cannot get partitions: %v", err)
	}
	logging.Infof("partitions: %v", partitions)

	err = c.state.Load()
	if err != nil {
		logging.Errorf("failed to load kafka state: %v", err)
	} else {
		logging.Infof("state: %+v", c.state.State())
	}

	flush_offset := true

	for _, part := range partitions {
		offset := int64(0)
		if c.state.Length() > 0 {
			offset = c.state.Offset(c.opts.Topic, part)
			if offset < 0 {
				offset = 0
			}
		}
		consumer, err := c.master.ConsumePartition(c.opts.Topic, part, offset)
		if err != nil {
			logging.Criticalf("Cannot consumer partition: %d, %v", part, err)
			return nil, fmt.Errorf("Cannot consumer partition: %d, %v", part, err)
		}
		logging.Infof("created consumer: %v", consumer)

		consumers = append(consumers, consumer)

		go func(flush_offset bool, topic string, part int32, out chan newcore.MultiDataPoint, consumer sarama.PartitionConsumer) {
			logging.Infof("start goroutine to consume: part: %d,  %v", part, &consumer)

			var items newcore.MultiDataPoint
			var flush_tick = time.Tick(c.flush_interval)
			var _out chan newcore.MultiDataPoint
			var startConsume <-chan *sarama.ConsumerMessage
			var flushing bool
			var offset int64

			for {
				if (flushing && len(items) > 0) || len(items) >= c.max_batch_size {
					_out = out         // enable output branch
					startConsume = nil // disable consuming branch
				} else if len(items) < c.max_batch_size {
					startConsume = consumer.Messages() // enable consuming branch
					_out = nil                         // disable output branch
				}

				select {
				case message := <-startConsume:
					offset = message.Offset
					dp, err := newcore.NewDPFromJson(message.Value)
					if err != nil {
						logging.Errorf("failed to parse datapoint: %v", err)
						continue // skip the bad message instead of appending a zero-value point
					}
					logging.Tracef("kafka dp --> %v", dp)
					items = append(items, dp)
				case <-flush_tick:
					flushing = true
					// every partition consumer records its offset at this interval
					c.state.Update(topic, part, offset)

					// only 1 goroutine will save state to disk
					if flush_offset && c.state.Changed() {
						logging.Tracef("flushing to disk: part: %d, offset: %d", part, offset)
						c.state.Save()
					}
				case _out <- items:
					items = nil                        // clear items
					_out = nil                         // disable output branch
					startConsume = consumer.Messages() // enable consuming branch
					flushing = false                   // disable flushing
				case err := <-consumer.Errors():
					logging.Infof("consumer.Errors: part:%d,  %v", part, err)
				}
			}
		}(flush_offset, c.opts.Topic, part, out, consumer)

		flush_offset = false // only the 1st goroutine is responsible for flushing state back to disk
	}
	c.consumers = consumers
	return out, nil
}
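The select in the goroutine above leans on a standard Go trick: sends and receives on a nil channel block forever, so a branch can be switched off by setting its channel variable to nil and switched back on by restoring it. Stripped of the Kafka details, the pattern looks like this (names are illustrative):

// Minimal illustration of the nil-channel batching pattern used above.
func batchLoop(source <-chan int, sink chan<- []int, max int) {
	var (
		in    = source     // receive branch enabled
		out   chan<- []int // nil: send branch disabled
		items []int
	)
	for {
		select {
		case v := <-in:
			items = append(items, v)
			if len(items) >= max {
				in = nil   // disable consuming
				out = sink // enable flushing
			}
		case out <- items:
			items = nil // clear the batch
			out = nil   // disable flushing
			in = source // re-enable consuming
		}
	}
}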
Code Example #6
File: kafka.go  Project: homingway/hickwall
func (b *kafkaBackend) loop() {
	var (
		startConsuming    <-chan newcore.MultiDataPoint
		try_connect_first chan bool
		try_connect_tick  <-chan time.Time
	)
	startConsuming = b.updates
	logging.Info("kafkaBackend.loop started")

	for {
		if b.producer == nil && try_connect_first == nil && try_connect_tick == nil {
			startConsuming = nil // disable consuming

			try_connect_first = make(chan bool)
			logging.Debug("trying to connect to kafka first time.")

			// trying to connect to kafka first time
			go func() {
				err := b.connect()
				if b.producer != nil && err == nil {
					logging.Debugf("connect kafka first time OK: %v", b.producer)
					try_connect_first <- true
				} else {
					logging.Criticalf("connect to kafka failed %s", err)
					try_connect_first <- false
				}
			}()
		}
		if startConsuming != nil {
			logging.Trace("kafkaBackend consuming started")
		}

		select {
		case md := <-startConsuming:
			for idx, p := range md {
				b.producer.Input() <- &sarama.ProducerMessage{
					Topic: b.conf.Topic_id,
					Key:   sarama.StringEncoder(p.Metric),
					Value: p,
				}
				_d, _ := p.Encode()
				logging.Tracef("kafka producer ---> %d,  %s", idx, _d)
			}
			logging.Debugf("kafkaBackend consuming finished: count: %d", len(md))
		case connected := <-try_connect_first:
			try_connect_first = nil // disable this branch
			if !connected {
				// The first attempt failed; keep retrying on a fixed
				// interval until we connect successfully.
				logging.Critical("first connect to kafka failed, retrying every 1s")
				try_connect_tick = time.Tick(time.Second * 1)
			} else {
				logging.Debug("kafka connected the first time.")
				startConsuming = b.updates
			}
		case <-try_connect_tick:
			// try to connect with interval
			err := b.connect()
			if b.producer != nil && err == nil {
				// finally connected.
				try_connect_tick = nil
				startConsuming = b.updates
			} else {
				logging.Criticalf("kafka backend trying to connect but failed: %s", err)
			}
		case errc := <-b.closing:
			logging.Info("kafaBackend.loop closing")
			startConsuming = nil // stop comsuming
			errc <- nil
			close(b.updates)
			logging.Info("kafaBackend.loop closed")
			return
		}
	}
}
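The closing branch implements a handshake: the caller sends an error channel in, and the loop replies on it before shutting down. A typical Close method for this pattern (a sketch, assuming b.closing is declared as chan chan error):

// Sketch of the matching Close method for the shutdown handshake above,
// assuming b.closing has type chan chan error.
func (b *kafkaBackend) Close() error {
	errc := make(chan error)
	b.closing <- errc // ask loop() to shut down
	return <-errc     // wait for acknowledgement
}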