Example #1
func protect(h httprouter.Handle, expire time.Duration, trigger string) httprouter.Handle {
	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {

		secure := false
		if trigger == "read" && config.CoreConf.Secure_api_read {
			secure = true
		} else if trigger == "write" && config.CoreConf.Secure_api_write {
			secure = true
		}
		logging.Infof("trigger: %s, secure: %v, write: %v, read: %v\n", trigger, secure, config.CoreConf.Secure_api_write, config.CoreConf.Secure_api_read)

		if secure {
			hostname := r.URL.Query().Get("hostname")
			if strings.ToLower(hostname) != newcore.GetHostname() {
				logging.Errorf("hostname mismatch: %v", hostname)
				http.Error(w, "hostname mismatch", 500)
				return
			}

			time_str := r.URL.Query().Get("time")
			tm, err := utils.UTCTimeFromUnixStr(time_str)
			if err != nil {
				logging.Errorf("invalid time: %v", time_str)
				http.Error(w, "Invalid Time", 500)
				return
			}

			if age := time.Since(tm); age > expire {
				// expired request
				logging.Errorf("expired request: %v", age)
				http.Error(w, "expired request", 500)
				return
			}

			// we need to verify the request:
			// the caller must put a signature of this agent's hostname into the HICKWALL_ADMIN_SIGN header
			load_unsigner()

			signed_str := r.Header.Get("HICKWALL_ADMIN_SIGN")
			signed, err := base64.StdEncoding.DecodeString(signed_str)
			if err != nil {
				logging.Error("cannot decode sign")
				http.Error(w, "cannot decode sign", 500)
				return
			}

			toSign := fmt.Sprintf("%s%s", hostname, time_str)
			logging.Trace("unsign started")
			err = unsigner.Unsign([]byte(toSign), signed)
			logging.Trace("unsign finished")
			if err != nil {
				logging.Errorf("-> invalid signature: %v <-", string(signed))
				http.Error(w, "invalid signature", 500)
				return
			}
		}

		h(w, r, ps)
	}
}
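A minimal usage sketch of wiring the middleware above into an httprouter router. This is an assumption-based illustration, not part of the original project: the /api/config path, the 30-second expiry window, and the listConfig handler are hypothetical.

// Sketch only: assumes protect (above) is defined in the same package and
// that the surrounding project's config/logging/newcore/utils packages are
// already initialized. Path, expiry window, and handler are hypothetical.
func listConfig(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	w.Write([]byte("ok"))
}

func registerRoutes() *httprouter.Router {
	router := httprouter.New()
	// "read" ties the check to config.CoreConf.Secure_api_read;
	// "write" would tie it to config.CoreConf.Secure_api_write.
	router.GET("/api/config", protect(listConfig, 30*time.Second, "read"))
	return router
}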
Example #2
// Subscribe returns a new Subscription that uses collector to collect DataPoints.
func Subscribe(collector Collector, opt *SubOptions) Subscription {
	var delay time.Duration
	var err error

	if opt == nil {
		logging.Trace("Subscribe: opt is nil, using defaults: MaxPending=1, DelayOnError=5s")
		opt = &SubOptions{
			MaxPending:   1,
			DelayOnError: "5s",
		}
	} else if opt.MaxPending <= 0 {
		logging.Trace("opt.MaxPending is not positive, using default 1 instead")
		opt.MaxPending = 1
	}

	if delay, err = time.ParseDuration(opt.DelayOnError); err != nil || delay < 100*time.Millisecond {
		logging.Trace("opt.DelayOnError is invalid or below 100ms, using default 100ms")
		delay = 100 * time.Millisecond
	}

	s := sub{
		collector:      collector,
		updates:        make(chan MultiDataPoint), // for Updates
		closing:        make(chan chan error),     // for Close
		maxPending:     opt.MaxPending,            //
		delay_on_error: delay,                     // delay on collect error
		name:           collector.Name(),
	}
	go s.loop()
	return s
}
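A hedged usage sketch for Subscribe. The newDummyCollector constructor is hypothetical, and the Updates() accessor is inferred from the "for Updates" comment on the updates channel rather than confirmed from the source.

// Sketch only: assumes a Collector implementation is available (the
// newDummyCollector constructor is hypothetical) and that Subscription
// exposes its updates channel through an Updates() method.
func consumeExample() {
	s := Subscribe(newDummyCollector(), &SubOptions{
		MaxPending:   8,    // buffer up to 8 pending MultiDataPoints
		DelayOnError: "1s", // back off 1s after a collect error
	})
	for md := range s.Updates() {
		logging.Debugf("received %d data points", len(md))
	}
}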
Example #3
func Stop() error {
	if Running() {
		switch config.CoreConf.Config_strategy {
		case config.ETCD:
			logging.Trace("Stopping etcd strategy")
			done <- nil
		case config.REGISTRY:
			logging.Trace("Stopping registry strategy")
		default:
			logging.Trace("Stopping default file strategy")
			close_core()
		}
	}
	logging.Info("core stopped")
	return nil
}
Example #4
func CmdServiceStart(c *cli.Context) {
	logging.Trace("CmdServiceStart")
	// -----------------------------------
	err := HelperService.StartService()
	if err != nil {
		fmt.Println("error: ", err)
		logging.Tracef("error: %v", err)
	} else {
		fmt.Printf("service %s started\n", HelperService.Name())
		logging.Tracef("service %s started\n", HelperService.Name())
	}

	err = PrimaryService.StartService()
	if err != nil {
		fmt.Println("error: ", err)
		logging.Tracef("error: %v", err)
	} else {
		fmt.Printf("service %s started\n", PrimaryService.Name())
		logging.Tracef("service %s started\n", PrimaryService.Name())
	}
}
Example #5
func CmdServiceStatus(c *cli.Context) {
	logging.Trace("CmdServiceStatus")

	// -----------------------------------
	state, err := HelperService.Status()
	if err != nil {
		fmt.Println("error: ", err)
		logging.Tracef("error: %v", err)
	} else {
		fmt.Printf("service %s is %s\n", HelperService.Name(), servicelib.StateToString(state))
		logging.Tracef("service %s is %s\n", HelperService.Name(), servicelib.StateToString(state))
	}

	state, err = PrimaryService.Status()
	if err != nil {
		fmt.Println("error: ", err)
		logging.Tracef("error: %v", err)

	} else {
		fmt.Printf("service %s is %s\n", PrimaryService.Name(), servicelib.StateToString(state))
		logging.Tracef("service %s is %s\n", PrimaryService.Name(), servicelib.StateToString(state))
	}

}
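A sketch of how these handlers might be registered as subcommands with the cli package (codegangsta/cli v1 style). The command names and usage strings are assumptions, not taken from the project.

// Sketch only: command names and usage strings are hypothetical; the handler
// signature func(*cli.Context) matches the CmdService* functions above.
func newServiceCommand() cli.Command {
	return cli.Command{
		Name:  "service",
		Usage: "manage the agent services",
		Subcommands: []cli.Command{
			{Name: "start", Usage: "start the services", Action: CmdServiceStart},
			{Name: "status", Usage: "show service status", Action: CmdServiceStatus},
		},
	}
}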
Example #6
func (b *kafkaBackend) loop() {
	var (
		startConsuming    <-chan newcore.MultiDataPoint
		try_connect_first chan bool
		try_connect_tick  <-chan time.Time
	)
	startConsuming = b.updates
	logging.Info("kafkaBackend.loop started")

	for {
		if b.producer == nil && try_connect_first == nil && try_connect_tick == nil {
			startConsuming = nil // disable consuming

			try_connect_first = make(chan bool)
			logging.Debug("trying to connect to kafka first time.")

			// trying to connect to kafka first time
			go func() {
				err := b.connect()
				if b.producer != nil && err == nil {
					logging.Debugf("connect kafka first time OK: %v", b.producer)
					try_connect_first <- true
				} else {
					logging.Criticalf("connect to kafka failed %s", err)
					try_connect_first <- false
				}
			}()
		}
		if startConsuming != nil {
			logging.Trace("kafkaBackend consuming started")
		}

		select {
		case md := <-startConsuming:
			for idx, p := range md {
				b.producer.Input() <- &sarama.ProducerMessage{
					Topic: b.conf.Topic_id,
					Key:   sarama.StringEncoder(p.Metric),
					Value: p,
				}
				_d, _ := p.Encode()
				logging.Tracef("kafka producer ---> %d,  %s", idx, _d)
			}
			logging.Debugf("kafkaBackend consuming finished: count: %d", len(md))
		case connected := <-try_connect_first:
			try_connect_first = nil // disable this branch
			if !connected {
				// the first connect attempt failed, so keep retrying on a
				// fixed interval until the connection succeeds.
				logging.Critical("first connect attempt failed, retrying every 1s")
				try_connect_tick = time.Tick(time.Second * 1)
			} else {
				logging.Debug("kafka connected the first time.")
				startConsuming = b.updates
			}
		case <-try_connect_tick:
			// try to connect with interval
			err := b.connect()
			if b.producer != nil && err == nil {
				// finally connected.
				try_connect_tick = nil
				startConsuming = b.updates
			} else {
				logging.Criticalf("kafka backend trying to connect but failed: %s", err)
			}
		case errc := <-b.closing:
			logging.Info("kafaBackend.loop closing")
			startConsuming = nil // stop comsuming
			errc <- nil
			close(b.updates)
			logging.Info("kafaBackend.loop closed")
			return
		}
	}
}
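The loop above leans on the fact that receiving from a nil channel blocks forever: setting startConsuming, try_connect_first, or try_connect_tick to nil switches the corresponding select case off without restructuring the select. A standalone sketch of that idiom, independent of kafka:

// Sketch only: illustrates the nil-channel trick used in loop().
// A receive on a nil channel can never proceed, so a case is disabled
// simply by setting its channel variable to nil.
func nilChannelDemo() {
	data := make(chan int, 1)
	data <- 42

	var disabled <-chan int // nil: this case can never fire

	select {
	case v := <-data:
		fmt.Println("got", v) // always taken here
	case <-disabled:
		fmt.Println("never reached")
	}
}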
Example #7
func CmdServiceRestart(c *cli.Context) {
	logging.Trace("CmdServiceRestart")

	CmdServiceStop(c)
	CmdServiceStart(c)
}
func (c *InfluxdbClient_v088) Write(bp client090.BatchPoints) (*client090.Response, error) {
	// logging.Debug("InfluxdbClient_v088.Write")
	// v0.9.0-rc7 [
	//  {
	//      Name: "a",
	//      Timestamp: "1",
	//      Fields: {"f1": "v1", "f2": "v2"},
	//      Precision: "s"
	//  }
	// ]

	// v0.8.8  [
	//   {
	//     "name": "log_lines",
	//     "columns": ["time", "sequence_number", "line"],
	//     "points": [
	//       [1400425947368, 1, "this line is first"],
	//       [1400425947368, 2, "and this is second"]
	//     ]
	//   }
	// ]

	var series []*client088.Series

	for _, p := range bp.Points {
		s := client088.Series{}
		// s.Name = p.Name
		name, err := newcore.FlatMetricKeyAndTags(c.flat_tpl, p.Measurement, p.Tags)
		if err != nil {
			logging.Error("FlatMetricKeyAndTags Failed!", err)
			return nil, err
		}
		s.Name = name

		point := []interface{}{}

		// time, first
		s.Columns = append(s.Columns, "time")
		point = append(point, p.Time.UnixNano()/1000000)

		// then others
		for key, value := range p.Fields {
			s.Columns = append(s.Columns, key)
			point = append(point, value)
		}

		s.Points = append(s.Points, point)

		logging.Tracef("influxdb --> %+v", s)

		series = append(series, &s)
	}

	// pretty.Println(series)

	err := c.client.WriteSeriesWithTimePrecision(series, "ms")
	if err != nil {
		logging.Errorf("InfluxdbClient_v088.Write.WriteSeriesWithTimePrecision Error: %v", err)
	} else {
		logging.Trace("InfluxdbClient_v088.Write Done No Error")
	}

	return nil, err
}